| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
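The records below follow this schema, one source file per row. As a quick orientation, here is a minimal sketch of reading rows with this layout through the Hugging Face `datasets` library; the dataset path is a placeholder, since this dump does not name its source:

```python
# Minimal sketch, assuming a Hugging Face dataset with the schema above.
# "org/stack-like-dump" is a placeholder name, not the actual source.
from datasets import load_dataset

ds = load_dataset("org/stack-like-dump", split="train", streaming=True)
row = next(iter(ds))
print(row["hexsha"], row["max_stars_repo_name"], row["size"])
print(row["content"][:200])  # the raw file text stored in the `content` column
```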
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d2af6cc6ffcbe94ad442887d35faa47a8ec2cd | 1,090 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | source/packages/scs-pm-server/src/python-server/app.py | amittkSharma/scs_predictive_maintenance | 105a218b47d81d02f7e799287bd1e9279db452ce | ["MIT"] | null | null | null |
| max_issues | source/packages/scs-pm-server/src/python-server/app.py | amittkSharma/scs_predictive_maintenance | 105a218b47d81d02f7e799287bd1e9279db452ce | ["MIT"] | 1 | 2022-02-05T17:13:00.000Z | 2022-02-05T17:13:00.000Z |
| max_forks | source/packages/scs-pm-server/src/python-server/app.py | amittkSharma/scs_predictive_maintenance | 105a218b47d81d02f7e799287bd1e9279db452ce | ["MIT"] | null | null | null |

content:
```python
import json
import logging

import joblib
import pandas as pd
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin

app = Flask(__name__)
CORS(app)


@app.route("/api/machinePrediction", methods=['GET'])
def home():
    incomingMachineId = request.args.get('machineId')
    modelPath = request.args.get('modelPath')
    # note: columnNames arrives as a raw query-string value and is passed to
    # pandas unchanged; the caller is expected to supply a form pandas accepts
    column_names = request.args.get('columnNames')
    data_points = request.args.get('dataPoints')

    app.logger.info('Received machine id is %s', incomingMachineId)
    app.logger.info('Model path is %s', modelPath)

    json_object = json.loads(data_points)
    pairs = json_object.items()
    vitals_value = []
    for key, value in pairs:
        vitals_value.append(value)

    modelObj = joblib.load(modelPath)
    data = [vitals_value]
    df = pd.DataFrame(data=data, columns=column_names)
    modelPrediction = modelObj.predict(df)
    app.logger.info('Model prediction is: %s', modelPrediction)
    return jsonify(modelPrediction[0])


if __name__ == "__main__":
    app.run(debug=True)

# To start the server
# python3 app.py
```
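For context, a request against the endpoint above might look like the sketch below. Host, port, and all query values are hypothetical; `dataPoints` is the JSON object whose values the handler collects before calling the model.

```python
# Hypothetical client call; values and paths are illustrative only.
import requests

resp = requests.get(
    "http://127.0.0.1:5000/api/machinePrediction",  # Flask's default dev address
    params={
        "machineId": "machine-42",                  # assumed id
        "modelPath": "models/machine-42.joblib",    # assumed model file
        "columnNames": "vibration",                 # passed through to pandas as-is
        "dataPoints": '{"vibration": 0.73}',        # JSON string, parsed by the handler
    },
)
print(resp.json())
```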
Remaining columns in schema order (avg_line_length → hits):

`24.222222 | 67 | 0.709174 | 141 | 1,090 | 5.319149 | 0.517731 | 0.058667 | 0.074667 | 0.048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00222 | 0.173395 | 1,090 | 44 | 68 | 24.772727 | 0.830189 | 0.031193 | 0 | 0 | 0 | 0 | 0.129155 | 0.020893 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d3039f58743cfa00e492ea3768046369054479 | 4,411 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/test_remove_from_dependee_chain.py | ess-dmsc/nexus-constructor | ae0026c48f8f2d4d88d3ff00e45cb6591983853b | ["BSD-2-Clause"] | 3 | 2019-05-31T08:38:25.000Z | 2022-01-06T09:23:21.000Z |
| max_issues | tests/test_remove_from_dependee_chain.py | ess-dmsc/nexus-constructor | ae0026c48f8f2d4d88d3ff00e45cb6591983853b | ["BSD-2-Clause"] | 709 | 2019-02-06T08:23:07.000Z | 2022-03-29T23:03:37.000Z |
| max_forks | tests/test_remove_from_dependee_chain.py | ess-dmsc/nexus-constructor | ae0026c48f8f2d4d88d3ff00e45cb6591983853b | ["BSD-2-Clause"] | 2 | 2020-03-06T09:58:56.000Z | 2020-08-04T18:32:57.000Z |

content:
```python
import pytest
from PySide2.QtGui import QVector3D

from nexus_constructor.model.component import Component
from nexus_constructor.model.dataset import Dataset
from nexus_constructor.model.instrument import Instrument
from nexus_constructor.model.value_type import ValueTypes

values = Dataset(
    name="scalar_value",
    type=ValueTypes.DOUBLE,
    size=[1],
    values=90.0,
    parent_node=None,
)


@pytest.fixture
def instrument():
    return Instrument(parent_node=None)


def test_remove_from_beginning_1(instrument):
    component1 = Component("component1", instrument)
    rot = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot
    assert len(rot.dependents) == 1
    rot.remove_from_dependee_chain()
    assert component1.depends_on is None


def test_remove_from_beginning_2(instrument):
    component1 = Component("component1", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component1.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot1
    rot1.depends_on = rot2
    assert len(rot2.dependents) == 1
    rot1.remove_from_dependee_chain()
    assert len(rot2.dependents) == 1
    assert rot2.dependents[0] == component1
    assert component1.depends_on == rot2


def test_remove_from_beginning_3(instrument):
    component1 = Component("component1", instrument)
    component2 = Component("component2", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component2.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot1
    component2.depends_on = rot2
    rot1.depends_on = rot2
    assert len(rot2.dependents) == 2
    rot1.remove_from_dependee_chain()
    assert len(rot2.dependents) == 2
    assert component2 in rot2.dependents
    assert component1 in rot2.dependents
    assert component1.depends_on == rot2
    assert component1.transforms.link.linked_component == component2


def test_remove_from_middle(instrument):
    component1 = Component("component1", instrument)
    component2 = Component("component2", instrument)
    component3 = Component("component3", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component2.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot3 = component3.add_rotation(
        name="rotation3",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot1
    component2.depends_on = rot2
    component3.depends_on = rot3
    component1.transforms.link.linked_component = component2
    component2.transforms.link.linked_component = component3
    rot2.remove_from_dependee_chain()
    assert rot1.depends_on == rot3
    assert component1.transforms.link.linked_component == component3
    assert rot1 in rot3.dependents
    assert component3 in rot3.dependents


def test_remove_from_end(instrument):
    component1 = Component("component1", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component1.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
        depends_on=rot1,
    )
    rot3 = component1.add_rotation(
        name="rotation3",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
        depends_on=rot2,
    )
    component1.depends_on = rot3
    rot1.remove_from_dependee_chain()
    assert rot1.depends_on is None
    assert not rot1.dependents
    assert component1.depends_on == rot3
    assert rot2.dependents[0] == rot3
    assert len(component1.transforms) == 2
```
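If this file is extracted from the `content` column, it runs under pytest in the usual way; a minimal sketch using pytest's Python entry point, assuming PySide2 and nexus_constructor are importable:

```python
# Minimal sketch: invoke pytest programmatically on this test module.
import pytest

raise SystemExit(pytest.main(["-v", "tests/test_remove_from_dependee_chain.py"]))
```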
Remaining columns in schema order (avg_line_length → hits):

`28.275641 | 68 | 0.670143 | 526 | 4,411 | 5.479087 | 0.119772 | 0.030534 | 0.034351 | 0.030534 | 0.730396 | 0.620056 | 0.546149 | 0.546149 | 0.438584 | 0.403886 | 0 | 0.059478 | 0.226253 | 4,411 | 155 | 69 | 28.458065 | 0.78494 | 0 | 0 | 0.555556 | 0 | 0 | 0.043301 | 0 | 0 | 0 | 0 | 0 | 0.155556 | 1 | 0.044444 | false | 0 | 0.044444 | 0.007407 | 0.096296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d32f6738e6ad2c2884cf8b772cee6a6620a984 | 11,013 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | fastmvsnet/train1.py | molspace/FastMVS_experiments | b897015d77600687ca2addf99bb6a6f0de524e5f | ["MIT"] | null | null | null |
| max_issues | fastmvsnet/train1.py | molspace/FastMVS_experiments | b897015d77600687ca2addf99bb6a6f0de524e5f | ["MIT"] | null | null | null |
| max_forks | fastmvsnet/train1.py | molspace/FastMVS_experiments | b897015d77600687ca2addf99bb6a6f0de524e5f | ["MIT"] | null | null | null |

content:
```python
#!/usr/bin/env python
import argparse
import os.path as osp
import logging
import time
import sys
sys.path.insert(0, osp.dirname(__file__) + '/..')

import torch
import torch.nn as nn

from fastmvsnet.config import load_cfg_from_file
from fastmvsnet.utils.io import mkdir
from fastmvsnet.utils.logger import setup_logger
from fastmvsnet.utils.torch_utils import set_random_seed
from fastmvsnet.model1 import build_pointmvsnet as build_model
from fastmvsnet.solver import build_optimizer, build_scheduler
from fastmvsnet.utils.checkpoint import Checkpointer
from fastmvsnet.dataset1 import build_data_loader
from fastmvsnet.utils.tensorboard_logger import TensorboardLogger
from fastmvsnet.utils.metric_logger import MetricLogger
from fastmvsnet.utils.file_logger import file_logger


def parse_args():
    parser = argparse.ArgumentParser(description="PyTorch Fast-MVSNet Training")
    parser.add_argument(
        "--cfg",
        dest="config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    return args


def train_model(model,
                loss_fn,
                metric_fn,
                image_scales,
                inter_scales,
                isFlow,
                data_loader,
                optimizer,
                curr_epoch,
                tensorboard_logger,
                log_period=1,
                output_dir="",
                ):
    logger = logging.getLogger("fastmvsnet.train")
    meters = MetricLogger(delimiter=" ")
    model.train()
    end = time.time()
    total_iteration = data_loader.__len__()
    path_list = []
    for iteration, data_batch in enumerate(data_loader):
        data_time = time.time() - end
        curr_ref_img_path = data_batch["ref_img_path"]
        path_list.extend(curr_ref_img_path)
        data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}

        preds = model(data_batch, image_scales, inter_scales, isFlow)
        optimizer.zero_grad()

        loss_dict = loss_fn(preds, data_batch, isFlow)
        metric_dict = metric_fn(preds, data_batch, isFlow)
        losses = sum(loss_dict.values())
        #print("LOSS DICT", loss_dict['coarse_loss'])
        #print("LOSSES", loss_dict.values())
        meters.update(loss=losses, **loss_dict, **metric_dict)

        losses.backward()
        # print(poop)
        optimizer.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)

        if iteration % log_period == 0:
            logger.info(
                meters.delimiter.join(
                    [
                        "EPOCH: {epoch:2d}",
                        "iter: {iter:4d}",
                        "{meters}",
                        "lr: {lr:.2e}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    epoch=curr_epoch,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),
                )
            )
            tensorboard_logger.add_scalars(loss_dict, curr_epoch * total_iteration + iteration, prefix="train")
            tensorboard_logger.add_scalars(metric_dict, curr_epoch * total_iteration + iteration, prefix="train")

        if iteration % (100 * log_period) == 0:
            file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="train")

    return meters


def validate_model(model,
                   loss_fn,
                   metric_fn,
                   image_scales,
                   inter_scales,
                   isFlow,
                   data_loader,
                   curr_epoch,
                   tensorboard_logger,
                   log_period=1,
                   output_dir="",
                   ):
    logger = logging.getLogger("fastmvsnet.validate")
    meters = MetricLogger(delimiter=" ")
    model.train()
    end = time.time()
    total_iteration = data_loader.__len__()
    with torch.no_grad():
        for iteration, data_batch in enumerate(data_loader):
            data_time = time.time() - end
            curr_ref_img_path = data_batch["ref_img_path"]
            data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}

            preds = model(data_batch, image_scales, inter_scales, isFlow)
            loss_dict = loss_fn(preds, data_batch, isFlow)
            metric_dict = metric_fn(preds, data_batch, isFlow)
            losses = sum(loss_dict.values())
            meters.update(loss=losses, **loss_dict, **metric_dict)

            batch_time = time.time() - end
            end = time.time()
            meters.update(time=batch_time, data=data_time)

            if iteration % log_period == 0:
                logger.info(
                    meters.delimiter.join(
                        [
                            "EPOCH: {epoch:2d}",
                            "iter: {iter:4d}",
                            "{meters}",
                        ]
                    ).format(
                        epoch=curr_epoch,
                        iter=iteration,
                        meters=str(meters),
                    )
                )
                tensorboard_logger.add_scalars(meters.meters, curr_epoch * total_iteration + iteration, prefix="valid")

            if iteration % (100 * log_period) == 0:
                file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="valid")

    return meters


def train(cfg, output_dir=""):
    logger = logging.getLogger("fastmvsnet.trainer")

    # build model
    set_random_seed(cfg.RNG_SEED)
    model, loss_fn, metric_fn = build_model(cfg)
    logger.info("Build model:\n{}".format(str(model)))
    model = nn.DataParallel(model).cuda()

    # build optimizer
    optimizer = build_optimizer(cfg, model)

    # build lr scheduler
    scheduler = build_scheduler(cfg, optimizer)

    # build checkpointer
    checkpointer = Checkpointer(model,
                                optimizer=optimizer,
                                scheduler=scheduler,
                                save_dir=output_dir,
                                logger=logger)
    checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.AUTO_RESUME)
    ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD

    # build data loader
    train_data_loader = build_data_loader(cfg, mode="train")
    val_period = cfg.TRAIN.VAL_PERIOD
    val_data_loader = build_data_loader(cfg, mode="val") if val_period > 0 else None

    # build tensorboard logger (optionally by comment)
    tensorboard_logger = TensorboardLogger(output_dir)

    # train
    max_epoch = cfg.SCHEDULER.MAX_EPOCH
    start_epoch = checkpoint_data.get("epoch", 0)
    best_metric_name = "best_{}".format(cfg.TRAIN.VAL_METRIC)
    best_metric = checkpoint_data.get(best_metric_name, None)
    logger.info("Start training from epoch {}".format(start_epoch))
    for epoch in range(start_epoch, max_epoch):
        cur_epoch = epoch + 1
        scheduler.step()
        start_time = time.time()
        train_meters = train_model(model,
                                   loss_fn,
                                   metric_fn,
                                   image_scales=cfg.MODEL.TRAIN.IMG_SCALES,
                                   inter_scales=cfg.MODEL.TRAIN.INTER_SCALES,
                                   isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
                                   data_loader=train_data_loader,
                                   optimizer=optimizer,
                                   curr_epoch=epoch,
                                   tensorboard_logger=tensorboard_logger,
                                   log_period=cfg.TRAIN.LOG_PERIOD,
                                   output_dir=output_dir,
                                   )
        epoch_time = time.time() - start_time
        logger.info("Epoch[{}]-Train {} total_time: {:.2f}s".format(
            cur_epoch, train_meters.summary_str, epoch_time))

        # checkpoint
        if cur_epoch % ckpt_period == 0 or cur_epoch == max_epoch:
            checkpoint_data["epoch"] = cur_epoch
            checkpoint_data[best_metric_name] = best_metric
            checkpointer.save("model_{:03d}".format(cur_epoch), **checkpoint_data)

        # validate
        if val_period < 1:
            continue
        if cur_epoch % val_period == 0 or cur_epoch == max_epoch:
            val_meters = validate_model(model,
                                        loss_fn,
                                        metric_fn,
                                        image_scales=cfg.MODEL.VAL.IMG_SCALES,
                                        inter_scales=cfg.MODEL.VAL.INTER_SCALES,
                                        isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
                                        data_loader=val_data_loader,
                                        curr_epoch=epoch,
                                        tensorboard_logger=tensorboard_logger,
                                        log_period=cfg.TEST.LOG_PERIOD,
                                        output_dir=output_dir,
                                        )
            logger.info("Epoch[{}]-Val {}".format(cur_epoch, val_meters.summary_str))

            # best validation
            cur_metric = val_meters.meters[cfg.TRAIN.VAL_METRIC].global_avg
            if best_metric is None or cur_metric > best_metric:
                best_metric = cur_metric
                checkpoint_data["epoch"] = cur_epoch
                checkpoint_data[best_metric_name] = best_metric
                checkpointer.save("model_best", **checkpoint_data)

    logger.info("Best val-{} = {}".format(cfg.TRAIN.VAL_METRIC, best_metric))
    return model


def main():
    args = parse_args()
    num_gpus = torch.cuda.device_count()

    cfg = load_cfg_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        config_path = osp.splitext(args.config_file)[0]
        config_path = config_path.replace("configs", "outputs1")
        output_dir = output_dir.replace('@', config_path)
        mkdir(output_dir)

    logger = setup_logger("fastmvsnet", output_dir, prefix="train")
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    train(cfg, output_dir)


if __name__ == "__main__":
    main()
```
Remaining columns in schema order (avg_line_length → hits):

`37.080808 | 119 | 0.562608 | 1,172 | 11,013 | 5.013652 | 0.171502 | 0.029101 | 0.022634 | 0.014466 | 0.470388 | 0.462049 | 0.429204 | 0.39857 | 0.382573 | 0.361811 | 0 | 0.005401 | 0.34432 | 11,013 | 296 | 120 | 37.206081 | 0.808337 | 0.025969 | 0 | 0.417021 | 0 | 0 | 0.05694 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.076596 | 0 | 0.114894 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d41bf8d53f9ade04da7c58f9daea5fe0658840 | 857 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | modulo2/3-detectores/3.2-detector/models.py | fossabot/unifacisa-visao-computacional | 14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b | ["MIT"] | null | null | null |
| max_issues | modulo2/3-detectores/3.2-detector/models.py | fossabot/unifacisa-visao-computacional | 14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b | ["MIT"] | null | null | null |
| max_forks | modulo2/3-detectores/3.2-detector/models.py | fossabot/unifacisa-visao-computacional | 14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b | ["MIT"] | 1 | 2021-02-06T00:49:32.000Z | 2021-02-06T00:49:32.000Z |

content:
```python
# Basic structure for Machine Learning and Deep Learning projects
# By Adriano Santos.

from torch import nn, relu
import torch.nn.functional as F
import torch.optim as optim
import torch
from torchvision import models


class ResNet(nn.Module):
    def __init__(self, saida, pretreinado=True):
        super(ResNet, self).__init__()
        resnet = models.resnet34(pretrained=pretreinado)
        layers = list(resnet.children())[:8]
        self.features1 = nn.Sequential(*layers[:6])
        self.features2 = nn.Sequential(*layers[6:])
        self.classificador = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, saida))

    def forward(self, x):
        x = self.features1(x)
        x = self.features2(x)
        x = F.relu(x)
        x = nn.AdaptiveAvgPool2d((1, 1))(x)
        x = x.view(x.shape[0], -1)
        return self.classificador(x)
```
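A minimal usage sketch for the model above; the class count and input shape are illustrative assumptions (`pretreinado=False` avoids downloading pretrained weights):

```python
# Sketch: forward a dummy batch through the ResNet wrapper defined above.
import torch

model = ResNet(saida=10, pretreinado=False)  # 10 output classes, assumed
model.eval()                                 # BatchNorm in inference mode
with torch.no_grad():
    out = model(torch.randn(2, 3, 224, 224))  # batch of two 224x224 RGB images
print(out.shape)  # torch.Size([2, 10])
```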
Remaining columns in schema order (avg_line_length → hits):

`29.551724 | 86 | 0.655776 | 115 | 857 | 4.817391 | 0.478261 | 0.021661 | 0.064982 | 0.068592 | 0.083032 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031484 | 0.221704 | 857 | 29 | 87 | 29.551724 | 0.7991 | 0.10035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.25 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d5248eff89e3f435c1da7e63250cb5c736a60a | 3,231 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | python/setup.py | sbrodeur/evert | c7005ba29576145ab650144f9b9230eaf7bec460 | ["BSD-3-Clause"] | 28 | 2017-10-04T13:58:43.000Z | 2021-11-06T10:46:51.000Z |
| max_issues | python/setup.py | sbrodeur/evert | c7005ba29576145ab650144f9b9230eaf7bec460 | ["BSD-3-Clause"] | 7 | 2017-12-04T17:17:55.000Z | 2021-07-29T08:58:26.000Z |
| max_forks | python/setup.py | sbrodeur/evert | c7005ba29576145ab650144f9b9230eaf7bec460 | ["BSD-3-Clause"] | 10 | 2017-11-07T14:51:08.000Z | 2019-06-05T04:17:44.000Z |

content:
```python
#!/usr/bin/env python

# Copyright (c) 2017, Simon Brodeur
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
#    may be used to endorse or promote products derived from this software without
#    specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.

"""
setup.py file for installing Python bindings using SWIG
"""

from distutils.core import setup, Extension

evert_module = Extension('_evert',
                         define_macros=[('MAJOR_VERSION', '1'),
                                        ('MINOR_VERSION', '0')],
                         include_dirs=['../include'],
                         sources=['../src/elBeam.cpp',
                                  '../src/elBSP.cpp',
                                  '../src/elGLUT.cpp',
                                  '../src/elListener.cpp',
                                  '../src/elOrientedPoint.cpp',
                                  '../src/elPathSolution.cpp',
                                  '../src/elPolygon.cpp',
                                  '../src/elRay.cpp',
                                  '../src/elRoom.cpp',
                                  '../src/elSource.cpp',
                                  '../src/elTimer.cpp',
                                  '../src/elVector.cpp',
                                  '../src/elViewer.cpp',
                                  'evert.i'],
                         libraries=['GL', 'GLU', 'glut'],
                         library_dirs=[],
                         language='c++',
                         swig_opts=['-c++', '-I../include'],
                         # extra_compile_args=['-std=c++11'],
                         )

setup(name='evert',
      version='1.0',
      author="Samuli Laine",
      description="""Accelerated beam tracing algorithm""",
      ext_modules=[evert_module],
      py_modules=["evert"],
      )
```
Remaining columns in schema order (avg_line_length → hits):

`46.826087 | 89 | 0.556484 | 341 | 3,231 | 5.234604 | 0.557185 | 0.040336 | 0.019048 | 0.02577 | 0.103081 | 0.07619 | 0.07619 | 0.07619 | 0.07619 | 0.07619 | 0 | 0.006167 | 0.34757 | 3,231 | 68 | 90 | 47.514706 | 0.840607 | 0.501393 | 0 | 0 | 0 | 0 | 0.246349 | 0.045714 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.032258 | 0 | 0.032258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d6049e6360802df5527ba35f15e6ff291748e2 | 530 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | somegame/fps_osd.py | kodo-pp/somegame-but-not-that-one | 6252d34b84fe7c83ada9e699df17688c50dd7596 | ["MIT"] | null | null | null |
| max_issues | somegame/fps_osd.py | kodo-pp/somegame-but-not-that-one | 6252d34b84fe7c83ada9e699df17688c50dd7596 | ["MIT"] | null | null | null |
| max_forks | somegame/fps_osd.py | kodo-pp/somegame-but-not-that-one | 6252d34b84fe7c83ada9e699df17688c50dd7596 | ["MIT"] | null | null | null |

content:
```python
import pygame
from loguru import logger

from somegame.osd import OSD


class FpsOSD(OSD):
    def __init__(self, game):
        super().__init__(game)
        logger.info('Loading font')
        self.font = pygame.font.Font(pygame.font.get_default_font(), 32)

    def draw(self, surface):
        fps = self.game.get_average_fps()
        fps_text = '<unknown>' if fps is None else '{:.1f}'.format(fps)
        tmp_surf = self.font.render('{} FPS'.format(fps_text), True, (255, 255, 255))
        surface.blit(tmp_surf, (0, 0))
```
Remaining columns in schema order (avg_line_length → hits):

`29.444444 | 85 | 0.635849 | 76 | 530 | 4.223684 | 0.513158 | 0.049844 | 0.087227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033816 | 0.218868 | 530 | 17 | 86 | 31.176471 | 0.741546 | 0 | 0 | 0 | 0 | 0 | 0.062264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d6ce148b09071a1e33198868f6c84a03813ea1 | 11,846 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | python/chronos/test/bigdl/chronos/data/experimental/test_xshardstsdataset.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | ["Apache-2.0"] | null | null | null |
| max_issues | python/chronos/test/bigdl/chronos/data/experimental/test_xshardstsdataset.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | ["Apache-2.0"] | null | null | null |
| max_forks | python/chronos/test/bigdl/chronos/data/experimental/test_xshardstsdataset.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | ["Apache-2.0"] | null | null | null |

content:
```python
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pytest
import numpy as np
import pandas as pd
import random
import os

from unittest import TestCase
from bigdl.chronos.data import TSDataset
from bigdl.chronos.data.experimental import XShardsTSDataset
from bigdl.orca.data.pandas import read_csv
from bigdl.orca.common import init_orca_context, stop_orca_context, OrcaContext
from pandas.testing import assert_frame_equal
from numpy.testing import assert_array_almost_equal


def generate_spark_df():
    init_orca_context(cores=8)
    sc = OrcaContext.get_spark_context()
    rdd = sc.range(0, 100)
    from pyspark.ml.linalg import DenseVector
    df = rdd.map(lambda x: (DenseVector(np.random.randn(1, ).astype(np.float)),
                            int(np.random.randint(0, 2, size=())),
                            int(x))).toDF(["feature", "id", "date"])
    return df


def get_ugly_ts_df():
    data = np.random.random_sample((100, 5))
    mask = np.random.random_sample((100, 5))
    newmask = mask.copy()
    mask[newmask >= 0.4] = 2
    mask[newmask < 0.4] = 1
    mask[newmask < 0.2] = 0
    data[mask == 0] = None
    data[mask == 1] = np.nan
    df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
    df['a'][0] = np.nan  # make sure column 'a' has a N/A
    df["datetime"] = pd.date_range('1/1/2019', periods=100)
    df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
    df["id"] = np.array(['00'] * 50 + ['01'] * 50)
    return df


class TestXShardsTSDataset(TestCase):
    def setUp(self):
        self.resource_path = os.path.join(os.path.split(__file__)[0], "../../resources/")

    def tearDown(self):
        pass

    @classmethod
    def tearDownClass(cls):
        # stop possible active_spark_context
        from pyspark import SparkContext
        from bigdl.orca.ray import OrcaRayContext
        if SparkContext._active_spark_context is not None:
            print("Stopping spark_orca context")
            sc = SparkContext.getOrCreate()
            if sc.getConf().get("spark.master").startswith("spark://"):
                from bigdl.dllib.nncontext import stop_spark_standalone
                stop_spark_standalone()
            sc.stop()

    def test_xshardstsdataset_initialization(self):
        shards_single = read_csv(os.path.join(self.resource_path, "single.csv"))
        tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime", target_col="value",
                                               extra_feature_col=["extra feature"], id_col="id")
        assert tsdata._id_list == [0]
        assert tsdata.feature_col == ["extra feature"]
        assert tsdata.target_col == ["value"]
        assert tsdata.dt_col == "datetime"
        assert tsdata.shards.num_partitions() == 1

        tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime",
                                               target_col=["value"],
                                               extra_feature_col="extra feature", id_col="id")
        assert tsdata._id_list == [0]
        assert tsdata.feature_col == ["extra feature"]
        assert tsdata.target_col == ["value"]
        assert tsdata.dt_col == "datetime"
        assert tsdata.shards.num_partitions() == 1

        tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime",
                                               target_col=["value"],
                                               extra_feature_col="extra feature")
        assert tsdata._id_list == ["0"]
        assert tsdata.feature_col == ["extra feature"]
        assert tsdata.target_col == ["value"]
        assert tsdata.dt_col == "datetime"
        assert tsdata.shards.num_partitions() == 1

    def test_xshardstsdataset_initialization_multiple(self):
        shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
        # legal input
        tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
                                               target_col="value",
                                               extra_feature_col=["extra feature"], id_col="id")
        assert tsdata._id_list == [0, 1]
        assert tsdata.feature_col == ["extra feature"]
        assert tsdata.target_col == ["value"]
        assert tsdata.dt_col == "datetime"
        assert tsdata.shards.num_partitions() == 2

        tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
                                               target_col=["value"],
                                               extra_feature_col="extra feature", id_col="id")
        assert tsdata._id_list == [0, 1]
        assert tsdata.feature_col == ["extra feature"]
        assert tsdata.target_col == ["value"]
        assert tsdata.dt_col == "datetime"
        assert tsdata.shards.num_partitions() == 2

        tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
                                               target_col=["value"],
                                               extra_feature_col="extra feature")
        assert tsdata._id_list == ['0']
        assert tsdata.feature_col == ["extra feature"]
        assert tsdata.target_col == ["value"]
        assert tsdata.dt_col == "datetime"
        assert tsdata.shards.num_partitions() == 1

    def test_xshardstsdataset_split(self):
        shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
        # only train and test
        tsdata_train, tsdata_valid, tsdata_test =\
            XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value",
                                          extra_feature_col=["extra feature"], id_col="id",
                                          with_split=True, val_ratio=0, test_ratio=0.1)
        # standard split with all three sets
        tsdata_train, tsdata_valid, tsdata_test =\
            XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value",
                                          extra_feature_col=["extra feature"], id_col="id",
                                          with_split=True, val_ratio=0.1, test_ratio=0.1,
                                          largest_look_back=5, largest_horizon=2)

        tsdata_train.feature_col.append("new extra feature")
        assert len(tsdata_train.feature_col) == 2
        assert len(tsdata_valid.feature_col) == 1
        assert len(tsdata_test.feature_col) == 1

        tsdata_train.target_col[0] = "new value"
        assert tsdata_train.target_col[0] == "new value"
        assert tsdata_valid.target_col[0] != "new value"
        assert tsdata_test.target_col[0] != "new value"

    def test_xshardstsdataset_roll_multiple_id(self):
        shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
        horizon = random.randint(1, 10)
        lookback = random.randint(1, 20)

        tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
                                               target_col="value",
                                               extra_feature_col=["extra feature"], id_col="id")

        with pytest.raises(RuntimeError):
            tsdata.to_xshards()

        # roll train
        tsdata.roll(lookback=lookback, horizon=horizon)
        shards_numpy = tsdata.to_xshards()
        collected_numpy = shards_numpy.collect()  # collect and valid
        x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
        y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
        assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
        assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)

        tsdata.roll(lookback=lookback, horizon=horizon,
                    feature_col=["extra feature"], target_col="value")
        shards_numpy = tsdata.to_xshards()
        collected_numpy = shards_numpy.collect()  # collect and valid
        x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
        y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
        assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
        assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)

        tsdata.roll(lookback=lookback, horizon=horizon,
                    feature_col=[], target_col="value")
        shards_numpy = tsdata.to_xshards()
        collected_numpy = shards_numpy.collect()  # collect and valid
        x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
        y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
        assert x.shape == ((50-lookback-horizon+1)*2, lookback, 1)
        assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)

        # roll test
        horizon = 0
        lookback = random.randint(1, 20)

        tsdata.roll(lookback=lookback, horizon=horizon)
        shards_numpy = tsdata.to_xshards()
        collected_numpy = shards_numpy.collect()  # collect and valid
        x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
        assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)

    def test_xshardstsdataset_impute(self):
        from tempfile import TemporaryDirectory
        tmp_df = get_ugly_ts_df()
        with TemporaryDirectory() as tmpdir:
            file_name = os.path.join(tmpdir, 'impute.csv')
            tmp_df.to_csv(file_name, index=False)
            shards_tmp = read_csv(file_name)

            for val in ["last", "const", "linear"]:
                tsdata = XShardsTSDataset.from_xshards(shards_tmp,
                                                       dt_col="datetime", target_col="e",
                                                       extra_feature_col=["a", "b", "c", "d"], id_col="id")
                tsdata.impute(mode=val)
                collected_df = tsdata.shards.collect()
                collected_df = pd.concat(collected_df, axis=0)
                assert collected_df.isna().sum().sum() == 0
                assert len(collected_df) == 100

    def test_xshardstsdataset_sparkdf(self):
        df = generate_spark_df()

        # with id
        tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date",
                                               target_col="feature",
                                               id_col="id")
        tsdata.roll(lookback=4, horizon=2)
        data = tsdata.to_xshards().collect()
        assert data[0]['x'].shape[1] == 4
        assert data[0]['x'].shape[2] == 1
        assert data[0]['y'].shape[1] == 2
        assert data[0]['y'].shape[2] == 1
        assert tsdata.shards.num_partitions() == 2

        # with only 1 id
        tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date",
                                               target_col="feature")
        tsdata.roll(lookback=4, horizon=2)
        data = tsdata.to_xshards().collect()
        assert data[0]['x'].shape[1] == 4
        assert data[0]['x'].shape[2] == 1
        assert data[0]['y'].shape[1] == 2
        assert data[0]['y'].shape[2] == 1
        assert tsdata.shards.num_partitions() == 1
```
Remaining columns in schema order (avg_line_length → hits):

`46.454902 | 100 | 0.593534 | 1,435 | 11,846 | 4.715679 | 0.168641 | 0.062066 | 0.035171 | 0.052017 | 0.612236 | 0.600562 | 0.58608 | 0.581646 | 0.567312 | 0.556081 | 0 | 0.021901 | 0.286932 | 11,846 | 254 | 101 | 46.637795 | 0.779212 | 0.067871 | 0 | 0.510101 | 0 | 0 | 0.066013 | 0 | 0 | 0 | 0 | 0 | 0.287879 | 1 | 0.055556 | false | 0.005051 | 0.085859 | 0 | 0.156566 | 0.005051 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d7680f93fc7f5f7a46d60f37723337c7dce6f3 | 2,603 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | zoom_functions.py | WXSD-Sales/ZoomToWebex | 16cc663620e2ef2904b0e2857d709aee96b78eb7 | ["MIT"] | 1 | 2021-10-21T01:36:33.000Z | 2021-10-21T01:36:33.000Z |
| max_issues | zoom_functions.py | WXSD-Sales/integration-samples | 2f18be740329f3c35c78c268a6d4544cae5d313e | ["MIT"] | null | null | null |
| max_forks | zoom_functions.py | WXSD-Sales/integration-samples | 2f18be740329f3c35c78c268a6d4544cae5d313e | ["MIT"] | null | null | null |

content:
```python
import json
import tornado.gen
import traceback
from base64 import b64encode
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError

from settings import Settings
from mongo_db_controller import ZoomUserDB


@tornado.gen.coroutine
def zoomRefresh(zoom_user):
    url = "https://zoom.us/oauth/token"
    payload = "grant_type=refresh_token&"
    payload += "refresh_token={0}".format(zoom_user.get('refresh_token'))
    # we need to base 64 encode it
    # and then decode it to ascii as python 3 stores it as a byte string
    userAndPass = b64encode("{0}:{1}".format(Settings.zoom_client_id, Settings.zoom_client_secret).encode()).decode("ascii")
    headers = {
        'authorization': 'Basic {0}'.format(userAndPass),
        'content-type': "application/x-www-form-urlencoded"
    }
    request = HTTPRequest(url, method="POST", headers=headers, body=payload)
    http_client = AsyncHTTPClient()
    print(zoom_user)
    print('making zoomRefresh')
    print(payload)
    try:
        response = yield http_client.fetch(request)
        resp = json.loads(response.body.decode("utf-8"))
        print("zoomRefresh /access_token Response: {0}".format(resp))
        zoom_user = ZoomUserDB.db.insert_user(zoom_user['person_id'], resp['access_token'], resp['expires_in'], resp['refresh_token'], "zoom")
        print('new zoom_user:{0}'.format(zoom_user))
    except HTTPError as he:
        print('zoomRefresh HTTPError:')
        print(he.code)
        print(he.response.body)
        if he.code == 401:
            ZoomUserDB.db.delete_user(zoom_user['person_id'], "zoom")
            zoom_user = None
    raise tornado.gen.Return(zoom_user)


@tornado.gen.coroutine
def zoomGET(endpoint_url, zoom_user):
    url = "https://api.zoom.us/v2{0}".format(endpoint_url)
    headers = {"Authorization": "Bearer {0}".format(zoom_user.get('token'))}
    request = HTTPRequest(url, method="GET", headers=headers)
    http_client = AsyncHTTPClient()
    response = None
    try:
        response = yield http_client.fetch(request)
        body = response.body.decode('utf-8')
        response = json.loads(body)
    except HTTPError as he:
        if he.code == 401:
            print('token may be expired, attempting refresh')
            zoom_user = yield zoomRefresh(zoom_user)
            if zoom_user:
                response, zoom_user = yield zoomGET(endpoint_url, zoom_user)
        else:
            try:
                print(he.response.body)
            except Exception as e:
                pass
            traceback.print_exc()
    raise tornado.gen.Return((response, zoom_user))
```
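A sketch of driving these coroutines outside a web handler, assuming a `zoom_user` record shaped like the one `ZoomUserDB` stores; the id and tokens are placeholders:

```python
# Hypothetical driver: run zoomGET once on the current IOLoop.
from tornado.ioloop import IOLoop

zoom_user = {
    "person_id": "p123",                 # placeholder
    "token": "<access-token>",           # placeholder
    "refresh_token": "<refresh-token>",  # placeholder
}
response, zoom_user = IOLoop.current().run_sync(lambda: zoomGET('/users/me', zoom_user))
print(response)
```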
Remaining columns in schema order (avg_line_length → hits):

`38.850746 | 142 | 0.661929 | 325 | 2,603 | 5.166154 | 0.341538 | 0.085765 | 0.019655 | 0.026802 | 0.147707 | 0.045265 | 0.045265 | 0 | 0 | 0 | 0 | 0.01277 | 0.217826 | 2,603 | 66 | 143 | 39.439394 | 0.811886 | 0.036112 | 0 | 0.25 | 0 | 0 | 0.170722 | 0.023135 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.05 | 0.116667 | 0 | 0.15 | 0.183333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54d83fe60a2207f45c149a5e0cac230756ba7376 | 1,484 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | crypten/mpc/__init__.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | ["MIT"] | null | null | null |
| max_issues | crypten/mpc/__init__.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | ["MIT"] | null | null | null |
| max_forks | crypten/mpc/__init__.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | ["MIT"] | null | null | null |

content:
```python
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os

from crypten.mpc import primitives  # noqa: F401
from crypten.mpc import provider  # noqa: F40

from .context import run_multiprocess
from .mpc import MPCTensor
from .ptype import ptype

__all__ = ["MPCTensor", "primitives", "provider", "ptype", "run_multiprocess"]

# the different private type attributes of an mpc encrypted tensor
arithmetic = ptype.arithmetic
binary = ptype.binary

# Set provider
__SUPPORTED_PROVIDERS = {
    "TFP": provider.TrustedFirstParty,
    "TTP": provider.TrustedThirdParty,
    "HE": provider.HomomorphicProvider,
}
__default_provider = __SUPPORTED_PROVIDERS[
    os.environ.get("CRYPTEN_PROVIDER_NAME", "TFP")
]


def set_default_provider(new_default_provider):
    global __default_provider
    assert_msg = "Provider %s is not supported" % new_default_provider
    if isinstance(new_default_provider, str):
        assert new_default_provider in __SUPPORTED_PROVIDERS.keys(), assert_msg
    else:
        assert new_default_provider in __SUPPORTED_PROVIDERS.values(), assert_msg
    __default_provider = new_default_provider
    os.environ["CRYPTEN_PROVIDER_NAME"] = new_default_provider.NAME


def get_default_provider():
    return __default_provider


def ttp_required():
    return __default_provider == provider.TrustedThirdParty
```
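A small sketch of the provider switch this module exposes, using only the names defined above (assuming crypten is installed and importable):

```python
# Sketch: query and change the default MPC provider via the functions above.
from crypten import mpc

print(mpc.get_default_provider())  # TrustedFirstParty unless CRYPTEN_PROVIDER_NAME says otherwise
mpc.set_default_provider(mpc.provider.TrustedThirdParty)
print(mpc.ttp_required())          # True once TTP is the default
```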
Remaining columns in schema order (avg_line_length → hits):

`28.538462 | 81 | 0.768194 | 185 | 1,484 | 5.843243 | 0.432432 | 0.194265 | 0.116559 | 0.037003 | 0.142461 | 0.081406 | 0.081406 | 0 | 0 | 0 | 0 | 0.004781 | 0.154313 | 1,484 | 51 | 82 | 29.098039 | 0.856574 | 0.194744 | 0 | 0 | 0 | 0 | 0.108769 | 0.035413 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.2 | 0.066667 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54da3dc2f38e9f403fcf4bc41db3259f59c8f372 | 1,763 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | features.py | ptorresmanque/MachineLearning_v2.0 | 795e47b9cfc68f4e0fefb700d43af6c59e2f1d73 | ["MIT"] | null | null | null |
| max_issues | features.py | ptorresmanque/MachineLearning_v2.0 | 795e47b9cfc68f4e0fefb700d43af6c59e2f1d73 | ["MIT"] | null | null | null |
| max_forks | features.py | ptorresmanque/MachineLearning_v2.0 | 795e47b9cfc68f4e0fefb700d43af6c59e2f1d73 | ["MIT"] | null | null | null |

content:
```python
import sqlite3
from random import randint, choice

import numpy as np

conn = sqlite3.connect('ej.db')
c = conn.cursor()

# GETTING MAXIMUM, MINIMUM, AND AVERAGE SIZES #

c.execute('SELECT MAX(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMax = resultado[0]

c.execute('SELECT MIN(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMin = resultado[0]

altoProm = abs((altoMax + altoMin) / 2)
# print altoMax, altoProm, altoMin
arrAlto = [altoMax, altoProm, altoMin]

c.execute('SELECT MAX(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMax = resultado[0]

c.execute('SELECT MIN(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMin = resultado[0]

anchoProm = abs((anchoMax + anchoMin) / 2)
anchoMaxProm = abs((anchoMax + anchoProm) / 2)
anchoMinProm = abs((anchoMin + anchoProm) / 2)
arrAncho = [anchoMax, anchoMaxProm, anchoProm, anchoMinProm, anchoMin]

# ### CREATING NEGATIVE CLASSES
for i in range(0, 3):
    for j in range(0, 5):
        for _ in range(10):
            negAncho = arrAncho[j]
            negAlto = arrAlto[i]

            rand_alto_max = int(negAlto * 1.5)
            rand_alto_min = int(negAlto * 0.5)
            r3 = rand_alto_max * 2

            rand_ancho_max = int(negAncho * 1.5)
            rand_ancho_min = int(negAncho * 0.5)
            r33 = rand_ancho_max * 2

            f1 = choice([np.random.randint(1, rand_alto_min), np.random.randint(rand_alto_max, r3)])
            f2 = choice([np.random.randint(1, rand_ancho_min), np.random.randint(rand_ancho_max, r33)])

            c.execute("insert into features (ancho, alto, area, clase) values (?, ?, ?, ?)",
                      (f2, f1, f2 * f1, 0))

conn.commit()
conn.close()
```
Remaining columns in schema order (avg_line_length → hits):

`23.506667 | 103 | 0.640953 | 233 | 1,763 | 4.759657 | 0.309013 | 0.036069 | 0.050496 | 0.079351 | 0.299369 | 0.259693 | 0.164112 | 0.164112 | 0 | 0 | 0 | 0.030236 | 0.230856 | 1,763 | 75 | 104 | 23.506667 | 0.787611 | 0.059558 | 0 | 0.181818 | 0 | 0 | 0.117433 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068182 | 0 | 0.068182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
| hexsha | size | ext | lang |
|---|---|---|---|
| 54dbf6330b24d0c6aff3e7ee1c31934c49d43385 | 12,082 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | nuscenes/eval/detection/evaluate.py | WJ-Lai/NightFusion | 1555692eceb6b85127d21cd43e6fc780b7f91ffd | ["Apache-2.0"] | null | null | null |
| max_issues | nuscenes/eval/detection/evaluate.py | WJ-Lai/NightFusion | 1555692eceb6b85127d21cd43e6fc780b7f91ffd | ["Apache-2.0"] | 1 | 2019-04-24T12:14:59.000Z | 2019-04-24T12:14:59.000Z |
| max_forks | nuscenes/eval/detection/evaluate.py | WJ-Lai/NightFusion | 1555692eceb6b85127d21cd43e6fc780b7f91ffd | ["Apache-2.0"] | null | null | null |

content:
```python
# nuScenes dev-kit.
# Code written by Holger Caesar & Oscar Beijbom, 2018.
# Licensed under the Creative Commons [see licence.txt]

import argparse
import json
import os
import random
import time
from typing import Tuple, Dict, Any

import numpy as np

from nuscenes import NuScenes
from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.constants import TP_METRICS
from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes
from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample


class NuScenesEval:
    """
    This is the official nuScenes detection evaluation code.
    Results are written to the provided output_dir.

    nuScenes uses the following metrics:
    - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.
    - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors.
    - nuScenes Detection Score (NDS): The weighted sum of the above.

    Here is an overview of the functions in this method:
    - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.
    - run: Performs evaluation and dumps the metric data to disk.
    - render: Renders various plots and dumps to disk.

    We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.

    Please see https://github.com/nutonomy/nuscenes-devkit for more details.
    """
    def __init__(self,
                 nusc: NuScenes,
                 config: DetectionConfig,
                 result_path: str,
                 eval_set: str,
                 output_dir: str = None,
                 verbose: bool = True):
        """
        Initialize a NuScenesEval object.
        :param nusc: A NuScenes object.
        :param config: A DetectionConfig object.
        :param result_path: Path of the nuScenes JSON result file.
        :param eval_set: The dataset split to evaluate on, e.g. train or val.
        :param output_dir: Folder to save plots and results to.
        :param verbose: Whether to print to stdout.
        """
        self.nusc = nusc
        self.result_path = result_path
        self.eval_set = eval_set
        self.output_dir = output_dir
        self.verbose = verbose
        self.cfg = config

        # Make dirs.
        self.plot_dir = os.path.join(self.output_dir, 'plots')
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        if not os.path.isdir(self.plot_dir):
            os.makedirs(self.plot_dir)

        # Load data.
        self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, verbose=verbose)
        self.gt_boxes = load_gt(self.nusc, self.eval_set, verbose=verbose)

        assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
            "Samples in split doesn't match samples in predictions."

        # Add center distances.
        self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
        self.gt_boxes = add_center_dist(nusc, self.gt_boxes)

        # Filter boxes (distance, points per box, etc.).
        if verbose:
            print('Filtering predictions')
        self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
        if verbose:
            print('Filtering ground truth annotations')
        self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)

        self.sample_tokens = self.gt_boxes.sample_tokens

    def evaluate(self) -> Tuple[DetectionMetrics, MetricDataList]:
        """
        Performs the actual evaluation.
        :return: A tuple of high-level and the raw metric data.
        """
        start_time = time.time()

        # -----------------------------------
        # Step 1: Accumulate metric data for all classes and distance thresholds.
        # -----------------------------------
        if self.verbose:
            print('Accumulating metric data')
        metric_data_list = MetricDataList()
        for class_name in self.cfg.class_names:
            for dist_th in self.cfg.dist_ths:
                md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn, dist_th)
                metric_data_list.set(class_name, dist_th, md)

        # -----------------------------------
        # Step 2: Calculate metrics from the data.
        # -----------------------------------
        if self.verbose:
            print('Calculating metrics')
        metrics = DetectionMetrics(self.cfg)
        for class_name in self.cfg.class_names:
            for dist_th in self.cfg.dist_ths:
                metric_data = metric_data_list[(class_name, dist_th)]
                ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
                metrics.add_label_ap(class_name, dist_th, ap)

            for metric_name in TP_METRICS:
                metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)]
                if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']:
                    tp = np.nan
                elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
                    tp = np.nan
                else:
                    tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
                metrics.add_label_tp(class_name, metric_name, tp)

        metrics.add_runtime(time.time() - start_time)

        return metrics, metric_data_list

    def render(self, metrics: DetectionMetrics, md_list: MetricDataList) -> None:
        """
        Renders various PR and TP curves.
        :param metrics: DetectionMetrics instance.
        :param md_list: MetricDataList instance.
        """
        def savepath(name):
            return os.path.join(self.plot_dir, name + '.pdf')

        summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall,
                     dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary'))

        for detection_name in self.cfg.class_names:
            class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall,
                           savepath=savepath(detection_name + '_pr'))

            class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp,
                           savepath=savepath(detection_name + '_tp'))

        for dist_th in self.cfg.dist_ths:
            dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall,
                          savepath=savepath('dist_pr_' + str(dist_th)))

    def main(self,
             plot_examples: int = 0,
             render_curves: bool = True) -> Dict[str, Any]:
        """
        Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
        :param plot_examples: How many example visualizations to write to disk.
        :param render_curves: Whether to render PR and TP curves to disk.
        :return: A dict that stores the high-level metrics and meta data.
        """
        if plot_examples > 0:
            # Select a random but fixed subset to plot.
            random.seed(43)
            sample_tokens = list(self.sample_tokens)
            random.shuffle(sample_tokens)
            sample_tokens = sample_tokens[:plot_examples]

            # Visualize samples.
            example_dir = os.path.join(self.output_dir, 'examples')
            if not os.path.isdir(example_dir):
                os.mkdir(example_dir)
            for sample_token in sample_tokens:
                visualize_sample(self.nusc,
                                 sample_token,
                                 self.gt_boxes if self.eval_set != 'test' else EvalBoxes(),
                                 # Don't render test GT.
                                 self.pred_boxes,
                                 eval_range=max(self.cfg.class_range.values()),
                                 savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))

        # Run evaluation.
        metrics, metric_data_list = self.evaluate()

        # Render PR and TP curves.
        if render_curves:
            self.render(metrics, metric_data_list)

        # Dump the metric data, meta and metrics to disk.
        if self.verbose:
            print('Saving metrics to: %s' % self.output_dir)
        metrics_summary = metrics.serialize()
        metrics_summary['meta'] = self.meta.copy()
        with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
            json.dump(metrics_summary, f, indent=2)
        with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
            json.dump(metric_data_list.serialize(), f, indent=2)

        # Print high-level metrics.
        print('mAP: %.4f' % (metrics_summary['mean_ap']))
        err_name_mapping = {
            'trans_err': 'mATE',
            'scale_err': 'mASE',
            'orient_err': 'mAOE',
            'vel_err': 'mAVE',
            'attr_err': 'mAAE'
        }
        for tp_name, tp_val in metrics_summary['tp_errors'].items():
            print('%s: %.4f' % (err_name_mapping[tp_name], tp_val))
        print('NDS: %.4f' % (metrics_summary['nd_score']))
        print('Eval time: %.1fs' % metrics_summary['eval_time'])

        return metrics_summary


if __name__ == "__main__":
    # Settings.
    parser = argparse.ArgumentParser(description='Evaluate nuScenes result submission.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('result_path', type=str, help='The submission as a JSON file.')
    parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',
                        help='Folder to store result metrics, graphs and example visualizations.')
    parser.add_argument('--eval_set', type=str, default='val',
                        help='Which dataset split to evaluate on, train, val or test.')
    parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
                        help='Default nuScenes data directory.')
    parser.add_argument('--version', type=str, default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--config_name', type=str, default='cvpr_2019',
                        help='Name of the configuration to use for evaluation, e.g. cvpr_2019.')
    parser.add_argument('--plot_examples', type=int, default=10,
                        help='How many example visualizations to write to disk.')
    parser.add_argument('--render_curves', type=int, default=1,
                        help='Whether to render PR and TP curves to disk.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    args = parser.parse_args()

    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_name_ = args.config_name
    plot_examples_ = args.plot_examples
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)

    cfg_ = config_factory(config_name_)
    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
    nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
                             output_dir=output_dir_, verbose=verbose_)
    nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
```
Remaining columns in schema order (avg_line_length → hits):

`45.421053 | 120 | 0.630525 | 1,515 | 12,082 | 4.815842 | 0.206601 | 0.024945 | 0.013706 | 0.020559 | 0.215872 | 0.158991 | 0.122259 | 0.081277 | 0.058936 | 0.029331 | 0 | 0.003611 | 0.266595 | 12,082 | 265 | 121 | 45.592453 | 0.819772 | 0.199388 | 0 | 0.073171 | 0 | 0 | 0.118392 | 0 | 0 | 0 | 0 | 0 | 0.006098 | 1 | 0.030488 | false | 0 | 0.085366 | 0.006098 | 0.140244 | 0.060976 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0`
54dcf21edb2556756e4c18e431858f02788f9d3a
| 9,520
|
py
|
Python
|
tests/get_problem_atcoder.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | null | null | null |
tests/get_problem_atcoder.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | null | null | null |
tests/get_problem_atcoder.py
|
aberent/api-client
|
845e5f1daa02cc7fee5a65234a24bb59a7b71083
|
[
"MIT"
] | null | null | null |
import unittest
from onlinejudge_api.main import main
class DownloadAtCoderTest(unittest.TestCase):
def test_icpc2013spring_a(self):
"""This problem contains both words `Input` and `Output` for the headings for sample outputs.
"""
url = 'http://jag2013spring.contest.atcoder.jp/tasks/icpc2013spring_a'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/jag2013spring/tasks/icpc2013spring_a",
"tests": [{
"input": "2 2\n2 \n1 >= 3\n2 <= 5\n2\n1 >= 4\n2 >= 3\n",
"output": "Yes\n"
}, {
"input": "2 2\n2 \n1 >= 5\n2 >= 5\n2\n1 <= 4\n2 <= 3\n",
"output": "Yes\n"
}, {
"input": "2 2\n2 \n1 >= 3\n2 <= 3\n2\n1 <= 2\n2 >= 5\n",
"output": "No\n"
}, {
"input": "1 2\n2\n1 <= 10\n1 >= 15\n",
"output": "No\n"
}, {
"input": "5 5\n3\n2 <= 1\n3 <= 1\n4 <= 1\n4\n2 >= 2\n3 <= 1\n4 <= 1\n5 <= 1\n3\n3 >= 2\n4 <= 1\n5 <= 1\n2\n4 >= 2\n5 <= 1\n1\n5 >= 2 \n",
"output": "Yes\n"
}],
"name": "Everlasting Zero",
"context": {
"contest": {
"name": "Japan Alumni Group Spring Contest 2013",
"url": "https://atcoder.jp/contests/jag2013spring"
},
"alphabet": "A"
},
"memoryLimit": 128,
"timeLimit": 5000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_arc035_a(self):
"""This problem uses <code> tags in the descriptoin text in the sample section.
"""
url = 'http://arc035.contest.atcoder.jp/tasks/arc035_a'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/arc035/tasks/arc035_a",
"tests": [{
"input": "ab*\n",
"output": "YES\n"
}, {
"input": "abc\n",
"output": "NO\n"
}, {
"input": "a*bc*\n",
"output": "YES\n"
}, {
"input": "***\n",
"output": "YES\n"
}],
"name": "\u9ad8\u6a4b\u304f\u3093\u3068\u56de\u6587",
"context": {
"contest": {
"name": "AtCoder Regular Contest 035",
"url": "https://atcoder.jp/contests/arc035"
},
"alphabet": "A"
},
"memoryLimit": 256,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_abc114_c(self):
"""This tests a problem which uses a new-style format HTML.
"""
url = 'https://atcoder.jp/contests/abc114/tasks/abc114_c'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/abc114/tasks/abc114_c",
"tests": [{
"input": "575\n",
"output": "4\n"
}, {
"input": "3600\n",
"output": "13\n"
}, {
"input": "999999999\n",
"output": "26484\n"
}],
"name": "755",
"context": {
"contest": {
"name": "AtCoder Beginner Contest 114",
"url": "https://atcoder.jp/contests/abc114"
},
"alphabet": "C"
},
"memoryLimit": 1024,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_call_download_atcoder_abc003_4(self):
"""This tests a problem which uses an old-style format HTML.
"""
url = 'https://atcoder.jp/contests/abc003/tasks/abc003_4'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/abc003/tasks/abc003_4",
"tests": [{
"input": "3 2\n2 2\n2 2\n",
"output": "12\n"
}, {
"input": "4 5\n3 1\n3 0\n",
"output": "10\n"
}, {
"input": "23 18\n15 13\n100 95\n",
"output": "364527243\n"
}, {
"input": "30 30\n24 22\n145 132\n",
"output": "976668549\n"
}],
"name": "AtCoder\u793e\u306e\u51ac",
"context": {
"contest": {
"name": "AtCoder Beginner Contest 003",
"url": "https://atcoder.jp/contests/abc003"
},
"alphabet": "D"
},
"memoryLimit": 64,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_agc036_b(self):
"""In this problem, a sample output is empty.
"""
url = 'https://atcoder.jp/contests/agc036/tasks/agc036_b'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/agc036/tasks/agc036_b",
"tests": [{
"input": "3 2\n1 2 3\n",
"output": "2 3\n"
}, {
"input": "5 10\n1 2 3 2 3\n",
"output": "3\n"
}, {
"input": "6 1000000000000\n1 1 2 2 3 3\n",
"output": "\n"
}, {
"input": "11 97\n3 1 4 1 5 9 2 6 5 3 5\n",
"output": "9 2 6\n"
}],
"name": "Do Not Duplicate",
"context": {
"contest": {
"name": "AtCoder Grand Contest 036",
"url": "https://atcoder.jp/contests/agc036"
},
"alphabet": "B"
},
"memoryLimit": 1024,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_tenka1_2014_qualA_e(self):
"""This problem uses an unusual HTML markup.
.. seealso::
https://github.com/kmyk/online-judge-tools/issues/618
"""
url = 'https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e",
"tests": [{
"input": "5 3\nAAB\nABB\nCDE\nFFH\nGHH\n2\n1 1\n2 3\n",
"output": "15\n7\n"
}, {
"input": "2 2\nAB\nBA\n2\n1 1\n2 1\n",
"output": "2\n2\n"
}, {
"input": "5 5\nAABAA\nACDEA\nAFGHA\nAIJKA\nAAAAA\n1\n3 1\n",
"output": "25\n"
}],
"name": "\u30d1\u30ba\u30eb\u306e\u79fb\u52d5",
"context": {
"contest": {
"name": "\u5929\u4e0b\u4e00\u30d7\u30ed\u30b0\u30e9\u30de\u30fc\u30b3\u30f3\u30c6\u30b9\u30c82014\u4e88\u9078A",
"url": "https://atcoder.jp/contests/tenka1-2014-quala"
},
"alphabet": "E"
},
"memoryLimit": 256,
"timeLimit": 5000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_non_existing_problem(self):
"""This tests an non-existing problem.
"""
url = 'http://abc001.contest.atcoder.jp/tasks/abc001_100'
expected = {
"status": "error",
"messages": ["requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://atcoder.jp/contests/abc001/tasks/abc001_100"],
"result": None,
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_impossible_problem(self):
"""This tests a problem impossible to parse sample cases.
"""
url = 'https://chokudai001.contest.atcoder.jp/tasks/chokudai_001_a'
expected = {
"status": "error",
"messages": ["onlinejudge.type.SampleParseError: failed to parse samples"],
"result": None,
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
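
# A hedged template (illustration only) for adding another case inside
# DownloadAtCoderTest, following the fetch-and-compare pattern above; the URL
# is hypothetical and only coarse properties of the result are asserted:
#
#     def test_new_problem(self):
#         url = 'https://atcoder.jp/contests/abc999/tasks/abc999_a'  # placeholder
#         actual = main(['get-problem', url], debug=True)
#         self.assertEqual('ok', actual['status'])
#         self.assertTrue(actual['result']['tests'])  # at least one sample parsed
#
# Typical invocation: python -m unittest tests.get_problem_atcoder -v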
| 36.615385
| 157
| 0.411029
| 892
| 9,520
| 4.338565
| 0.253363
| 0.041602
| 0.065891
| 0.074677
| 0.491473
| 0.451938
| 0.384496
| 0.365116
| 0.347028
| 0.299742
| 0
| 0.104319
| 0.43813
| 9,520
| 259
| 158
| 36.756757
| 0.619181
| 0.06229
| 0
| 0.481982
| 0
| 0.031532
| 0.358086
| 0.038596
| 0
| 0
| 0
| 0
| 0.036036
| 1
| 0.036036
| false
| 0
| 0.009009
| 0
| 0.04955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54dcf64898b0684c67b6786b86aa9adc1e8b99c7
| 681
|
py
|
Python
|
odm/libexec/odm_tenant.py
|
UMCollab/ODM
|
95da49939dbcd54318a58a132aa76725fd9c0b5f
|
[
"MIT"
] | 2
|
2019-04-26T13:26:02.000Z
|
2019-10-18T10:36:52.000Z
|
odm/libexec/odm_tenant.py
|
flowerysong/ODM
|
95da49939dbcd54318a58a132aa76725fd9c0b5f
|
[
"MIT"
] | 1
|
2020-10-28T00:38:07.000Z
|
2020-10-28T00:38:07.000Z
|
odm/libexec/odm_tenant.py
|
flowerysong/ODM
|
95da49939dbcd54318a58a132aa76725fd9c0b5f
|
[
"MIT"
] | 1
|
2019-02-21T16:41:24.000Z
|
2019-02-21T16:41:24.000Z
|
#!/usr/bin/env python3
# This file is part of ODM and distributed under the terms of the
# MIT license. See COPYING.
import json
import sys
import odm.cli
def main():
cli = odm.cli.CLI(['action'])
client = cli.client
if cli.args.action == 'list-users':
print(json.dumps(client.list_users(), indent=2))
elif cli.args.action == 'list-sites':
print(json.dumps(client.list_sites(), indent=2))
elif cli.args.action == 'list-groups':
print(json.dumps(client.list_groups(), indent=2))
else:
print('Unsupported action {}'.format(cli.args.action), file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
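
# A minimal sketch (illustrative, not part of the original) of the same
# dispatch expressed as a table instead of the if/elif chain; the method
# names mirror the calls above, everything else is an assumption.
def dispatch(client, action):
    handlers = {
        'list-users': client.list_users,
        'list-sites': client.list_sites,
        'list-groups': client.list_groups,
    }
    if action not in handlers:
        print('Unsupported action {}'.format(action), file=sys.stderr)
        sys.exit(1)
    print(json.dumps(handlers[action](), indent=2))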
| 21.28125
| 79
| 0.638767
| 98
| 681
| 4.326531
| 0.459184
| 0.066038
| 0.122642
| 0.120283
| 0.301887
| 0.132075
| 0.132075
| 0
| 0
| 0
| 0
| 0.009294
| 0.209985
| 681
| 31
| 80
| 21.967742
| 0.77881
| 0.162996
| 0
| 0
| 0
| 0
| 0.116402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.176471
| 0
| 0.235294
| 0.235294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54dde115e15519f27b695b4a4ec6e5589e225fb7
| 17,182
|
py
|
Python
|
tests/test_tag_value_parser.py
|
quaresmajose/tools-python
|
53c917a1a2491a373efa23e4ef8570b5e863fabc
|
[
"Apache-2.0"
] | 74
|
2015-12-25T09:43:18.000Z
|
2022-03-30T00:23:30.000Z
|
tests/test_tag_value_parser.py
|
quaresmajose/tools-python
|
53c917a1a2491a373efa23e4ef8570b5e863fabc
|
[
"Apache-2.0"
] | 184
|
2016-11-23T15:57:16.000Z
|
2022-03-15T05:25:59.000Z
|
tests/test_tag_value_parser.py
|
quaresmajose/tools-python
|
53c917a1a2491a373efa23e4ef8570b5e863fabc
|
[
"Apache-2.0"
] | 98
|
2015-12-13T12:20:34.000Z
|
2022-03-18T15:28:35.000Z
|
# Copyright (c) 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import TestCase
import spdx
from spdx.parsers.tagvalue import Parser
from spdx.parsers.lexers.tagvalue import Lexer
from spdx.parsers.tagvaluebuilders import Builder
from spdx.parsers.loggers import StandardLogger
from spdx.version import Version
class TestLexer(TestCase):
maxDiff = None
def setUp(self):
self.l = Lexer()
self.l.build()
def test_document(self):
data = '''
SPDXVersion: SPDX-2.1
# Comment.
DataLicense: CC0-1.0
DocumentName: Sample_Document-V2.1
SPDXID: SPDXRef-DOCUMENT
DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301
DocumentComment: <text>This is a sample spreadsheet</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'DOC_VERSION', 'SPDXVersion', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDX-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_LICENSE', 'DataLicense', 4)
self.token_assert_helper(self.l.token(), 'LINE', 'CC0-1.0', 4)
self.token_assert_helper(self.l.token(), 'DOC_NAME', 'DocumentName', 5)
self.token_assert_helper(self.l.token(), 'LINE', 'Sample_Document-V2.1',
5)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DOCUMENT', 6)
self.token_assert_helper(self.l.token(), 'DOC_NAMESPACE',
'DocumentNamespace', 7)
self.token_assert_helper(self.l.token(), 'LINE',
'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301',
7)
self.token_assert_helper(self.l.token(), 'DOC_COMMENT', 'DocumentComment', 8)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>This is a sample spreadsheet</text>', 8)
def test_external_document_references(self):
data = '''
ExternalDocumentRef:DocumentRef-spdx-tool-2.1 http://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301 SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF',
'ExternalDocumentRef', 2)
self.token_assert_helper(self.l.token(), 'DOC_REF_ID',
'DocumentRef-spdx-tool-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_URI',
'http://spdx.org/spdxdocs/spdx-tools-v2.1-3F25'
'04E0-4F89-41D3-9A0C-0305E82C3301', 2)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF_CHKSUM',
'SHA1: '
'd6a770ba38583ed4bb4525bd96e50461655d2759', 2)
def test_creation_info(self):
data = '''
## Creation Information
Creator: Person: Gary O'Neall
Creator: Organization: Source Auditor Inc.
Creator: Tool: SourceAuditor-V1.2
Created: 2010-02-03T00:00:00Z
CreatorComment: <text>This is an example of an SPDX
spreadsheet format</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 3)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: Gary O'Neall", 3)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 4)
self.token_assert_helper(self.l.token(), 'ORG_VALUE', 'Organization: Source Auditor Inc.', 4)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 5)
self.token_assert_helper(self.l.token(), 'TOOL_VALUE', 'Tool: SourceAuditor-V1.2', 5)
self.token_assert_helper(self.l.token(), 'CREATED', 'Created', 6)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-03T00:00:00Z', 6)
def test_review_info(self):
data = '''
Reviewer: Person: Joe Reviewer
ReviewDate: 2010-02-10T00:00:00Z
ReviewComment: <text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'REVIEWER', 'Reviewer', 2)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: Joe Reviewer", 2)
self.token_assert_helper(self.l.token(), 'REVIEW_DATE', 'ReviewDate', 3)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-10T00:00:00Z', 3)
self.token_assert_helper(self.l.token(), 'REVIEW_COMMENT', 'ReviewComment', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '''<text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>''', 4)
    def test_package(self):
data = '''
SPDXID: SPDXRef-Package
FilesAnalyzed: False
PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12
PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)
ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:
ExternalRefComment: <text>Some comment about the package.</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Package', 2)
self.token_assert_helper(self.l.token(), 'PKG_FILES_ANALYZED', 'FilesAnalyzed', 3)
self.token_assert_helper(self.l.token(), 'LINE', 'False', 3)
self.token_assert_helper(self.l.token(), 'PKG_CHKSUM', 'PackageChecksum', 4)
self.token_assert_helper(self.l.token(), 'CHKSUM', 'SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12', 4)
self.token_assert_helper(self.l.token(), 'PKG_VERF_CODE', 'PackageVerificationCode', 5)
self.token_assert_helper(self.l.token(), 'LINE', '4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)', 5)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF', 'ExternalRef', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:', 6)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF_COMMENT', 'ExternalRefComment', 7)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some comment about the package.</text>', 7)
def test_unknown_tag(self):
data = '''
SomeUnknownTag: SomeUnknownValue
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'UNKNOWN_TAG', 'SomeUnknownTag', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SomeUnknownValue', 2)
def test_snippet(self):
data = '''
SnippetSPDXID: SPDXRef-Snippet
SnippetLicenseComments: <text>Some lic comment.</text>
SnippetCopyrightText: <text>Some cr text.</text>
SnippetComment: <text>Some snippet comment.</text>
SnippetName: from linux kernel
SnippetFromFileSPDXID: SPDXRef-DoapSource
SnippetLicenseConcluded: Apache-2.0
LicenseInfoInSnippet: Apache-2.0
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SNIPPET_SPDX_ID', 'SnippetSPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Snippet', 2)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_COMMENT', 'SnippetLicenseComments', 3)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some lic comment.</text>', 3)
self.token_assert_helper(self.l.token(), 'SNIPPET_CR_TEXT', 'SnippetCopyrightText', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some cr text.</text>', 4)
self.token_assert_helper(self.l.token(), 'SNIPPET_COMMENT', 'SnippetComment', 5)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some snippet comment.</text>', 5)
self.token_assert_helper(self.l.token(), 'SNIPPET_NAME', 'SnippetName', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'from linux kernel', 6)
self.token_assert_helper(self.l.token(), 'SNIPPET_FILE_SPDXID',
'SnippetFromFileSPDXID', 7)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DoapSource', 7)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_CONC',
'SnippetLicenseConcluded', 8)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 8)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_INFO',
'LicenseInfoInSnippet', 9)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 9)
def token_assert_helper(self, token, ttype, value, line):
assert token.type == ttype
assert token.value == value
assert token.lineno == line
class TestParser(TestCase):
maxDiff = None
document_str = '\n'.join([
'SPDXVersion: SPDX-2.1',
'DataLicense: CC0-1.0',
'DocumentName: Sample_Document-V2.1',
'SPDXID: SPDXRef-DOCUMENT',
'DocumentComment: <text>Sample Comment</text>',
'DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
])
creation_str = '\n'.join([
'Creator: Person: Bob (bob@example.com)',
'Creator: Organization: Acme.',
'Created: 2010-02-03T00:00:00Z',
'CreatorComment: <text>Sample Comment</text>'
])
review_str = '\n'.join([
'Reviewer: Person: Bob the Reviewer',
'ReviewDate: 2010-02-10T00:00:00Z',
'ReviewComment: <text>Bob was Here.</text>',
'Reviewer: Person: Alice the Reviewer',
'ReviewDate: 2011-02-10T00:00:00Z',
'ReviewComment: <text>Alice was also here.</text>'
])
package_str = '\n'.join([
'PackageName: Test',
'SPDXID: SPDXRef-Package',
'PackageVersion: Version 0.9.2',
'PackageDownloadLocation: http://example.com/test',
'FilesAnalyzed: True',
'PackageSummary: <text>Test package</text>',
'PackageSourceInfo: <text>Version 1.0 of test</text>',
'PackageFileName: test-1.0.zip',
'PackageSupplier: Organization:ACME',
'PackageOriginator: Organization:ACME',
'PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
'PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (something.rdf, something.txt)',
'PackageDescription: <text>A package.</text>',
'PackageComment: <text>Comment on the package.</text>',
'PackageCopyrightText: <text> Copyright 2014 Acme Inc.</text>',
'PackageLicenseDeclared: Apache-2.0',
'PackageLicenseConcluded: (LicenseRef-2.0 and Apache-2.0)',
'PackageLicenseInfoFromFiles: Apache-1.0',
'PackageLicenseInfoFromFiles: Apache-2.0',
'PackageLicenseComments: <text>License Comments</text>',
'ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:',
'ExternalRefComment: <text>Some comment about the package.</text>'
])
file_str = '\n'.join([
'FileName: testfile.java',
'SPDXID: SPDXRef-File',
'FileType: SOURCE',
'FileChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
'LicenseConcluded: Apache-2.0',
'LicenseInfoInFile: Apache-2.0',
'FileCopyrightText: <text>Copyright 2014 Acme Inc.</text>',
'ArtifactOfProjectName: AcmeTest',
'ArtifactOfProjectHomePage: http://www.acme.org/',
'ArtifactOfProjectURI: http://www.acme.org/',
'FileComment: <text>Very long file</text>'
])
unknown_tag_str = 'SomeUnknownTag: SomeUnknownValue'
snippet_str = '\n'.join([
'SnippetSPDXID: SPDXRef-Snippet',
'SnippetLicenseComments: <text>Some lic comment.</text>',
'SnippetCopyrightText: <text> Copyright 2008-2010 John Smith </text>',
'SnippetComment: <text>Some snippet comment.</text>',
'SnippetName: from linux kernel',
'SnippetFromFileSPDXID: SPDXRef-DoapSource',
'SnippetLicenseConcluded: Apache-2.0',
'LicenseInfoInSnippet: Apache-2.0',
])
complete_str = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}'.format(document_str, creation_str, review_str, package_str, file_str, snippet_str)
def setUp(self):
self.p = Parser(Builder(), StandardLogger())
self.p.build()
def test_doc(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert document.version == Version(major=2, minor=1)
assert document.data_license.identifier == 'CC0-1.0'
assert document.name == 'Sample_Document-V2.1'
assert document.spdx_id == 'SPDXRef-DOCUMENT'
assert document.comment == 'Sample Comment'
assert document.namespace == 'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
def test_creation_info(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.creation_info.creators) == 2
assert document.creation_info.comment == 'Sample Comment'
assert (document.creation_info.created_iso_format == '2010-02-03T00:00:00Z')
def test_review(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.reviews) == 2
def test_package(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert document.package.name == 'Test'
assert document.package.spdx_id == 'SPDXRef-Package'
assert document.package.version == 'Version 0.9.2'
assert len(document.package.licenses_from_files) == 2
assert (document.package.conc_lics.identifier == 'LicenseRef-2.0 AND Apache-2.0')
        assert document.package.files_analyzed is True
assert document.package.comment == 'Comment on the package.'
assert document.package.pkg_ext_refs[-1].category == 'SECURITY'
assert document.package.pkg_ext_refs[-1].pkg_ext_ref_type == 'cpe23Type'
assert document.package.pkg_ext_refs[-1].locator == 'cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:'
assert document.package.pkg_ext_refs[-1].comment == 'Some comment about the package.'
def test_file(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.package.files) == 1
spdx_file = document.package.files[0]
assert spdx_file.name == 'testfile.java'
assert spdx_file.spdx_id == 'SPDXRef-File'
assert spdx_file.type == spdx.file.FileType.SOURCE
assert len(spdx_file.artifact_of_project_name) == 1
assert len(spdx_file.artifact_of_project_home) == 1
assert len(spdx_file.artifact_of_project_uri) == 1
def test_unknown_tag(self):
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
saved_out = sys.stdout
sys.stdout = StringIO()
document, error = self.p.parse(self.unknown_tag_str)
self.assertEqual(sys.stdout.getvalue(), 'Found unknown tag : SomeUnknownTag at line: 1\n')
sys.stdout = saved_out
assert error
assert document is not None
def test_snippet(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.snippet) == 1
assert document.snippet[-1].spdx_id == 'SPDXRef-Snippet'
assert document.snippet[-1].name == 'from linux kernel'
assert document.snippet[-1].comment == 'Some snippet comment.'
assert document.snippet[-1].copyright == ' Copyright 2008-2010 John Smith '
assert document.snippet[-1].license_comment == 'Some lic comment.'
assert document.snippet[-1].snip_from_file_spdxid == 'SPDXRef-DoapSource'
assert document.snippet[-1].conc_lics.identifier == 'Apache-2.0'
assert document.snippet[-1].licenses_in_snippet[-1].identifier == 'Apache-2.0'
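
# A minimal standalone sketch (not one of the tests) of tokenizing a tag/value
# fragment with the Lexer imported above; PLY-style lexers yield None once the
# input is exhausted.
def dump_tokens(text):
    lexer = Lexer()
    lexer.build()
    lexer.input(text)
    tok = lexer.token()
    while tok is not None:
        print(tok.type, tok.value, tok.lineno)
        tok = lexer.token()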
| 49.091429
| 178
| 0.650506
| 2,066
| 17,182
| 5.277348
| 0.151016
| 0.031643
| 0.095111
| 0.117491
| 0.555994
| 0.478676
| 0.468403
| 0.442355
| 0.364303
| 0.295515
| 0
| 0.052878
| 0.217437
| 17,182
| 349
| 179
| 49.232092
| 0.757995
| 0.032359
| 0
| 0.190164
| 0
| 0.02623
| 0.416912
| 0.086127
| 0
| 0
| 0
| 0
| 0.380328
| 1
| 0.055738
| false
| 0
| 0.036066
| 0
| 0.131148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54df90a5374a87e257978dcb4c0e1caa9abfa7f7
| 2,024
|
py
|
Python
|
mount_drives.py
|
DT-was-an-ET/fanshim-python-pwm
|
dd3e6e29251000946e34d80704c040b5bcad7f8e
|
[
"MIT"
] | null | null | null |
mount_drives.py
|
DT-was-an-ET/fanshim-python-pwm
|
dd3e6e29251000946e34d80704c040b5bcad7f8e
|
[
"MIT"
] | null | null | null |
mount_drives.py
|
DT-was-an-ET/fanshim-python-pwm
|
dd3e6e29251000946e34d80704c040b5bcad7f8e
|
[
"MIT"
] | 3
|
2020-02-27T13:45:19.000Z
|
2020-03-26T13:38:17.000Z
|
# Standard library imports
from subprocess import call as subprocess_call
from utility import fileexists
from time import sleep as time_sleep
from datetime import datetime
mount_try = 1
not_yet = True
done = False
start_time = datetime.now()
if fileexists("/home/rpi4-sftp/usb/drive_present.txt"):
when_usba = 0
else:
when_usba = -1
if fileexists("/home/duck-sftp/usb/drive_present.txt"):
when_usbb = 0
else:
when_usbb = -1
if fileexists("/home/pi/mycloud/drive_present.txt"):
when_mycloud = 0
else:
when_mycloud = -1
while (mount_try < 30) and not_yet:
try:
usba_mounted = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
usbb_mounted = fileexists("/home/duck-sftp/usb/drive_present.txt")
mycloud_mounted = fileexists("/home/pi/mycloud/drive_present.txt")
if not(usba_mounted and usbb_mounted and mycloud_mounted):
print("Something Needs mounting this is try number: ", mount_try)
subprocess_call(["sudo", "mount", "-a"])
mount_try += 1
usba_mounted_after = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
usbb_mounted_after = fileexists("/home/duck-sftp/usb/drive_present.txt")
mycloud_mounted_after = fileexists("/home/pi/mycloud/drive_present.txt")
if not(usba_mounted) and usba_mounted_after:
when_usba = round((datetime.now() - start_time).total_seconds(),2)
if not(usbb_mounted) and usbb_mounted_after:
when_usbb = round((datetime.now() - start_time).total_seconds(),2)
if not(mycloud_mounted) and mycloud_mounted_after:
when_mycloud = round((datetime.now() - start_time).total_seconds(),2)
if usba_mounted_after and usbb_mounted_after and mycloud_mounted_after:
print("Success at :",when_usba,when_usbb,when_mycloud, " secs from start")
not_yet = False
done = True
    except Exception as err:
        # The original printed an undefined name (`count`); report the attempt number instead.
        print("Try", mount_try, "error:", err)
    time_sleep(1)
if done:
print("Great!")
else:
print("Failed to do all or drive_present.txt file not present; Times :",when_usba,when_usbb,when_mycloud)
while True:
time_sleep(20000)
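
# A minimal sketch (illustrative) of the same poll-and-remount pattern as a
# reusable helper; the sentinel paths are placeholders, and fileexists,
# subprocess_call and time_sleep are the imports already used above.
def wait_until_mounted(sentinels, attempts=30, delay=1):
    for _ in range(attempts):
        if all(fileexists(p) for p in sentinels):
            return True
        subprocess_call(["sudo", "mount", "-a"])
        time_sleep(delay)
    return False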
| 36.142857
| 107
| 0.733202
| 301
| 2,024
| 4.694352
| 0.239203
| 0.084926
| 0.106157
| 0.080679
| 0.44586
| 0.44586
| 0.401982
| 0.375088
| 0.318471
| 0.290163
| 0
| 0.012768
| 0.148715
| 2,024
| 55
| 108
| 36.8
| 0.807313
| 0.011858
| 0
| 0.078431
| 0
| 0
| 0.252702
| 0.166752
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078431
| 0
| 0.078431
| 0.098039
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54e179a25d793c478f7e42c99a00025d13aed6d0
| 1,438
|
py
|
Python
|
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
|
charlescayno/automation
|
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010-2014 openpyxl
import pytest
from openpyxl.styles.borders import Border, Side
from openpyxl.styles.fills import GradientFill
from openpyxl.styles.colors import Color
from openpyxl.writer.styles import StyleWriter
from openpyxl.tests.helper import get_xml, compare_xml
class DummyWorkbook:
style_properties = []
def test_write_gradient_fill():
fill = GradientFill(degree=90, stop=[Color(theme=0), Color(theme=4)])
writer = StyleWriter(DummyWorkbook())
writer._write_gradient_fill(writer._root, fill)
xml = get_xml(writer._root)
expected = """<?xml version="1.0" ?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<gradientFill degree="90" type="linear">
<stop position="0">
<color theme="0"/>
</stop>
<stop position="1">
<color theme="4"/>
</stop>
</gradientFill>
</styleSheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_write_borders():
borders = Border()
writer = StyleWriter(DummyWorkbook())
writer._write_border(writer._root, borders)
xml = get_xml(writer._root)
expected = """<?xml version="1.0"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</styleSheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
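
# A minimal usage sketch (outside the tests): applying the same GradientFill
# to a worksheet cell. This assumes a reasonably recent openpyxl in which
# cell.fill is assignable; the filename is a placeholder.
def write_gradient_demo(path='gradient_demo.xlsx'):
    from openpyxl import Workbook
    wb = Workbook()
    ws = wb.active
    ws['A1'].fill = GradientFill(degree=90, stop=[Color(theme=0), Color(theme=4)])
    wb.save(path)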
| 25.678571
| 78
| 0.684284
| 173
| 1,438
| 5.566474
| 0.364162
| 0.062305
| 0.056075
| 0.074766
| 0.413292
| 0.328141
| 0.328141
| 0.328141
| 0.328141
| 0.328141
| 0
| 0.025274
| 0.174548
| 1,438
| 55
| 79
| 26.145455
| 0.786015
| 0.022253
| 0
| 0.4
| 0
| 0
| 0.36396
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 1
| 0.044444
| false
| 0
| 0.133333
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54e459da47af69f9dc842497504519a50554986e
| 774
|
py
|
Python
|
tests/__init__.py
|
zhangyiming07/QT4C
|
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
|
[
"BSD-3-Clause"
] | 53
|
2020-02-20T06:56:03.000Z
|
2022-03-03T03:09:25.000Z
|
tests/__init__.py
|
zhangyiming07/QT4C
|
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
|
[
"BSD-3-Clause"
] | 6
|
2020-03-03T03:15:53.000Z
|
2021-01-29T02:24:06.000Z
|
tests/__init__.py
|
zhangyiming07/QT4C
|
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
|
[
"BSD-3-Clause"
] | 17
|
2020-02-26T03:51:41.000Z
|
2022-03-24T02:23:51.000Z
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''Unit tests.
'''
import unittest
import os
import sys
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(test_dir))
def main():
runner = unittest.TextTestRunner(verbosity=10 + sys.argv.count('-v'))
suite = unittest.TestLoader().discover(test_dir, pattern='test_*.py')
raise SystemExit(not runner.run(suite).wasSuccessful())
if __name__ == '__main__':
main()
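
# Hypothetical invocations, based on the discovery pattern above:
#   python tests/__init__.py          # run every tests/test_*.py
#   python tests/__init__.py -v -v    # each extra -v raises the runner verbosity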
| 28.666667
| 103
| 0.719638
| 114
| 774
| 4.745614
| 0.692982
| 0.038817
| 0.025878
| 0.048059
| 0.073937
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021472
| 0.157623
| 774
| 26
| 104
| 29.769231
| 0.808282
| 0.453488
| 0
| 0
| 0
| 0
| 0.046341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54e639174a97601933059aabae1c3acdb2b90d00
| 323
|
py
|
Python
|
brute/brute_build.py
|
sweetsbeats/starter-snake-python
|
e7cb56a3a623a324f4b5ef956020990e8c61f871
|
[
"MIT"
] | null | null | null |
brute/brute_build.py
|
sweetsbeats/starter-snake-python
|
e7cb56a3a623a324f4b5ef956020990e8c61f871
|
[
"MIT"
] | null | null | null |
brute/brute_build.py
|
sweetsbeats/starter-snake-python
|
e7cb56a3a623a324f4b5ef956020990e8c61f871
|
[
"MIT"
] | 2
|
2019-05-05T00:41:26.000Z
|
2019-05-05T00:46:45.000Z
|
from cffi import FFI
ffibuilder = FFI()
ffibuilder.cdef("""
int test(int t);
""")
ffibuilder.set_source("_pi_cffi",
"""
#include "brute.h"
""",
sources=['brute.c'])
if __name__ == "__main__":
    ffibuilder.compile(verbose=True)
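
# After a successful build, the generated module can be used as below
# (a sketch; it assumes brute.c actually defines the declared function):
#   from _pi_cffi import lib
#   print(lib.test(42))   # calls `int test(int t)` from brute.c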
| 19
| 42
| 0.479876
| 30
| 323
| 4.8
| 0.766667
| 0.180556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.374613
| 323
| 16
| 43
| 20.1875
| 0.712871
| 0
| 0
| 0
| 0
| 0
| 0.162055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54e64db782245fc204cf4d668f6d515f9131a03b
| 2,392
|
py
|
Python
|
src/board.py
|
JNotelddim/python-snake
|
da95339d3a982040a84422e5f7b95453095a4450
|
[
"MIT"
] | null | null | null |
src/board.py
|
JNotelddim/python-snake
|
da95339d3a982040a84422e5f7b95453095a4450
|
[
"MIT"
] | null | null | null |
src/board.py
|
JNotelddim/python-snake
|
da95339d3a982040a84422e5f7b95453095a4450
|
[
"MIT"
] | null | null | null |
"""Board Module"""
import copy
from typing import Tuple, List
from src.coordinate import Coordinate
from src.snake import Snake
class Board:
"""Track the cooardinates for all snakes and food in the game."""
def __init__(self, data):
self._data = data
self._snakes = None
self._foods = None
@property
def snakes(self) -> List[Snake]:
"""Retreive the list of snakes from the board data."""
if self._snakes is None:
snakes = [Snake(snake_data) for snake_data in self._data['snakes']]
self._snakes = snakes
return self._snakes
@property
def foods(self) -> List[Coordinate]:
"""Retreive the list of food from the board data."""
if self._foods is None:
self._foods = [Coordinate(food_data) for food_data in self._data['food']]
return self._foods
@property
def width(self) -> int:
"""Get width of the board -- note: it's a square."""
return self._data['width']
def is_coordinate_in_bounds(self, coordinate) -> bool:
"""Check whether or not the Coordinate is within the bounds of the Board."""
is_wall = (coordinate.x == -1 or coordinate.x == self.width
or coordinate.y == -1 or coordinate.y == self.width)
return not is_wall
def get_other_snakes(self, exclude_id) -> List[Snake]:
"""Get the List of Snakes whose IDs don't match the given ID."""
return [snake for snake in self.snakes if snake.id != exclude_id]
def advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
"""Return a new board with our snake advanced along given path."""
new_board = copy.deepcopy(self)
return new_board.__help_advance_snake_along_path(snake_id, path)
def __help_advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
"""Do the actual advancement of the snake along the path."""
me = next((snake for snake in self.snakes if snake.id == snake_id), None)
if not me:
raise ValueError("No snake for given id!")
me.coordinates += path
me.coordinates = me.coordinates[len(path):]
me.coordinates.reverse()
me.coordinates.append(me.coordinates[-1])
print("new coords:")
for coord in me.coordinates:
print(coord)
return self
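
# A minimal construction sketch (illustrative): the key names mirror the ones
# read above; the exact payloads expected by Snake/Coordinate are assumptions.
def example_board():
    data = {
        'width': 11,
        'food': [{'x': 3, 'y': 4}],   # assumed Coordinate payload
        'snakes': [],                 # Snake payloads omitted for brevity
    }
    board = Board(data)
    return board.width, board.foods, board.get_other_snakes(exclude_id='me')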
| 37.375
| 85
| 0.633361
| 330
| 2,392
| 4.439394
| 0.263636
| 0.062116
| 0.01843
| 0.043003
| 0.168601
| 0.148805
| 0.118771
| 0.118771
| 0.118771
| 0.072355
| 0
| 0.0017
| 0.262124
| 2,392
| 63
| 86
| 37.968254
| 0.828329
| 0.192726
| 0
| 0.068182
| 0
| 0
| 0.025464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54e781207e20bd9e8679af88a83847cfe7947287
| 2,349
|
py
|
Python
|
personalized_nlp/datasets/wiki/base.py
|
CLARIN-PL/personalized-nlp
|
340294300f93d12cabc59b055ff2548df8f4081a
|
[
"MIT"
] | null | null | null |
personalized_nlp/datasets/wiki/base.py
|
CLARIN-PL/personalized-nlp
|
340294300f93d12cabc59b055ff2548df8f4081a
|
[
"MIT"
] | 1
|
2022-03-15T23:48:51.000Z
|
2022-03-15T23:48:51.000Z
|
personalized_nlp/datasets/wiki/base.py
|
CLARIN-PL/personalized-nlp
|
340294300f93d12cabc59b055ff2548df8f4081a
|
[
"MIT"
] | null | null | null |
import os
import zipfile
from typing import List
import pandas as pd
import urllib
from personalized_nlp.settings import STORAGE_DIR
from personalized_nlp.utils.data_splitting import split_texts
from personalized_nlp.datasets.datamodule_base import BaseDataModule
class WikiDataModule(BaseDataModule):
def __init__(
self,
split_sizes: List[float] = [0.55, 0.15, 0.15, 0.15],
**kwargs,
):
super().__init__(**kwargs)
self.data_dir = STORAGE_DIR / 'wiki_data'
self.annotation_column = ''
self.word_stats_annotation_column = ''
self.embeddings_path = ''
self.train_split_names = ['present', 'past']
self.val_split_names = ['future1']
self.test_split_names = ['future2']
self.split_sizes = split_sizes
os.makedirs(self.data_dir / 'embeddings', exist_ok=True)
@property
def class_dims(self):
return [2]
@property
def texts_clean(self):
texts = self.data.text.to_list()
texts = [c.replace('NEWLINE_TOKEN', ' ') for c in texts]
return texts
def _remap_column_names(self, df):
mapping = {'rev_id': 'text_id',
'worker_id': 'annotator_id', 'comment': 'text'}
df.columns = [mapping.get(col, col) for col in df.columns]
return df
def prepare_data(self) -> None:
self.data = pd.read_csv(
self.data_dir / (self.annotation_column + '_annotated_comments.tsv'), sep='\t')
self.data = self._remap_column_names(self.data)
self.data['text'] = self.data['text'].str.replace(
'NEWLINE_TOKEN', ' ')
self.annotators = pd.read_csv(
self.data_dir / (self.annotation_column + '_worker_demographics.tsv'), sep='\t')
self.annotators = self._remap_column_names(self.annotators)
self.annotations = pd.read_csv(
self.data_dir / (self.annotation_column + '_annotations.tsv'), sep='\t')
self.annotations = self._remap_column_names(self.annotations)
self._assign_splits()
personal_df = self.annotations_with_data.loc[self.annotations_with_data.split == 'past']
self.compute_annotator_biases(personal_df)
def _assign_splits(self):
self.data = split_texts(self.data, self.split_sizes)
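
# A minimal usage sketch (illustrative). Note that annotation_column is empty
# in this base class, so a concrete subclass is expected to set it before
# prepare_data() can locate the *_annotated_comments.tsv files under
# STORAGE_DIR / 'wiki_data'.
def build_datamodule():
    dm = WikiDataModule(split_sizes=[0.55, 0.15, 0.15, 0.15])
    dm.prepare_data()
    return dm.class_dims, len(dm.texts_clean)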
| 32.178082
| 96
| 0.638995
| 288
| 2,349
| 4.920139
| 0.34375
| 0.073395
| 0.038814
| 0.056457
| 0.135498
| 0.084686
| 0.084686
| 0.084686
| 0.084686
| 0
| 0
| 0.00846
| 0.245211
| 2,349
| 73
| 97
| 32.178082
| 0.79075
| 0
| 0
| 0.037037
| 0
| 0
| 0.084681
| 0.02
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.148148
| 0.018519
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54ea3d9d70532f8dc30f4d5946975cecc10f6326
| 11,009
|
py
|
Python
|
pilbox/test/app_test.py
|
joevandyk/pilbox
|
b84732a78e5bdb2d24bf7ef4177d45806ac03ea6
|
[
"Apache-2.0"
] | null | null | null |
pilbox/test/app_test.py
|
joevandyk/pilbox
|
b84732a78e5bdb2d24bf7ef4177d45806ac03ea6
|
[
"Apache-2.0"
] | null | null | null |
pilbox/test/app_test.py
|
joevandyk/pilbox
|
b84732a78e5bdb2d24bf7ef4177d45806ac03ea6
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, \
with_statement
import logging
import os.path
import time
import tornado.escape
import tornado.gen
import tornado.ioloop
from tornado.test.util import unittest
from tornado.testing import AsyncHTTPTestCase, gen_test
import tornado.web
from pilbox.app import PilboxApplication
from pilbox.errors import SignatureError, ClientError, HostError, \
BackgroundError, DimensionsError, FilterError, FormatError, ModeError, \
PositionError, QualityError, UrlError, ImageFormatError, FetchError
from pilbox.signature import sign
from pilbox.test import image_test
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import cv
except ImportError:
cv = None
logger = logging.getLogger("tornado.application")
class _AppAsyncMixin(object):
def fetch_error(self, code, *args, **kwargs):
response = self.fetch(*args, **kwargs)
self.assertEqual(response.code, code)
self.assertEqual(response.headers.get("Content-Type", None),
"application/json")
return tornado.escape.json_decode(response.body)
def fetch_success(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
self.assertEqual(response.code, 200)
return response
def get_image_resize_cases(self):
cases = image_test.get_image_resize_cases()
m = dict(background="bg", filter="filter", format="fmt",
position="pos", quality="q")
for i, case in enumerate(cases):
path = "/test/data/%s" % os.path.basename(case["source_path"])
cases[i]["source_query_params"] = dict(
url=self.get_url(path),
w=case["width"] or "",
h=case["height"] or "",
mode=case["mode"])
for k in m.keys():
if k in case:
cases[i]["source_query_params"][m.get(k)] = case[k]
if case.get("format") in ["jpeg", "jpg"]:
cases[i]["content_type"] = "image/jpeg"
elif case.get("format") == "png":
cases[i]["content_type"] = "image/png"
elif case.get("format") == "webp":
cases[i]["content_type"] = "image/webp"
else:
cases[i]["content_type"] = None
return cases
class _PilboxTestApplication(PilboxApplication):
def get_handlers(self):
path = os.path.join(os.path.dirname(__file__), "data")
handlers = [(r"/test/data/test-delayed.jpg", _DelayedHandler),
(r"/test/data/(.*)",
tornado.web.StaticFileHandler,
{"path": path})]
handlers.extend(super(_PilboxTestApplication, self).get_handlers())
return handlers
class _DelayedHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
delay = time.time() + float(self.get_argument("delay", 0.0))
yield tornado.gen.Task(
tornado.ioloop.IOLoop.instance().add_timeout, delay)
self.finish()
class AppTest(AsyncHTTPTestCase, _AppAsyncMixin):
def get_app(self):
return _PilboxTestApplication()
def test_missing_url(self):
qs = urlencode(dict(w=1, h=1))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), UrlError.get_code())
def test_missing_dimensions(self):
qs = urlencode(dict(url="http://foo.co/x.jpg"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), DimensionsError.get_code())
def test_invalid_width(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w="a", h=1))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), DimensionsError.get_code())
def test_invalid_height(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h="a"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), DimensionsError.get_code())
def test_invalid_mode(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, mode="foo"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ModeError.get_code())
def test_invalid_hexadecimal_background(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1,
mode="fill", bg="r"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), BackgroundError.get_code())
def test_invalid_long_background(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1,
mode="fill", bg="0f0f0f0f0"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), BackgroundError.get_code())
def test_invalid_position(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, pos="foo"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), PositionError.get_code())
def test_invalid_filter(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, filter="bar"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FilterError.get_code())
def test_invalid_format(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, fmt="foo"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FormatError.get_code())
def test_invalid_integer_quality(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, q="a"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), QualityError.get_code())
def test_outofbounds_quality(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, q=200))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), QualityError.get_code())
def test_unsupported_image_format(self):
path = "/test/data/test-bad-format.gif"
qs = urlencode(dict(url=self.get_url(path), w=1, h=1))
resp = self.fetch_error(415, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ImageFormatError.get_code())
def test_not_found(self):
path = "/test/data/test-not-found.jpg"
qs = urlencode(dict(url=self.get_url(path), w=1, h=1))
resp = self.fetch_error(404, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FetchError.get_code())
def test_not_connect(self):
qs = urlencode(dict(url="http://a.com/a.jpg", w=1, h=1))
resp = self.fetch_error(404, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FetchError.get_code())
def test_invalid_protocol(self):
path = os.path.join(os.path.dirname(__file__), "data", "test1.jpg")
qs = urlencode(dict(url="file://%s" % path, w=1, h=1))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), UrlError.get_code())
def test_valid(self):
cases = self.get_image_resize_cases()
for case in cases:
if case.get("mode") == "crop" and case.get("position") == "face":
continue
self._assert_expected_resize(case)
@unittest.skipIf(cv is None, "OpenCV is not installed")
def test_valid_face(self):
cases = self.get_image_resize_cases()
for case in cases:
if case.get("mode") == "crop" and case.get("position") == "face":
self._assert_expected_resize(case)
def _assert_expected_resize(self, case):
qs = urlencode(case["source_query_params"])
resp = self.fetch_success("/?%s" % qs)
msg = "/?%s does not match %s" \
% (qs, case["expected_path"])
if case["content_type"]:
self.assertEqual(resp.headers.get("Content-Type", None),
case["content_type"])
with open(case["expected_path"], "rb") as expected:
self.assertEqual(resp.buffer.read(), expected.read(), msg)
class AppRestrictedTest(AsyncHTTPTestCase, _AppAsyncMixin):
KEY = "abcdef"
NAME = "abc"
def get_app(self):
return _PilboxTestApplication(
client_name=self.NAME,
client_key=self.KEY,
allowed_hosts=["foo.co", "bar.io", "localhost"])
def test_missing_client_name(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1)
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ClientError.get_code())
def test_bad_client_name(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1, client="123")
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ClientError.get_code())
def test_missing_signature(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1, client=self.NAME)
qs = urlencode(params)
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), SignatureError.get_code())
def test_bad_signature(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1,
client=self.NAME, sig="abc123")
qs = urlencode(params)
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), SignatureError.get_code())
def test_bad_host(self):
params = dict(url="http://bar.co/x.jpg", w=1, h=1, client=self.NAME)
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), HostError.get_code())
def test_valid(self):
cases = self.get_image_resize_cases()
for case in cases:
if case.get("mode") == "crop" and case.get("position") == "face":
continue
params = case["source_query_params"]
params["client"] = self.NAME
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_success("/?%s" % qs)
msg = "/?%s does not match %s" \
% (qs, case["expected_path"])
with open(case["expected_path"], "rb") as expected:
self.assertEqual(resp.buffer.read(), expected.read(), msg)
class AppSlowTest(AsyncHTTPTestCase, _AppAsyncMixin):
def get_app(self):
return _PilboxTestApplication(timeout=0.5)
def test_timeout(self):
url = self.get_url("/test/data/test-delayed.jpg?delay=1.0")
qs = urlencode(dict(url=url, w=1, h=1))
resp = self.fetch_error(404, "/?%s" %qs)
self.assertEqual(resp.get("error_code"), FetchError.get_code())
| 39.887681
| 79
| 0.606504
| 1,433
| 11,009
| 4.51291
| 0.144452
| 0.064945
| 0.07345
| 0.061234
| 0.639709
| 0.565796
| 0.55559
| 0.552188
| 0.525282
| 0.524664
| 0
| 0.015445
| 0.235444
| 11,009
| 275
| 80
| 40.032727
| 0.752881
| 0
| 0
| 0.358407
| 0
| 0
| 0.126624
| 0.011173
| 0
| 0
| 0
| 0
| 0.137168
| 1
| 0.150442
| false
| 0
| 0.084071
| 0.013274
| 0.300885
| 0.004425
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54eaca929e4c45b157fe05142cabf897db4cf571
| 1,202
|
py
|
Python
|
hackathon/darkmattertemperaturedistribution/example.py
|
Neelraj21/phython
|
68a2cedccae694eb84880f3aa55cc01d458e055e
|
[
"WTFPL"
] | 6
|
2017-08-09T09:41:42.000Z
|
2021-04-22T05:10:17.000Z
|
hackathon/darkmattertemperaturedistribution/example.py
|
Neelraj21/phython
|
68a2cedccae694eb84880f3aa55cc01d458e055e
|
[
"WTFPL"
] | null | null | null |
hackathon/darkmattertemperaturedistribution/example.py
|
Neelraj21/phython
|
68a2cedccae694eb84880f3aa55cc01d458e055e
|
[
"WTFPL"
] | 5
|
2015-11-04T12:57:10.000Z
|
2020-10-18T17:32:25.000Z
|
#!/usr/bin/env python
from scipy import *
from pylab import *
#from pylab import imshow
#!
#! Some graphical explorations of the Julia sets with python and pyreport
#!#########################################################################
#$
#$ We start by defining a function J:
#$ \[ J_c : z \rightarrow z^2 + c \]
#$
def J(c):
return lambda z : z**2 + c
[x,y] = ogrid[ -1:1:0.002, -1:1:0.002 ]
z = x + y *1j
#! If we study the divergence of function J under repeated iteration
#! depending on its inital conditions we get a very pretty graph
threshTime = zeros_like(z)
for i in range(40):
z = J(0.285)(z)
threshTime += z*conj(z) > 4
figure(0)
axes([0,0,1,1])
axis('off')
imshow(threshTime)
bone()
show()
#! We can also do that systematicaly for other values of c:
axes([0,0,1,1])
axis('off')
rcParams.update({'figure.figsize': [10.5,5]})
c_values = (0.285 + 0.013j, 0.45 - 0.1428j, -0.70176 -0.3842j,
-0.835-0.2321j, -0.939 +0.167j, -0.986+0.87j)
for i,c in enumerate(c_values):
threshTime = zeros_like(z)
z = x + y *1j
for n in range(40):
z = J(c)(z)
threshTime += z*conj(z) > 4
subplot(2,3,i+1)
imshow(threshTime)
axis('off')
show()
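#! A minimal equivalent of the escape-time loop with explicit imports
#! (illustrative; the script above relies on star imports from scipy/pylab):
import numpy as np

def escape_time(c, n_iter=40, step=0.002):
    x, y = np.ogrid[-1:1:step, -1:1:step]
    z = x + y * 1j
    thresh_time = np.zeros(z.shape)
    for _ in range(n_iter):
        z = z ** 2 + c
        thresh_time += np.abs(z) ** 2 > 4
    return thresh_time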
| 26.130435
| 75
| 0.584859
| 207
| 1,202
| 3.371981
| 0.478261
| 0.011461
| 0.04298
| 0.060172
| 0.126075
| 0.094556
| 0.04298
| 0
| 0
| 0
| 0
| 0.096174
| 0.195507
| 1,202
| 45
| 76
| 26.711111
| 0.625646
| 0.315308
| 0
| 0.483871
| 0
| 0
| 0.031165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.064516
| 0.032258
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54ed860d4a6171f4dc1581a63c75ee95835b9b75
| 6,287
|
py
|
Python
|
eris/script/ferdian.py
|
ferdianap/Eris_test
|
c2a00d65f816ad6d48a65c14b4bea4f3d081b86b
|
[
"BSD-3-Clause"
] | 1
|
2015-06-12T04:38:09.000Z
|
2015-06-12T04:38:09.000Z
|
eris/script/ferdian.py
|
ferdianap/eris
|
c2a00d65f816ad6d48a65c14b4bea4f3d081b86b
|
[
"BSD-3-Clause"
] | null | null | null |
eris/script/ferdian.py
|
ferdianap/eris
|
c2a00d65f816ad6d48a65c14b4bea4f3d081b86b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
copied from
Baxter RSDK Joint Position Example: file playback
"""
from __future__ import print_function
import sys
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
import glob
from std_srvs.srv import Empty
def try_float(x):
try:
return float(x)
except ValueError:
return None
def clean_line(line, names):
"""
Cleans a single line of recorded joint positions
@param line: the line described in a list to process
@param names: joint name keys
"""
#convert the line of strings to a float or None
line = [try_float(x) for x in line.rstrip().split(',')]
#zip the values with the joint names
combined = zip(names[1:], line[1:])
#take out any tuples that have a none value
cleaned = [x for x in combined if x[1] is not None]
#convert it to a dictionary with only valid commands
command = dict(cleaned)
left_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'left_')
right_command = dict((key, command[key]) for key in command.keys()
if key[:-2] == 'right_')
return (command, left_command, right_command, line)
def map_file(filename, loops=1):
"""
Loops through csv file
@param filename: the file to play
@param loops: number of times to loop
values < 0 mean 'infinite'
Does not loop indefinitely, but only until the file is read
and processed. Reads each line, split up in columns and
formats each line into a controller command in the form of
name/value pairs. Names come from the column headers
first column is the time stamp
"""
left = baxter_interface.Limb('left')
right = baxter_interface.Limb('right')
grip_left = baxter_interface.Gripper('left', CHECK_VERSION)
grip_right = baxter_interface.Gripper('right', CHECK_VERSION)
rate = rospy.Rate(1000)
if grip_left.error():
grip_left.reset()
if grip_right.error():
grip_right.reset()
if (not grip_left.calibrated() and
grip_left.type() != 'custom'):
grip_left.calibrate()
if (not grip_right.calibrated() and
grip_right.type() != 'custom'):
grip_right.calibrate()
print("Playing back: %s" % (filename,))
with open(filename, 'r') as f:
lines = f.readlines()
keys = lines[0].rstrip().split(',')
l = 0
# If specified, repeat the file playback 'loops' number of times
while loops < 1 or l < loops:
i = 0
l += 1
print("Moving to start position...")
_cmd, lcmd_start, rcmd_start, _raw = clean_line(lines[1], keys)
left.move_to_joint_positions(lcmd_start)
right.move_to_joint_positions(rcmd_start)
start_time = rospy.get_time()
for values in lines[1:]:
i += 1
loopstr = str(loops) if loops > 0 else "forever"
sys.stdout.write("\r Record %d of %d, loop %d of %s" %
(i, len(lines) - 1, l, loopstr))
sys.stdout.flush()
cmd, lcmd, rcmd, values = clean_line(values, keys)
#command this set of commands until the next frame
while (rospy.get_time() - start_time) < values[0]:
if rospy.is_shutdown():
print("\n Aborting - ROS shutdown")
return False
if len(lcmd):
left.set_joint_positions(lcmd)
if len(rcmd):
right.set_joint_positions(rcmd)
if ('left_gripper' in cmd and
grip_left.type() != 'custom'):
grip_left.command_position(cmd['left_gripper'])
if ('right_gripper' in cmd and
grip_right.type() != 'custom'):
grip_right.command_position(cmd['right_gripper'])
rate.sleep()
        print()  # newline after the carriage-return progress line (print_function is in effect)
return True
def main():
dir = '/home/ros-baxter/sequence1/'
fam = 'no'
ext = '.rec'
#fname = fam+'*'+ext
#fam_list = glob.glob(ext)
#print(fam_list)
rospy.init_node("ferdian_file_playback")
    client = rospy.ServiceProxy("ferdian_example_service", Empty)
rs = baxter_interface.RobotEnable(CHECK_VERSION)
rs.enable()
rospy.loginfo("waiting for service")
rospy.wait_for_service("ferdian_example_service")
rospy.loginfo("service available")
#put your loop here
for file in sorted(glob.glob('./sequence1/*.rec')):
map_file(file)
rospy.loginfo("sending signal...") # to the image processing node
#for x in range(0, 3):
# map_file("AtoE.rec")
res = client()
rospy.loginfo("service returned")
###
if __name__ == '__main__':
main()
| 34.543956
| 77
| 0.655798
| 858
| 6,287
| 4.69697
| 0.36014
| 0.015881
| 0.013896
| 0.011414
| 0.105707
| 0.087841
| 0.087841
| 0.058065
| 0.058065
| 0.058065
| 0
| 0.007852
| 0.250517
| 6,287
| 181
| 78
| 34.734807
| 0.847411
| 0.405122
| 0
| 0.043011
| 0
| 0
| 0.112983
| 0.025967
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043011
| false
| 0
| 0.075269
| 0
| 0.172043
| 0.053763
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54f048a7a0b7d058cdc56c1d7f2c7462bde0f3d6
| 4,461
|
py
|
Python
|
core/src/main/python/akdl/entry/base_entry.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 3,301
|
2018-10-01T16:30:44.000Z
|
2022-03-30T08:07:16.000Z
|
core/src/main/python/akdl/entry/base_entry.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 206
|
2019-11-27T14:04:42.000Z
|
2022-03-28T08:02:05.000Z
|
core/src/main/python/akdl/entry/base_entry.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 765
|
2018-10-09T02:02:19.000Z
|
2022-03-31T12:06:21.000Z
|
import abc
# json and os are used below; import them explicitly rather than relying
# on the wildcard import from flink_ml_framework.java_file.
import json
import os
from typing import Dict, Callable
import tensorflow as tf
from flink_ml_framework.context import Context
from flink_ml_framework.java_file import *
from ..runner import tf_helper, io_helper
from ..runner.output_writer import DirectOutputWriter
try:
from flink_ml_tensorflow.tensorflow_context import TFContext
except ImportError:  # fall back to the TF2 build of the flink connector
from flink_ml_tensorflow2.tensorflow_context import TFContext
# noinspection PyUnresolvedReferences
from tensorflow_io.core.python.ops import core_ops
__all__ = ['TF1_TYPE', 'TF2_TYPE']
TF1_TYPE = 'tf1'
TF2_TYPE = 'tf2'
class BaseEntry(abc.ABC):
def __init__(self, func_name, engine_type):
self.func_name = func_name
self.engine_type = engine_type
@staticmethod
def get_func_by_name(func_name):
"""
Get function by the func name
:param func_name: func name
:return: function
"""
if '.' not in func_name:
if func_name in globals():
return globals()[func_name]
else:
raise RuntimeError('cannot find function[{}]'.format(func_name))
else:
module_name, func_name = func_name.rsplit('.', 1)
import importlib
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
c = getattr(m, func_name)
return c
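        # Example (hypothetical): get_func_by_name('my_pkg.train.main')
        # imports my_pkg.train and returns its `main` attribute, while a
        # bare name such as 'main' is looked up in this module's globals().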
@abc.abstractmethod
def construct_args(self, **kwargs):
pass
def is_batch(self):
return True
def post_process(self, **kwargs):
pass
def entry_func(self, context: Context):
tf_context = TFContext(context)
properties = tf_context.properties
print('properties', properties, flush=True)
# intra_op_parallelism is set by akdl, because there is a bug in TensorFlow 1.x
# See: https://stackoverflow.com/questions/34426268/restricting-number-of-cores-used
intra_op_parallelism = int(properties['ALINK:intra_op_parallelism'])
if self.engine_type == TF1_TYPE:
tf_helper.set_intra_op_parallelism(intra_op_parallelism_threads=intra_op_parallelism)
elif self.engine_type == TF2_TYPE:
tf.config.threading.set_intra_op_parallelism_threads(intra_op_parallelism)
num_workers = int(properties['ALINK:num_workers'])
work_dir = properties['ALINK:work_dir']
cluster, task_type, task_index = tf_context.export_estimator_cluster()
if self.is_batch():
java_queue_file = JavaFile(context.from_java(), context.to_java())
dataset_file = os.path.join(work_dir, 'dataset.tfrecords')
dataset, dataset_length = io_helper.convert_java_queue_file_to_repeatable_dataset(java_queue_file,
dataset_file)
print("number of records: " + str(dataset_length), flush=True)
dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf.data.TFRecordDataset(dataset_file)
else:
dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf_context.flink_stream_dataset()
dataset = None
dataset_file = None
dataset_length = None
saved_model_dir = os.path.join(work_dir, 'savedmodel')
user_params: Dict = json.loads(properties['ALINK:user_defined_params'])
for i in range(1, 1024):
key = "ALINK:bc_" + str(i)
if key in properties:
user_params[key] = context.properties[key]
key = "ALINK:model_dir"
if key in properties:
user_params[key] = properties[key]
output_writer = DirectOutputWriter(tf_context.from_java(), tf_context.to_java())
locals_copy = locals().copy()
locals_copy.pop("self")
print("locals_copy = ", locals_copy, flush=True)
args = self.construct_args(**locals_copy)
func = self.get_func_by_name(self.func_name)
func(args)
print("task_type = {}, task_index = {}: done tf_user_main".format(task_type, task_index), flush=True)
local_vars = locals().copy()
local_vars.pop('self')
self.post_process(**local_vars)
print("task_type = {}, task_index = {}: exit".format(task_type, task_index), flush=True)
output_writer.close()
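# A minimal concrete subclass, shown only as a sketch (all names are
# hypothetical; construct_args must build whatever `func` expects):
#
# class EchoEntry(BaseEntry):
#     def construct_args(self, **kwargs):
#         return {'user_params': kwargs.get('user_params')}
#
# EchoEntry('my_pkg.train.main', TF2_TYPE).entry_func(context) would then
# resolve my_pkg.train.main and invoke it with the constructed args.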
| 36.867769
| 110
| 0.647837
| 546
| 4,461
| 5.007326
| 0.302198
| 0.043892
| 0.05267
| 0.03109
| 0.13899
| 0.110461
| 0.110461
| 0.03365
| 0
| 0
| 0
| 0.007268
| 0.259807
| 4,461
| 120
| 111
| 37.175
| 0.820715
| 0.091011
| 0
| 0.082353
| 0
| 0
| 0.079531
| 0.012715
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070588
| false
| 0.023529
| 0.141176
| 0.011765
| 0.258824
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54f164400ecea40c3dfdfcd5317d3f9f381a79ff
| 12,450
|
py
|
Python
|
corm-tests/test_corm_api.py
|
jbcurtin/cassandra-orm
|
2c5540de36166c81832c1ccd0ee40c52e598e05c
|
[
"MIT"
] | 1
|
2021-03-25T01:21:19.000Z
|
2021-03-25T01:21:19.000Z
|
corm-tests/test_corm_api.py
|
jbcurtin/cassandra-orm
|
2c5540de36166c81832c1ccd0ee40c52e598e05c
|
[
"MIT"
] | null | null | null |
corm-tests/test_corm_api.py
|
jbcurtin/cassandra-orm
|
2c5540de36166c81832c1ccd0ee40c52e598e05c
|
[
"MIT"
] | null | null | null |
import pytest
ENCODING = 'utf-8'
@pytest.fixture(scope='function', autouse=True)
def setup_case(request):
def destroy_case():
from corm import annihilate_keyspace_tables, SESSIONS
annihilate_keyspace_tables('mykeyspace')
for keyspace_name, session in SESSIONS.copy().items():
if keyspace_name in ['global']:
continue
session.shutdown()
del SESSIONS[keyspace_name]
request.addfinalizer(destroy_case)
def test_initial_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
class TestModel(CORMBase):
__keyspace__ = 'mykeyspace'
something: str
other: str
register_table(TestModel)
sync_schema()
one = TestModel('one', 'two')
two = TestModel('one', 'two')
three = TestModel('one', 'three')
insert([one, two, three])
def test_keyspace_api():
import hashlib
import uuid
from corm import register_table, insert, sync_schema, \
keyspace_exists, keyspace_destroy, keyspace_create
from corm.datatypes import CassandraKeyspaceStrategy
from corm.models import CORMBase
# Keyspaces seem to have to start with Alpha-Letters
keyspace_name = hashlib.md5(str(uuid.uuid4()).encode(ENCODING)).hexdigest()
keyspace_name = f'abc_{keyspace_name}'
assert keyspace_exists(keyspace_name) is False
keyspace_create(keyspace_name, CassandraKeyspaceStrategy.Simple)
assert keyspace_exists(keyspace_name) is True
keyspace_destroy(keyspace_name)
assert keyspace_exists(keyspace_name) is False
class TestModelKeyspace(CORMBase):
__keyspace__ = keyspace_name
item: str
register_table(TestModelKeyspace)
assert keyspace_exists(keyspace_name) is False
sync_schema()
assert keyspace_exists(keyspace_name) is True
one = TestModelKeyspace('one')
insert([one])
keyspace_destroy(keyspace_name)
assert keyspace_exists(keyspace_name) is False
def test_float_api():
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
class TestModelFloat(CORMBase):
__keyspace__ = 'mykeyspace'
input_one: float
register_table(TestModelFloat)
sync_schema()
data = 324.593998934
one = TestModelFloat(data)
insert([one])
for idx, entry in enumerate(select(TestModelFloat)):
assert entry.input_one == data
def test_boolean_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
from datetime import datetime
class TestModelBoolean(CORMBase):
__keyspace__ = 'mykeyspace'
item: str
created: datetime
value: bool
register_table(TestModelBoolean)
sync_schema()
one = TestModelBoolean('one', datetime.utcnow(), True)
two = TestModelBoolean('two', datetime.utcnow(), False)
insert([one, two])
def test_datetime_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
from datetime import datetime
class TestModelDatetime(CORMBase):
__keyspace__ = 'mykeyspace'
item: str
created: datetime
register_table(TestModelDatetime)
sync_schema()
one = TestModelDatetime('one', datetime.utcnow())
two = TestModelDatetime('two', datetime.utcnow())
insert([one, two])
def test_set_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
from corm.annotations import Set
class TestModelSet(CORMBase):
__keyspace__ = 'mykeyspace'
something: str
other: Set
register_table(TestModelSet)
sync_schema()
one = TestModelSet('one', {'first'})
two = TestModelSet('two', {'last', 'second-to-last'})
three = TestModelSet('three', {'last', 'second-to-last', 'last'})
four = TestModelSet('four', ['one', 'two', 'three', 'four'])
insert([one, two, three, four])
def test_select_api():
import random
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
from corm.annotations import Set
from datetime import datetime
MAX_INT = 1000
class TestModelSelect(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
register_table(TestModelSelect)
sync_schema()
insert_later = []
values = []
for idx in range(0, 100):
values.append({
'random_number': random.randint(0, MAX_INT),
'created': datetime.utcnow()
})
entry = TestModelSelect(values[-1]['random_number'], values[-1]['created'])
insert_later.append(entry)
if len(insert_later) > 20:
insert(insert_later)
insert_later = []
insert(insert_later)
for idx, entry in enumerate(select(TestModelSelect, fetch_size=100)):
assert isinstance(entry, TestModelSelect)
# Order is not consistent
# assert entry.random_number == values[idx]['random_number']
# assert entry.created == values[idx]['created']
assert idx > 0
def test_select_where_api():
import random
from corm import register_table, insert, sync_schema, select, where
from corm.models import CORMBase
from datetime import datetime
MAX_INT = 99999
class TestModelSelectSource(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
one: str
two: str
class TestModelSelectPivot(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
one: str
two: str
source: TestModelSelectSource
# TODO: Build UserType integration
# register_table(TestModelSelectSource)
# register_table(TestModelSelectPivot)
def test_alter_table_api():
from corm import register_table, insert, sync_schema, select, obtain_session
from corm.models import CORMBase
from datetime import datetime
# Create Table or Delete Column on existing Table
class TestModelAlter(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
register_table(TestModelAlter)
sync_schema()
COL_CQL = f'''
SELECT
column_name, type
FROM
system_schema.columns
WHERE
table_name = '{TestModelAlter._corm_details.table_name}'
AND
keyspace_name = '{TestModelAlter._corm_details.keyspace}'
'''
rows = [(row.column_name, row.type) for row in obtain_session('mykeyspace').execute(COL_CQL)]
assert len(rows) == 3
# Add Column on existing Table
class TestModelAlter(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
new_column: str
register_table(TestModelAlter)
sync_schema()
rows = [(row.column_name, row.type) for row in obtain_session('mykeyspace').execute(COL_CQL)]
assert len(rows) == 4
def test_not_ordered_by_pk_field():
import random
from corm import register_table, insert, sync_schema, select, obtain_session
from corm.models import CORMBase
from datetime import datetime
class TestNotOrderedByPkField(CORMBase):
__keyspace__ = 'mykeyspace'
__primary_keys__ = ['one', 'two', 'three']
random_number: int
created: datetime
one: str
two: str
three: str
register_table(TestNotOrderedByPkField)
sync_schema()
first_entry = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'beta')
gamma = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'gamma')
delta = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'delta')
second_entry = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'alpha')
insert([first_entry, gamma, delta, second_entry])
for idx, entry in enumerate(select(TestNotOrderedByPkField)):
if idx == 0:
assert entry.three != 'alpha'
def test_ordered_by_pk_field():
import random
from corm import register_table, insert, sync_schema, select, obtain_session
from corm.models import CORMBase
from corm.datatypes import TableOrdering
from datetime import datetime
class TestOrderedByPkField(CORMBase):
__keyspace__ = 'mykeyspace'
__primary_keys__ = ['one', 'two', 'three']
__ordered_by_primary_keys__ = TableOrdering.DESC
random_number: int
created: datetime
one: str
two: str
three: str
register_table(TestOrderedByPkField)
sync_schema()
first_entry = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'beta')
second_entry = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'alpha')
gamma = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'gamma')
delta = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'delta')
insert([first_entry, second_entry, delta, gamma])
for idx, entry in enumerate(select(TestOrderedByPkField)):
if idx == 0:
assert entry.three == 'alpha'
elif idx == 1:
assert entry.three == 'beta'
elif idx == 2:
assert entry.three == 'delta'
elif idx == 3:
assert entry.three == 'gamma'
def test_corm_auth():
import os
os.environ['CLUSTER_PORT'] = '9043'
os.environ['CLUSTER_USERNAME'] = 'cassandra'
os.environ['CLUSTER_PASSWORD'] = 'cassandra'
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
class TestCORMAuth(CORMBase):
one: str
__keyspace__ = 'test_corm_auth'
register_table(TestCORMAuth)
sync_schema()
def test_corm_enum():
import enum
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
class OptionList(enum.Enum):
One = 'one'
Two = 'two'
class TestCormEnum(CORMBase):
__keyspace__ = 'test_corm_enum'
option: OptionList
register_table(TestCormEnum)
sync_schema()
first = TestCormEnum(OptionList.One)
second = TestCormEnum(OptionList.Two)
insert([first, second])
for idx, entry in enumerate(select(TestCormEnum)):
assert entry.option in OptionList.__members__.values()
def test_corm_where():
import enum
from corm import register_table, insert, sync_schema, select, where, cp, Operator
from corm.models import CORMBase
class OptionList(enum.Enum):
One = 'one'
Two = 'two'
class TestCORMWhere(CORMBase):
__keyspace__ = 'test_corm_where'
option: OptionList
score: int
register_table(TestCORMWhere)
sync_schema()
one = TestCORMWhere(OptionList.One, 1)
two = TestCORMWhere(OptionList.One, 2)
three = TestCORMWhere(OptionList.Two, 3)
four = TestCORMWhere(OptionList.Two, 4)
insert([one, two, three, four])
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'score', 4)])):
assert idx == 0
assert entry.score == 4
assert entry.option == OptionList.Two
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'score', 1)])):
assert idx == 0
assert entry.score == 1
assert entry.option == OptionList.One
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'option', OptionList.One)])):
assert idx in [0, 1]
assert entry.score in [1, 2]
assert entry.option == OptionList.One
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'option', OptionList.Two)])):
assert idx in [0, 1]
assert entry.score in [3, 4]
assert entry.option == OptionList.Two
def test_corm_uuid():
import uuid
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
class TestCORMUUID(CORMBase):
__keyspace__ = 'mykeyspace'
identity_test: uuid.UUID
register_table(TestCORMUUID)
sync_schema()
one = TestCORMUUID(uuid.uuid4())
insert([one])
for entry in select(TestCORMUUID):
assert isinstance(entry.identity_test, uuid.UUID)
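# The pattern exercised by all of these tests, as a minimal sketch:
#
# from corm import register_table, insert, sync_schema, select
# from corm.models import CORMBase
#
# class Example(CORMBase):
#     __keyspace__ = 'mykeyspace'
#     name: str
#
# register_table(Example)       # declare the table
# sync_schema()                 # create keyspace/table in Cassandra
# insert([Example('one')])      # write
# rows = list(select(Example))  # read back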
| 29.294118
| 110
| 0.67245
| 1,402
| 12,450
| 5.767475
| 0.13766
| 0.034628
| 0.027702
| 0.040811
| 0.538214
| 0.51348
| 0.466362
| 0.433094
| 0.421222
| 0.376824
| 0
| 0.011997
| 0.23004
| 12,450
| 424
| 111
| 29.363208
| 0.831525
| 0.029317
| 0
| 0.473016
| 0
| 0
| 0.06766
| 0.008696
| 0
| 0
| 0
| 0.002358
| 0.095238
| 1
| 0.053968
| false
| 0.003175
| 0.168254
| 0
| 0.492063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54f3bbb19576152c565203e49a32298c3f423ec9
| 6,337
|
py
|
Python
|
src/utilities/getInfo.py
|
UCSB-dataScience-ProjectGroup/movie_rating_prediction
|
c0c29c0463dccc6ad286bd59e77993fdf0d05fb2
|
[
"RSA-MD"
] | 2
|
2017-12-15T23:10:11.000Z
|
2018-05-07T04:18:03.000Z
|
src/utilities/getInfo.py
|
UCSB-dataScience-ProjectGroup/movie_rating_prediction
|
c0c29c0463dccc6ad286bd59e77993fdf0d05fb2
|
[
"RSA-MD"
] | 1
|
2018-02-26T06:23:32.000Z
|
2018-02-27T03:34:01.000Z
|
src/utilities/getInfo.py
|
UCSB-dataScience-ProjectGroup/movie_rating_prediction
|
c0c29c0463dccc6ad286bd59e77993fdf0d05fb2
|
[
"RSA-MD"
] | 2
|
2017-10-19T21:50:24.000Z
|
2018-01-01T03:40:35.000Z
|
import json
import os
from utilities.SaveLoadJson import SaveLoadJson as SLJ
from utilities.LineCount import LineCount as LC
import subprocess
from geolite2 import geolite2
class getData:
#Get Data Functions ------------------------------------------------------
@staticmethod
def getDATA():
result = {"requests":{},
"time":'',
"cpuload":'',
"uptime":'',
"temp":'',
"ip":''}
result["requests"]=getData.getRequests()
time = getData.getTime().split('\t')
result["time"] = time[0]
result["cpuload"]=time[1]
result["uptime"]=getData.getUptime()
result["temp"]=getData.getTemp()
result["ip"]=getData.getIP()
return json.dumps(result)
@staticmethod
def getRequests():
data = SLJ.load('dataStore.txt')
return {"totalRequests":str(data["totalRequests"]),
"totalQueries":str(data["totalQueries"]),
"totalAdjusts":str(data["totalAdjusts"])}
@staticmethod
def getTime():
proc = subprocess.Popen(['uptime'],stdout=subprocess.PIPE, shell=False)
(out, err) = proc.communicate()
return (str(out)[1:9] + '\t' +
str(float(str(out).split(',')[4])*100)+'%')
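        # NOTE: this parsing is tightly coupled to the `uptime` output
        # format, e.g. " 12:30:01 up 1 day,  2:34,  1 user,  load average:
        # 0.10, 0.05, 0.01"; [1:9] extracts the clock time and the fifth
        # comma-separated field is scaled into a load percentage. A
        # different uptime format will shift these offsets.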
@staticmethod
def getUptime():
proc = subprocess.Popen(['uptime', '-p'],stdout=subprocess.PIPE, shell=False)
(out, err) = proc.communicate()
return str(out)
@staticmethod
def getTemp():
proc = subprocess.Popen(['vcgencmd', 'measure_temp'],stdout=subprocess.PIPE, shell=False)
(out,err) = proc.communicate()
return str(out)[5:-1]
@staticmethod
def getIP():
proc = subprocess.Popen(['hostname', '-I'],stdout=subprocess.PIPE, shell=False)
(out, err) = proc.communicate()
return str(out)
#Get Access Functions ---------------------------------------------------
@staticmethod
def getAccess():
result={"Countries":dict(),
"CountrySrs":dict(),
"devices":dict(),
"mostRecentSearch":'',
"mostRecentAcc":'',
"mostRecentIP":'',
"recentSearches":[],
"Users":0}
lastNum = 200
total=0
mostRecentIP = ''
mostRecentAcc = ''
mostRecentSearch = ''
Cname='Unknown'
Sname='Unknown'
Ctyname='Unknown'
ips=dict()
logFile = 'utilities/access.log'
newFile='utilities/new.log'
#f = open(newFile, 'w')
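        # Each access.log line is assumed to be ';'-delimited:
        #   <ip>;<timestamp>;<status>;<request line>;<user agent>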
with open(logFile, 'r') as lf:
for temp in lf:
line = temp.split(';')
if len(line) > 1:
if line[2] == '200':
if 'GET /find' in line[3]:
#f.write(temp)
mostRecentIP=line[0]
mostRecentAcc=line[1]
reader = geolite2.reader()
loc = reader.get(line[0])
Cname = loc['country']['names']['en']
if 'subdivisions' in loc:
Sname = loc['subdivisions'][0]['names']['en']
else:
Sname='Unknown'
if 'city' in loc:
Ctyname = loc['city']['names']['en']
else:
Ctyname='Unknown'
if Cname not in result["Countries"]:
result["Countries"][Cname]=dict()
result["CountrySrs"][Cname]=0
if Sname not in result["Countries"][Cname]:
result["Countries"][Cname][Sname]=dict()
if Ctyname not in result["Countries"][Cname][Sname]:
result["Countries"][Cname][Sname][Ctyname] = []
result["CountrySrs"][Cname]+=1
total+=1
search = (line[3].split(' ')[1][6:]).replace('%20',' ')
mostRecentSearch=search
if search not in result["Countries"][Cname][Sname][Ctyname]:
result["Countries"][Cname][Sname][Ctyname].append(search)
if len(result["Countries"][Cname][Sname][Ctyname]) >= lastNum:
result["Countries"][Cname][Sname][Ctyname].pop(0)
if search not in result["recentSearches"]:
result["recentSearches"].insert(0,search)
if len(result["recentSearches"]) >= lastNum:
result["recentSearches"].pop(-1)
ips[line[0]]=1
device=(line[4].split('('))
if len(device)>1:
device=device[1]
else:
device="Unknown"
if device not in result["devices"]:
result["devices"][device]=0
result["devices"][device]+=1
#f.close()
#Most recent stuff
result["mostRecentIP"]=mostRecentIP
result["mostRecentAcc"]=mostRecentAcc
result["mostRecentSearch"]=mostRecentSearch
result["mostRecentLoc"]=str(Ctyname+', '+Sname+', '+Cname)
        #Unique Users
        result["Users"] = len(ips)
#Device percents
for key, value in result["devices"].items():
percnt = (float(value)/float(total))*100
result["devices"][key]=format(percnt, '.2f')
#Country percents
for key, value in result["CountrySrs"].items():
percnt = (float(value)/float(total))*100
result["CountrySrs"][key]=format(percnt,'.2f')
#os.system("sudo mv -f "+newFile+" "+logFile)
return json.dumps(result)
| 37.276471
| 97
| 0.447373
| 527
| 6,337
| 5.377609
| 0.254269
| 0.058222
| 0.063514
| 0.06175
| 0.225476
| 0.175371
| 0.117149
| 0.117149
| 0.08892
| 0.08892
| 0
| 0.014523
| 0.402399
| 6,337
| 169
| 98
| 37.497041
| 0.733826
| 0.046236
| 0
| 0.183206
| 0
| 0
| 0.121644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053435
| false
| 0
| 0.045802
| 0
| 0.160305
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54f89b5cd05a9ee6ba8e82764ddc7f2a5b7aea7d
| 1,689
|
py
|
Python
|
eval/metrics.py
|
RecoHut-Stanzas/S168471
|
7e0ac621c36f839e1df6876ec517d0ad00672790
|
[
"BSD-3-Clause"
] | 37
|
2020-06-15T02:04:37.000Z
|
2022-02-09T06:26:42.000Z
|
eval/metrics.py
|
RecoHut-Stanzas/S168471
|
7e0ac621c36f839e1df6876ec517d0ad00672790
|
[
"BSD-3-Clause"
] | 5
|
2020-08-06T13:16:34.000Z
|
2022-02-04T07:29:29.000Z
|
eval/metrics.py
|
RecoHut-Stanzas/S168471
|
7e0ac621c36f839e1df6876ec517d0ad00672790
|
[
"BSD-3-Clause"
] | 11
|
2020-09-01T23:08:51.000Z
|
2022-02-09T06:26:44.000Z
|
import torch
def ndcg_binary_at_k_batch_torch(X_pred, heldout_batch, k=100, device='cpu'):
"""
    Normalized Discounted Cumulative Gain@k for predictions [B, I] and ground-truth [B, I], with binary relevance.
ASSUMPTIONS: all the 0's in heldout_batch indicate 0 relevance.
"""
batch_users = X_pred.shape[0] # batch_size
_, idx_topk = torch.topk(X_pred, k, dim=1, sorted=True)
tp = 1. / torch.log2(torch.arange(2, k + 2, device=device).float())
heldout_batch_nonzero = (heldout_batch > 0).float()
DCG = (heldout_batch_nonzero[torch.arange(batch_users, device=device).unsqueeze(1), idx_topk] * tp).sum(dim=1)
heldout_nonzero = (heldout_batch > 0).sum(dim=1) # num. of non-zero items per batch. [B]
IDCG = torch.tensor([(tp[:min(n, k)]).sum() for n in heldout_nonzero]).to(device)
return DCG / IDCG
def recall_at_k_batch_torch(X_pred, heldout_batch, k=100):
"""
Recall@k for predictions [B, I] and ground-truth [B, I].
"""
batch_users = X_pred.shape[0]
_, topk_indices = torch.topk(X_pred, k, dim=1, sorted=False) # [B, K]
X_pred_binary = torch.zeros_like(X_pred)
if torch.cuda.is_available():
X_pred_binary = X_pred_binary.cuda()
X_pred_binary[torch.arange(batch_users).unsqueeze(1), topk_indices] = 1
X_true_binary = (heldout_batch > 0).float() # .toarray() # [B, I]
k_tensor = torch.tensor([k], dtype=torch.float32)
if torch.cuda.is_available():
X_true_binary = X_true_binary.cuda()
k_tensor = k_tensor.cuda()
tmp = (X_true_binary * X_pred_binary).sum(dim=1).float()
recall = tmp / torch.min(k_tensor, X_true_binary.sum(dim=1).float())
return recall
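# Quick sanity check (hypothetical data, CPU only):
#   X_pred  = torch.tensor([[0.9, 0.1, 0.8, 0.2, 0.3]])
#   heldout = torch.tensor([[1., 0., 1., 0., 0.]])
#   ndcg_binary_at_k_batch_torch(X_pred, heldout, k=2)  # -> tensor([1.])
#   recall_at_k_batch_torch(X_pred, heldout, k=2)       # -> tensor([1.])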
| 44.447368
| 118
| 0.674956
| 271
| 1,689
| 3.9631
| 0.284133
| 0.055866
| 0.05121
| 0.024209
| 0.284916
| 0.251397
| 0.16946
| 0.16946
| 0.122905
| 0.063315
| 0
| 0.020231
| 0.18058
| 1,689
| 37
| 119
| 45.648649
| 0.75578
| 0.184725
| 0
| 0.16
| 0
| 0
| 0.002242
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54f8ec657caa5b90b66baca8ce435c82f8e1413e
| 5,029
|
py
|
Python
|
simba/run_dash_tkinter.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 172
|
2019-12-18T22:19:42.000Z
|
2022-03-29T01:58:25.000Z
|
simba/run_dash_tkinter.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 165
|
2020-01-10T19:05:16.000Z
|
2022-03-31T16:08:36.000Z
|
simba/run_dash_tkinter.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 80
|
2019-12-20T00:01:43.000Z
|
2022-03-29T16:20:10.000Z
|
# All credit to https://stackoverflow.com/questions/46571448/tkinter-and-a-html-file - thanks DELICA - https://stackoverflow.com/users/7027346/delica
from cefpython3 import cefpython as cef
import ctypes
try:
import tkinter as tk
from tkinter import messagebox
except ImportError:
import Tkinter as tk
import sys
import platform
import logging as _logging
# Fix for PyCharm hints warnings
WindowUtils = cef.WindowUtils()
# Platforms
WINDOWS = (platform.system() == "Windows")
LINUX = (platform.system() == "Linux")
MAC = (platform.system() == "Darwin")
# Globals
logger = _logging.getLogger("tkinter_.py")
url = "localhost:8050/"
class MainFrame(tk.Frame):
def __init__(self, root):
self.closing = False
self.browser = None
# Root
root.geometry("900x640")
tk.Grid.rowconfigure(root, 0, weight=1)
tk.Grid.columnconfigure(root, 0, weight=1)
# MainFrame
tk.Frame.__init__(self, root)
self.master.title('SimBA Dashboard')
self.master.protocol("WM_DELETE_WINDOW", self.on_close)
self.bind("<Configure>", self.on_configure)
self.bind("<FocusIn>", self.on_focus_in)
self.bind("<FocusOut>", self.on_focus_out)
self.focus_set()
# Pack MainFrame
self.pack(fill=tk.BOTH, expand=tk.YES)
def embed_browser(self):
window_info = cef.WindowInfo()
rect = [0, 0, self.winfo_width(), self.winfo_height()]
window_info.SetAsChild(self.get_window_handle(), rect)
self.browser = cef.CreateBrowserSync(window_info,
url=url) #todo
assert self.browser
self.browser.SetClientHandler(LoadHandler(self))
self.browser.SetClientHandler(FocusHandler(self))
self.message_loop_work()
def get_window_handle(self):
if self.winfo_id() > 0:
return self.winfo_id()
else:
raise Exception("Couldn't obtain window handle")
def message_loop_work(self):
cef.MessageLoopWork()
self.after(10, self.message_loop_work)
def on_configure(self, event):
width = event.width
height = event.height
if self.browser:
if WINDOWS:
ctypes.windll.user32.SetWindowPos(
self.browser.GetWindowHandle(), 0,
0, 0, width, height, 0x0002)
elif LINUX:
self.browser.SetBounds(0, 0, width, height)
self.browser.NotifyMoveOrResizeStarted()
if not self.browser:
self.embed_browser()
def on_focus_in(self, _):
logger.debug("BrowserFrame.on_focus_in")
if self.browser:
self.browser.SetFocus(True)
self.focus_set()
def on_focus_out(self, _):
logger.debug("BrowserFrame.on_focus_out")
if self.browser:
self.browser.SetFocus(False)
def on_close(self):
if self.browser:
self.browser.CloseBrowser(True)
self.clear_browser_references()
self.destroy()
self.master.destroy()
def get_browser(self):
if self.browser:
return self.browser
return None
def clear_browser_references(self):
self.browser = None
class LoadHandler(object):
def __init__(self, browser_frame):
self.browser_frame = browser_frame
class FocusHandler(object):
def __init__(self, browser):
self.browser = browser
def OnTakeFocus(self, next_component, **_):
logger.debug("FocusHandler.OnTakeFocus, next={next}"
.format(next=next_component))
def OnSetFocus(self, source, **_):
logger.debug("FocusHandler.OnSetFocus, source={source}"
.format(source=source))
return False
def OnGotFocus(self, **_):
"""Fix CEF focus issues (#255). Call browser frame's focus_set
to get rid of type cursor in url entry widget."""
logger.debug("FocusHandler.OnGotFocus")
self.browser.focus_set()
# if __name__ == '__main__':
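# NOTE: the guard above is commented out in the original, so everything
# below runs at import time and immediately launches the dashboard window.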
logger.setLevel(_logging.INFO)
stream_handler = _logging.StreamHandler()
formatter = _logging.Formatter("[%(filename)s] %(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("CEF Python {ver}".format(ver=cef.__version__))
logger.info("Python {ver} {arch}".format(
ver=platform.python_version(), arch=platform.architecture()[0]))
logger.info("Tk {ver}".format(ver=tk.Tcl().eval('info patchlevel')))
assert cef.__version__ >= "55.3", "CEF Python v55.3+ required to run this"
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
root = tk.Tk()
app = MainFrame(root)
def on_closing():
if messagebox.askokcancel("Quit", "Do you want to quit?"):
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
# Tk must be initialized before CEF otherwise fatal error (Issue #306)
cef.Initialize()
root.mainloop()
# app.mainloop()
cef.Shutdown()
| 30.478788
| 149
| 0.644064
| 596
| 5,029
| 5.263423
| 0.342282
| 0.084157
| 0.02869
| 0.035065
| 0.079056
| 0.042078
| 0
| 0
| 0
| 0
| 0
| 0.015686
| 0.239411
| 5,029
| 164
| 150
| 30.664634
| 0.804444
| 0.096838
| 0
| 0.07563
| 0
| 0
| 0.101041
| 0.026811
| 0
| 0
| 0.001329
| 0.006098
| 0.016807
| 1
| 0.134454
| false
| 0
| 0.07563
| 0
| 0.268908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54fb3d7c53a19a5375f0b43976b42347774b6cca
| 1,010
|
py
|
Python
|
domain_data/mujoco_worlds/make_xml.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
domain_data/mujoco_worlds/make_xml.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
domain_data/mujoco_worlds/make_xml.py
|
sfpd/rlreloaded
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
[
"MIT"
] | null | null | null |
import re
def do_substitution(in_lines):
lines_iter = iter(in_lines)
defn_lines = []
while True:
try:
            line = next(lines_iter)  # works on both Python 2 and 3
except StopIteration:
raise RuntimeError("didn't find line starting with ---")
if line.startswith('---'):
break
else:
defn_lines.append(line)
d = {}
exec("\n".join(defn_lines), d)
    pat = re.compile(r"\$\((.+?)\)")
out_lines = []
for line in lines_iter:
matches = pat.finditer(line)
for m in matches:
line = line.replace(m.group(0), str(eval(m.group(1),d)))
out_lines.append(line)
return out_lines
from glob import glob
import os.path as osp
infiles = glob(osp.join(osp.dirname(__file__),"*.xml.in"))
for fname in infiles:
with open(fname,"r") as fh:
in_lines = fh.readlines()
out_lines = do_substitution(in_lines)
outfname = fname[:-3]
with open(outfname,"w") as fh:
fh.writelines(out_lines)
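# Example (hypothetical world.xml.in): definitions above '---' are exec'd,
# then each $(expr) is evaluated against them:
#   n = 3
#   ---
#   <geom size="$(n + 1)"/>
# Running this script would write world.xml containing <geom size="4"/>.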
| 25.897436
| 68
| 0.581188
| 135
| 1,010
| 4.192593
| 0.481481
| 0.061837
| 0.056537
| 0.074205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004138
| 0.282178
| 1,010
| 38
| 69
| 26.578947
| 0.776552
| 0
| 0
| 0
| 0
| 0
| 0.059406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.090909
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54fcf0226ece66aeec4bb6bba4646c87e745e2e5
| 799
|
py
|
Python
|
hilton_sign_in.py
|
bmintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-12T10:33:13.000Z
|
2019-02-24T05:01:40.000Z
|
hilton_sign_in.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | null | null | null |
hilton_sign_in.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-24T08:16:59.000Z
|
2019-02-24T04:41:30.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
import sys
import urllib.parse
import selenium.webdriver
def exit():
driver.quit()
sys.exit(0)
driver = selenium.webdriver.Firefox()
# for some reason, detectportal.firefox.com and connectivitycheck.gstatic.com are not blocked
# therefore, they cannot be used to detect connectivity
# we instead visit another site that is known not to ever have TLS
driver.get('http://neverssl.com')
if 'neverssl.com' in urllib.parse.urlparse(driver.current_url).netloc:
exit()
driver.find_element_by_css_selector('label[for="promo_button"]').click()
driver.find_element_by_css_selector('input[alt="Next"]').click()
driver.find_element_by_css_selector('#PromotionCode').send_keys('lobby18')
driver.find_element_by_css_selector('input[alt="Connect"]').click()
exit()
| 30.730769
| 93
| 0.779725
| 119
| 799
| 5.07563
| 0.630252
| 0.066225
| 0.112583
| 0.125828
| 0.241722
| 0.241722
| 0.192053
| 0.125828
| 0
| 0
| 0
| 0.006859
| 0.08761
| 799
| 25
| 94
| 31.96
| 0.821674
| 0.310388
| 0
| 0.133333
| 0
| 0
| 0.208791
| 0.045788
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54fd38f1410793bf1398c7ca975380689133f595
| 1,539
|
py
|
Python
|
src/figures/trends/leaf_response.py
|
rhyswhitley/savanna_iav
|
4eadf29a4e9c05d0b14d3b9c973eb8db3ea7edba
|
[
"CC0-1.0"
] | null | null | null |
src/figures/trends/leaf_response.py
|
rhyswhitley/savanna_iav
|
4eadf29a4e9c05d0b14d3b9c973eb8db3ea7edba
|
[
"CC0-1.0"
] | null | null | null |
src/figures/trends/leaf_response.py
|
rhyswhitley/savanna_iav
|
4eadf29a4e9c05d0b14d3b9c973eb8db3ea7edba
|
[
"CC0-1.0"
] | 1
|
2019-09-01T04:15:21.000Z
|
2019-09-01T04:15:21.000Z
|
#!/usr/bin/env python
import os
from collections import OrderedDict
import cPickle as pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.cm import get_cmap
from matplotlib import style
from scipy import stats
from scipy import integrate
def plot_monthly_response(norm, pert):
plot_grid = gridspec.GridSpec(4, 1, hspace=0.1)
ax1 = plt.subplot(plot_grid[0])
ax2 = plt.subplot(plot_grid[1])
ax3 = plt.subplot(plot_grid[2])
ax4 = plt.subplot(plot_grid[3])
# Stomatal conductance
ax1.plot(norm["Gtree"].values)
ax1.plot(pert["Gtree"].values)
# Leaf transpiration
ax2.plot(norm["Etree"].values)
ax2.plot(pert["Etree"].values)
# Leaf assimilation
ax3.plot(norm["Atree"].values)
ax3.plot(pert["Atree"].values)
ax4.plot(norm["LAItree"].values)
ax4.plot(pert["LAItree"].values)
ax4.plot(norm["LAIgrass"].values)
ax4.plot(pert["LAIgrass"].values)
plt.show()
return 1
def main():
data_dict = pickle.load(open(PKLPATH, 'rb'))
year_agg = lambda x: x.groupby(level=['month', 'hour']).mean()
data_mean_year = [year_agg(df) \
for df in OrderedDict(data_dict).values()]
# **FOR LOOP WILL GO HERE
plot_monthly_response(data_mean_year[3], data_mean_year[6])
return 1
if __name__ == "__main__":
FILEPATH = "~/Savanna/Data/HowardSprings_IAV/pickled/agg/mean_monthly_leaf.pkl"
PKLPATH = os.path.expanduser(FILEPATH)
main()
| 23.676923
| 83
| 0.684211
| 219
| 1,539
| 4.666667
| 0.420091
| 0.039139
| 0.054795
| 0.07045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020717
| 0.184535
| 1,539
| 64
| 84
| 24.046875
| 0.793626
| 0.066277
| 0
| 0.05
| 0
| 0
| 0.101257
| 0.046089
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.275
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
54fe1eee5bca5dc248b6bf225d479bd8fc671965
| 1,041
|
py
|
Python
|
app/index.py
|
vprnet/school-closings
|
04c63170ea36cabe0a3486f0e58830952e1fd0a8
|
[
"Apache-2.0"
] | null | null | null |
app/index.py
|
vprnet/school-closings
|
04c63170ea36cabe0a3486f0e58830952e1fd0a8
|
[
"Apache-2.0"
] | null | null | null |
app/index.py
|
vprnet/school-closings
|
04c63170ea36cabe0a3486f0e58830952e1fd0a8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python2.7
from flask import Flask
import sys
from flask_frozen import Freezer
from upload_s3 import set_metadata
from config import AWS_DIRECTORY
app = Flask(__name__)
app.config.from_object('config')
from views import *
# Serving from s3 leads to some complications in how static files are served
if len(sys.argv) > 1:
if sys.argv[1] == 'build':
PROJECT_ROOT = '/' + AWS_DIRECTORY
elif sys.argv[1] == 'test':
PROJECT_ROOT = '/www.vpr.net/' + AWS_DIRECTORY
else:
PROJECT_ROOT = '/'
class WebFactionMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = PROJECT_ROOT
return self.app(environ, start_response)
app.wsgi_app = WebFactionMiddleware(app.wsgi_app)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
app.debug = True
freezer = Freezer(app)
freezer.freeze()
set_metadata()
else:
app.run(debug=True)
| 24.209302
| 76
| 0.668588
| 142
| 1,041
| 4.640845
| 0.450704
| 0.053111
| 0.060698
| 0.036419
| 0.039454
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01107
| 0.21902
| 1,041
| 42
| 77
| 24.785714
| 0.799508
| 0.096061
| 0
| 0.066667
| 0
| 0
| 0.057508
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07010f1430c53be8c3d42e4a620d3fc295e28964
| 1,799
|
py
|
Python
|
proxyclient/linux.py
|
modwizcode/m1n1
|
96d133e854dfe878ea39f9c994545a2026a446a8
|
[
"MIT"
] | 1
|
2021-06-05T08:30:21.000Z
|
2021-06-05T08:30:21.000Z
|
proxyclient/linux.py
|
modwizcode/m1n1
|
96d133e854dfe878ea39f9c994545a2026a446a8
|
[
"MIT"
] | null | null | null |
proxyclient/linux.py
|
modwizcode/m1n1
|
96d133e854dfe878ea39f9c994545a2026a446a8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from setup import *
payload = open(sys.argv[1], "rb").read()
dtb = open(sys.argv[2], "rb").read()
if len(sys.argv) > 3:
initramfs = open(sys.argv[3], "rb").read()
initramfs_size = len(initramfs)
else:
initramfs = None
initramfs_size = 0
compressed_size = len(payload)
compressed_addr = u.malloc(compressed_size)
dtb_addr = u.malloc(len(dtb))
print("Loading %d bytes to 0x%x..0x%x..." % (compressed_size, compressed_addr, compressed_addr + compressed_size))
iface.writemem(compressed_addr, payload, True)
print("Loading DTB to 0x%x..." % dtb_addr)
iface.writemem(dtb_addr, dtb)
kernel_size = 32 * 1024 * 1024
kernel_base = u.memalign(2 * 1024 * 1024, kernel_size)
print("Kernel_base: 0x%x" % kernel_base)
assert not (kernel_base & 0xffff)
if initramfs is not None:
initramfs_base = u.memalign(65536, initramfs_size)
print("Loading %d initramfs bytes to 0x%x..." % (initramfs_size, initramfs_base))
iface.writemem(initramfs_base, initramfs, True)
p.kboot_set_initrd(initramfs_base, initramfs_size)
if p.kboot_prepare_dt(dtb_addr):
print("DT prepare failed")
sys.exit(1)
#kernel_size = p.xzdec(compressed_addr, compressed_size)
#if kernel_size < 0:
#raise Exception("Decompression header check error!",)
#print("Uncompressed kernel size: %d bytes" % kernel_size)
print("Uncompressing...")
iface.dev.timeout = 40
kernel_size = p.gzdec(compressed_addr, compressed_size, kernel_base, kernel_size)
print(kernel_size)
if kernel_size < 0:
raise Exception("Decompression error!")
print("Decompress OK...")
p.dc_cvau(kernel_base, kernel_size)
p.ic_ivau(kernel_base, kernel_size)
print("Ready to boot")
daif = u.mrs(DAIF)
daif |= 0x3c0
u.msr(DAIF, daif)
print("DAIF: %x" % daif)
p.kboot_boot(kernel_base)
iface.ttymode()
| 24.310811
| 114
| 0.721512
| 269
| 1,799
| 4.635688
| 0.301115
| 0.096231
| 0.076985
| 0.067362
| 0.110666
| 0.070569
| 0.070569
| 0.070569
| 0
| 0
| 0
| 0.027724
| 0.137854
| 1,799
| 73
| 115
| 24.643836
| 0.776273
| 0.111173
| 0
| 0
| 0
| 0
| 0.128607
| 0
| 0
| 0
| 0.006901
| 0
| 0.022727
| 1
| 0
| false
| 0
| 0.022727
| 0
| 0.022727
| 0.227273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07027cec6982fe1f9197878d8796ee05b6d45b5e
| 1,313
|
py
|
Python
|
src/server.py
|
shizhongpwn/ancypwn
|
716146e4986c514754492c8503ab196eecb9466d
|
[
"MIT"
] | 1
|
2021-06-29T03:41:27.000Z
|
2021-06-29T03:41:27.000Z
|
src/server.py
|
shizhongpwn/ancypwn
|
716146e4986c514754492c8503ab196eecb9466d
|
[
"MIT"
] | null | null | null |
src/server.py
|
shizhongpwn/ancypwn
|
716146e4986c514754492c8503ab196eecb9466d
|
[
"MIT"
] | 1
|
2021-06-18T05:36:28.000Z
|
2021-06-18T05:36:28.000Z
|
import json
import os
import multiprocessing
import struct
import importlib
from socketserver import TCPServer, StreamRequestHandler
class PluginNotFoundError(Exception):
    """Raised when the required terminal plugin is not installed."""
    pass

def plugin_module_import(name):
    try:
        return importlib.import_module(name)
    except ModuleNotFoundError:
        prompt = 'plugin {} not found, please install it first.\n'.format(name)
        prompt += 'try following:\n\tpip3 install {}'.format(name)
        raise PluginNotFoundError(prompt)
class NotificationHandler(StreamRequestHandler):
def handle(self):
length = struct.unpack('<I', self.request.recv(4))[0]
json_content = self.request.recv(length)
content = json.loads(json_content)
terminal = content['terminal']
if content['exec'] != '':
command = 'ancypwn attach -c \'{}\''.format(content['exec'])
else:
command = 'ancypwn attach'
realname = 'ancypwn_terminal_{}'.format(terminal)
mod = plugin_module_import(realname)
mod.run(command)
class ServerProcess(multiprocessing.Process):
def __init__(self, port, *args, **kwargs):
super(ServerProcess, self).__init__(*args, **kwargs)
self.port = port
def run(self):
self.server = TCPServer(('', self.port), NotificationHandler)
self.server.serve_forever()
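# Typical use (port value is hypothetical):
#   ServerProcess(15111).start()
# spawns a background process that listens on the port and opens an
# `ancypwn attach` terminal whenever a notification arrives.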
| 31.261905
| 79
| 0.657273
| 140
| 1,313
| 6.035714
| 0.478571
| 0.028402
| 0.042604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002947
| 0.224676
| 1,313
| 41
| 80
| 32.02439
| 0.827112
| 0
| 0
| 0
| 0
| 0
| 0.113481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.272727
| 0
| 0.484848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0703c45ad1851ac29ed524b5ee1616259ba14bdb
| 537
|
py
|
Python
|
pytorch_utils/collection_utils.py
|
c-hofer/pytorch_utils
|
55278272690937ff1180c8d549bc866a63a5ac51
|
[
"MIT"
] | null | null | null |
pytorch_utils/collection_utils.py
|
c-hofer/pytorch_utils
|
55278272690937ff1180c8d549bc866a63a5ac51
|
[
"MIT"
] | null | null | null |
pytorch_utils/collection_utils.py
|
c-hofer/pytorch_utils
|
55278272690937ff1180c8d549bc866a63a5ac51
|
[
"MIT"
] | null | null | null |
def keychain_value_iter(d, key_chain=None, allowed_values=None):
key_chain = [] if key_chain is None else list(key_chain).copy()
if not isinstance(d, dict):
if allowed_values is not None:
assert isinstance(d, allowed_values), 'Value needs to be of type {}!'.format(
allowed_values)
yield key_chain, d
else:
for k, v in d.items():
yield from keychain_value_iter(
v,
key_chain + [k],
allowed_values=allowed_values)
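# Example:
#   list(keychain_value_iter({'a': {'b': 1}, 'c': 2}))
#   -> [(['a', 'b'], 1), (['c'], 2)]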
| 38.357143
| 89
| 0.588454
| 72
| 537
| 4.166667
| 0.444444
| 0.16
| 0.113333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.324022
| 537
| 14
| 90
| 38.357143
| 0.826446
| 0
| 0
| 0
| 0
| 0
| 0.053903
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
070792428b154808490c0fc141036d69c221ccfb
| 2,981
|
py
|
Python
|
security_monkey/watchers/vpc/vpn.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 4,258
|
2015-01-04T22:06:10.000Z
|
2022-03-31T23:40:27.000Z
|
security_monkey/watchers/vpc/vpn.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 1,013
|
2015-01-12T02:31:03.000Z
|
2021-09-16T19:09:03.000Z
|
security_monkey/watchers/vpc/vpn.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 965
|
2015-01-11T21:06:07.000Z
|
2022-03-17T16:53:57.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.vpc.vpn
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Alex Cline <alex.cline@gmail.com> @alex.cline
"""
from cloudaux.aws.ec2 import describe_vpn_connections
from security_monkey.cloudaux_watcher import CloudAuxWatcher
from security_monkey.watcher import ChangeItem
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class VPN(CloudAuxWatcher):
index = 'vpn'
i_am_singular = 'VPN Connection'
i_am_plural = 'VPN Connections'
def __init__(self, *args, **kwargs):
super(VPN, self).__init__(*args, **kwargs)
self.honor_ephemerals = True
self.ephemeral_paths = [
'VgwTelemetry$*$LastStatusChange',
'VgwTelemetry$*$Status',
'VgwTelemetry$*$StatusMessage',
]
def get_name_from_list_output(self, item):
if item.get("Tags"):
for tag in item["Tags"]:
if tag["Key"] == "Name":
return "{} ({})".format(tag["Value"], item["VpnConnectionId"])
return item["VpnConnectionId"]
def list_method(self, **kwargs):
return describe_vpn_connections(**kwargs)
def get_method(self, item, **kwargs):
# Remove the CustomerGatewayConfiguration -- it's not necessary as all the details are present anyway:
item.pop("CustomerGatewayConfiguration", None)
# Set the ARN:
item["Arn"] = "arn:aws:ec2:{region}:{account}:vpn-connection/{id}".format(region=kwargs["region"],
account=kwargs["account_number"],
id=item["VpnConnectionId"])
# Cast the datetimes to something JSON serializable (ISO 8601 string):
for vgw in item.get("VgwTelemetry", []):
if vgw.get("LastStatusChange"):
vgw["LastStatusChange"] = vgw["LastStatusChange"].strftime(DATETIME_FORMAT)
return item
class VPNItem(ChangeItem):
def __init__(self, region=None, account=None, name=None, arn=None, config=None, source_watcher=None):
super(VPNItem, self).__init__(
index=VPN.index,
region=region,
account=account,
name=name,
arn=arn,
new_config=config if config else {},
source_watcher=source_watcher)
| 37.2625
| 115
| 0.614223
| 334
| 2,981
| 5.353293
| 0.47006
| 0.033557
| 0.014541
| 0.017897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004632
| 0.275746
| 2,981
| 79
| 116
| 37.734177
| 0.823529
| 0.300235
| 0
| 0
| 0
| 0
| 0.175872
| 0.07655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.069767
| 0.023256
| 0.395349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07092a144b2a5c13ba5ef9b78acec4dd39f5a15b
| 4,840
|
py
|
Python
|
soar_instruments/sami/adclass.py
|
soar-telescope/dragons-soar
|
a1c600074f532c1af6bd59bc2cc662a1aecd39c4
|
[
"MIT"
] | 1
|
2017-10-31T21:02:59.000Z
|
2017-10-31T21:02:59.000Z
|
soar_instruments/sami/adclass.py
|
soar-telescope/dragons-soar
|
a1c600074f532c1af6bd59bc2cc662a1aecd39c4
|
[
"MIT"
] | null | null | null |
soar_instruments/sami/adclass.py
|
soar-telescope/dragons-soar
|
a1c600074f532c1af6bd59bc2cc662a1aecd39c4
|
[
"MIT"
] | null | null | null |
import re
import astrodata
from astrodata import (astro_data_tag, TagSet, astro_data_descriptor,
returns_list)
from astrodata.fits import FitsLoader, FitsProvider
from ..soar import AstroDataSOAR
class AstroDataSAMI(AstroDataSOAR):
__keyword_dict = dict(data_section='DATASEC', gain='GAIN')
@staticmethod
def _matches_data(source):
return source[0].header.get('INSTRUME', '').upper() in {'SAMI', 'SAM'}
@astrodata.astro_data_tag
def _tag_instrument(self):
# QUESTIONS:
# 1) is SAMI always used with the SAM AO?
# 2) is SAMI used only at one telescopes or multiple ones?
# ANSWER:
# 1) SAMI is always used withing SAM but not always with AO.
# 2) SAMI and SAM are only used at SOAR Telescope.
return astrodata.TagSet(['SAMI', 'SAM'])
@astrodata.astro_data_tag
def _tag_flat(self):
# Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag.
# But since OBSTYPE is being used for both, not clear how that
# can be done right now.
obstype = self.phu.get('OBSTYPE', '')
if 'FLAT' in obstype:
return astrodata.TagSet(['FLAT', 'CAL', 'IMAGE'])
@astrodata.astro_data_tag
def _tag_twilight(self):
if self.phu.get('OBSTYPE') == 'SFLAT':
return astrodata.TagSet(['TWILIGHT'])
@astrodata.astro_data_tag
def _tag_domeflat(self):
if self.phu.get('OBSTYPE') == 'DFLAT':
return astrodata.TagSet(['DOME'])
@astrodata.astro_data_tag
def _tag_acquisition(self):
# Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag.
# But since OBSTYPE is being used for both, not clear how that
# can be done right now.
filename = self.phu.get('FILENAME', '')
notes = self.phu.get('NOTES', '')
if re.search('acq.[0-9]+', filename) or re.search('/acq/i', notes):
return astrodata.TagSet(['ACQUISITION', 'IMAGE'])
@astrodata.astro_data_tag
def _tag_image(self):
# this one will need something like "if not FABRY keyword", I think.
if self.phu.get('OBSTYPE') == 'OBJECT':
return astrodata.TagSet(['IMAGE'])
@astrodata.astro_data_tag
def _tag_bias(self):
if self.phu.get('OBSTYPE') == 'ZERO':
return astrodata.TagSet(['BIAS', 'CAL'], blocks=['IMAGE', 'FABRY'])
@astrodata.astro_data_descriptor
def data_section(self, pretty=False):
"""
Returns the rectangular section that includes the pixels that would be
exposed to light. If pretty is False, a tuple of 0-based coordinates
is returned with format (x1, x2, y1, y2). If pretty is True, a keyword
value is returned without parsing as a string. In this format, the
coordinates are generally 1-based.
One tuple or string is return per extension/array, in a list. If the
method is called on a single slice, the section is returned as a tuple
or a string.
Parameters
----------
pretty : bool
If True, return the formatted string found in the header.
Returns
-------
tuple of integers or list of tuples
Location of the pixels exposed to light using Python slice values.
string or list of strings
Location of the pixels exposed to light using an IRAF section
format (1-based).
"""
return self._parse_section(self._keyword_for('data_section'), pretty)
@astrodata.astro_data_descriptor
def filter_name(self):
"""
Returns the name of the filter used according to the summary FILTERS
keyword.
Returns
-------
str
The name of the filter.
"""
return self.phu.get('FILTERS')
@astrodata.astro_data_descriptor
def gain(self):
"""
Gain of the amplifier
Returns
-------
float
The gain for each amplifier
"""
# Bruno: GAIN is set to "unavail" in the headers. Do you have
# the gain for each amp in some lookup table?
gain = []
for ad in self[1:]:
val = ad.hdr['gain']
if val != 'unavail':
gain.append(val)
else:
gain.append(None)
return gain
@classmethod
def load(cls, source):
def sami_parser(hdu):
            m = re.match(r'im(\d)', hdu.header.get('EXTNAME', ''))
if m:
hdu.header['EXTNAME'] = ('SCI', 'Added by AstroData')
hdu.header['EXTVER'] = (int(m.group(1)), 'Added by AstroData')
return cls(FitsLoader(FitsProvider).load(source,
extname_parser=sami_parser))
| 33.846154
| 79
| 0.590083
| 612
| 4,840
| 4.576797
| 0.320261
| 0.038558
| 0.064263
| 0.052481
| 0.262406
| 0.209568
| 0.165655
| 0.131382
| 0.079971
| 0.079971
| 0
| 0.004763
| 0.305992
| 4,840
| 143
| 80
| 33.846154
| 0.829116
| 0.35186
| 0
| 0.151515
| 0
| 0
| 0.097991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.19697
| false
| 0
| 0.075758
| 0.030303
| 0.484848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
070a2f74e288d9e0f7d67adf9e2e415a8758caa2
| 1,957
|
py
|
Python
|
yoon/stage1_kernel.py
|
yoon28/realsr-noise-injection
|
402679490bf0972d09aaaadee3b5b9850c2a36e4
|
[
"Apache-2.0"
] | 17
|
2020-07-29T11:08:19.000Z
|
2021-01-07T11:23:33.000Z
|
yoon/stage1_kernel.py
|
yoon28/realsr-noise-injection
|
402679490bf0972d09aaaadee3b5b9850c2a36e4
|
[
"Apache-2.0"
] | 5
|
2020-08-04T02:51:39.000Z
|
2020-08-21T03:44:08.000Z
|
yoon/stage1_kernel.py
|
yoon28/realsr-noise-injection
|
402679490bf0972d09aaaadee3b5b9850c2a36e4
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
import numpy as np
import cv2
import random
import torch
from configs import Config
from kernelGAN import KernelGAN
from data import DataGenerator
from learner import Learner
import tqdm
DATA_LOC = "/mnt/data/NTIRE2020/realSR/track2" # "/mnt/data/NTIRE2020/realSR/track1"
DATA_X = "DPEDiphone-tr-x" # "Corrupted-tr-x"
DATA_Y = "DPEDiphone-tr-y" # "Corrupted-tr-y"
DATA_VAL = "DPEDiphone-va" # "Corrupted-va-x"
def config_kernelGAN(afile):
img_folder = os.path.dirname(afile)
img_file = os.path.basename(afile)
out_dir = "yoon/kernels/track2"
params = ["--input_image_path", afile,
"--output_dir_path", out_dir,
"--noise_scale", str(1.0),
"--X4"]
conf = Config().parse(params)
conf.input2 = None
return conf
def estimate_kernel(img_file):
conf = config_kernelGAN(img_file)
kgan = KernelGAN(conf)
learner = Learner()
data = DataGenerator(conf, kgan)
for iteration in tqdm.tqdm(range(conf.max_iters), ncols=70):
[g_in, d_in, _] = data.__getitem__(iteration)
kgan.train(g_in, d_in)
learner.update(iteration, kgan)
kgan.finish()
if __name__ == "__main__":
seed_num = 0
torch.manual_seed(seed_num)
torch.cuda.manual_seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed_num)
random.seed(seed_num)
# exit(0)
data = {"X":[os.path.join(DATA_LOC, DATA_X, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_X)) if f[-4:] == ".png"],
"Y":[os.path.join(DATA_LOC, DATA_Y, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_Y)) if f[-4:] == ".png"],
"val":[os.path.join(DATA_LOC, DATA_VAL, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_VAL)) if f[-4:] == ".png"]}
Kernels = []
Noises = []
for f in data["X"]:
estimate_kernel(f)
print("fin.")
| 30.107692
| 130
| 0.654573
| 294
| 1,957
| 4.156463
| 0.329932
| 0.03928
| 0.0491
| 0.06874
| 0.201309
| 0.201309
| 0.201309
| 0.141571
| 0.090835
| 0.090835
| 0
| 0.014678
| 0.199285
| 1,957
| 64
| 131
| 30.578125
| 0.765156
| 0.048033
| 0
| 0
| 0
| 0
| 0.095315
| 0.017771
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.192308
| 0
| 0.25
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
070dfc39dd180a0fc71b0110b529e2e8beee6cea
| 10,971
|
py
|
Python
|
python/zzz/v1-all_feat_cnn/components/features.py
|
emorynlp/character-identification-old
|
f6519166dd30bd8140f05aa3e43225ab27c2ea6d
|
[
"Apache-2.0"
] | 1
|
2019-09-03T13:38:08.000Z
|
2019-09-03T13:38:08.000Z
|
python/zzz/v1-all_feat_cnn/components/features.py
|
emorynlp/character-identification-old
|
f6519166dd30bd8140f05aa3e43225ab27c2ea6d
|
[
"Apache-2.0"
] | null | null | null |
python/zzz/v1-all_feat_cnn/components/features.py
|
emorynlp/character-identification-old
|
f6519166dd30bd8140f05aa3e43225ab27c2ea6d
|
[
"Apache-2.0"
] | null | null | null |
from abc import *
import numpy as np
###########################################################
class AbstractFeatureExtractor(object):
@abstractmethod
def extract(self, object):
return
###########################################################
class EntityFeatureExtractor(AbstractFeatureExtractor):
def __init__(self, empty_embd_shape=None, empty_feat_shape=None):
self.e_EMPTY = np.zeros(empty_embd_shape) if empty_embd_shape else None
self.f_EMPTY = np.zeros(empty_feat_shape) if empty_feat_shape else None
def extract(self, entity, include_average=True, nb_mentions=5, selection_method='last'):
embedding, feature = ([], [])
if entity and include_average:
nb_mentions -= 1
embedding.append(entity.get_avg_mention_embedding())
feature.append(entity.get_avg_mention_feature())
nb_padding = max(0, nb_mentions - len(entity))
nb_mentions -= nb_padding
        if selection_method == 'last':
mentions = entity[-nb_mentions:]
            embedding += [m.embedding for m in mentions]
            feature += [m.feature for m in mentions]
        for _ in range(nb_padding):
embedding.append(self.e_EMPTY)
feature.append(self.f_EMPTY)
return np.array(embedding), np.array(feature)
###########################################################
class MentionFeatureExtractor(AbstractFeatureExtractor):
def __init__(self, word2vec, word2gender, spks, poss, deps, ners, spk_dim=8, pos_dim=8, dep_dim=8, ner_dim=8):
self.word2vec = word2vec
        self.word2vec_dim = len(next(iter(word2vec.values())))  # dict views are not indexable in Python 3
        self.word2gender = word2gender
        self.word2gender_dim = len(next(iter(word2gender.values())))
self.spk_dim = spk_dim
self.spk2vec = dict()
for spk in spks:
self.spk2vec[spk] = np.random.rand(spk_dim)
self.pos_dim = pos_dim
self.pos2vec = dict()
for pos in poss:
self.pos2vec[pos] = np.random.rand(pos_dim)
self.dep_dim = dep_dim
self.dep2vec = dict()
for dep in deps:
self.dep2vec[dep] = np.random.rand(dep_dim)
self.ner_dim = ner_dim
self.ner2vec = dict()
for ner in ners:
self.ner2vec[ner] = np.random.rand(ner_dim)
def extract(self, mention):
head_token = self.get_head_token(mention)
first_token, last_token = mention.tokens[0], mention.tokens[-1]
utterance = first_token.parent_utterance()
scene = utterance.parent_scene()
episode = scene.parent_episode()
speaker = utterance.speaker
prev_utterance = utterance.previous_utterance()
prev_speaker = prev_utterance.speaker if prev_utterance is not None else None
flatten_utterance_tokens = self.flatten_utterance(utterance)
flatten_sentence_tokens = self.get_mention_sentence_tokens(utterance, mention)
ft_locations = self.get_token_locations(flatten_utterance_tokens, mention)
start_ftid, end_ftid = ft_locations[0], ft_locations[-1]
token_len = end_ftid - start_ftid
embeddings = list()
# Word embeddings of the head word
embeddings.append(self.get_token_word_vector(head_token))
# First word of the mention
embeddings.append(self.get_token_word_vector(first_token))
# Last word of the mention
embeddings.append(self.get_token_word_vector(last_token))
# Avg of all words in the mention
embeddings.append(self.get_tokens_word_vector(mention))
# Two preceding words of the mention
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, 1))
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-2, 1))
# Two following words of the mention
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+1, 1))
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+2, 1))
# Avg of the +-1 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, token_len+2))
# Avg of the +-2 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-2, token_len+4))
# Avg of the -5 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, -5))
# Avg of the +5 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+1, 5))
# Avg of all words in the mention's sentence
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_sentence_tokens, 0, len(flatten_sentence_tokens)))
# Avg of all words in current utterance
embeddings.append(self.get_utterance_vector(utterance))
# Avg of all words in previous utterance
embeddings.append(self.get_utterance_vector(prev_utterance))
# Avg of all words in the scene
embeddings.append(self.get_scene_vector(scene))
# Avg of all words in the episode
embeddings.append(self.get_episode_vector(episode))
features = list()
# Gender information of head token in the mention
features.append(self.get_token_gender_vector(head_token))
# Avg gender information of all tokens in the mention
features.append(self.get_tokens_gender_vector(mention))
# Current speaker information of the utterance
features.append(self.get_speaker_vector(speaker))
# Previous speaker information of the utterance
features.append(self.get_speaker_vector(prev_speaker))
# Pos tag information of head token
features.append(self.get_pos_tag_vector(head_token.pos_tag))
# Ner tag information of head token
features.append(self.get_ner_tag_vector(head_token.ner_tag))
# Dep label information of head token
features.append(self.get_dep_label_vector(head_token.dep_label))
# Dep label information of head token'parent
features.append(np.zeros(self.dep_dim) if head_token.dep_head is None
else self.get_dep_label_vector(head_token.dep_head.dep_label))
# Mention token length/location information within utterance
features.append(self.get_mention_location_information(flatten_utterance_tokens, start_ftid, end_ftid))
return np.array(embeddings), np.concatenate(features)
###### Helper functions #######
def get_head_token(self, mention):
        tids = {t.id for t in mention.tokens}  # a set: a map iterator would be exhausted after one membership test
for token in mention.tokens:
if token.dep_head is not None and token.dep_head.id not in tids:
return token
return mention.tokens[0]
def flatten_utterance(self, utterance):
return [st for statements in utterance.statements for st in statements]
def get_token_locations(self, flatten_tokens, mention):
locations = []
for idx, token in enumerate(flatten_tokens):
if token in mention.tokens:
locations.append(idx)
locations.sort()
return locations
def get_mention_sentence_tokens(self, utterance, mention):
token = mention.tokens[0]
for statement in utterance.statements:
if token in statement:
return statement
return None
###### Mention tokens features #######
def get_token_word_vector(self, token):
word_form = token.word_form.lower()
return self.word2vec[word_form] if word_form in self.word2vec else np.zeros(self.word2vec_dim)
def get_tokens_word_vector(self, mention):
tvector = np.zeros(self.word2vec_dim)
for token in mention.tokens:
tvector += self.get_token_word_vector(token)
return tvector / float(len(mention.tokens))
    def get_tokens_word_vector_wOffset(self, flatten_tokens, start, offset):
        tvector = np.zeros(self.word2vec_dim)
        if offset > 0:
            for tid in range(start, start + offset):
                # guard both ends: start can be negative near utterance boundaries
                if 0 <= tid < len(flatten_tokens):
                    tvector += self.get_token_word_vector(flatten_tokens[tid])
        else:
            # a negative offset walks backwards |offset| tokens from start
            for tid in range(start, start + offset, -1):
                if 0 <= tid < len(flatten_tokens):
                    tvector += self.get_token_word_vector(flatten_tokens[tid])
        return tvector / float(abs(offset))
def get_token_gender_vector(self, token):
word_form = token.word_form.lower()
return self.word2gender[word_form] if word_form in self.word2gender else np.zeros(self.word2gender_dim)
def get_tokens_gender_vector(self, mention):
gvector = np.zeros(self.word2gender_dim)
for token in mention.tokens:
gvector += self.get_token_gender_vector(token)
return gvector / float(len(mention.tokens))
def get_speaker_vector(self, speaker):
return self.spk2vec[speaker] if speaker in self.spk2vec else np.zeros(self.spk_dim)
def get_pos_tag_vector(self, tag):
return self.pos2vec[tag] if tag in self.pos2vec else np.zeros(self.pos_dim)
def get_ner_tag_vector(self, tag):
return self.ner2vec[tag] if tag in self.ner2vec else np.zeros(self.ner_dim)
def get_dep_label_vector(self, label):
return self.dep2vec[label] if label in self.dep2vec else np.zeros(self.dep_dim)
    def get_mention_location_information(self, flatten_utterance_tokens, start_idx, end_index):
        length = len(flatten_utterance_tokens)
# Normalized mention word length, start token location, end token location
return np.array([float(end_index-start_idx)/length, float(start_idx)/length, float(end_index)/length])
#### Transcript document features ####
def get_utterance_vector(self, utterance):
tcount = 0
uvector = np.zeros(self.word2vec_dim)
if utterance is not None:
for u in utterance.statements:
for t in u:
word = t.word_form.lower()
if word in self.word2vec:
uvector = uvector + self.word2vec[word]
tcount += len(u)
return uvector / float(tcount) if tcount > 0 else uvector
def get_scene_vector(self, scene):
svector = np.zeros(self.word2vec_dim)
for utterance in scene.utterances:
svector += self.get_utterance_vector(utterance)
return svector / float(len(scene.utterances)) if scene.utterances else svector
def get_episode_vector(self, episode):
evector = np.zeros(self.word2vec_dim)
for scene in episode.scenes:
evector += self.get_scene_vector(scene)
return evector / float(len(episode.scenes)) if episode.scenes else evector
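Each lookup above falls back to a zero vector for out-of-vocabulary keys, then averages over the members. A self-contained sketch of that pattern (the toy `word2vec` table is illustrative):

import numpy as np

word2vec = {"good": np.array([1.0, 0.0]), "day": np.array([0.0, 1.0])}
dim = len(next(iter(word2vec.values())))

def avg_vector(tokens):
    """Average embeddings, substituting zeros for unknown tokens."""
    total = np.zeros(dim)
    for tok in tokens:
        total += word2vec.get(tok.lower(), np.zeros(dim))
    return total / float(len(tokens)) if tokens else total

print(avg_vector(["Good", "day", "UNSEEN"]))  # -> [0.333..., 0.333...]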
| 44.417004
| 120
| 0.668854
| 1,420
| 10,971
| 4.93662
| 0.107042
| 0.03495
| 0.046362
| 0.055777
| 0.385307
| 0.324964
| 0.25321
| 0.203852
| 0.189301
| 0.161626
| 0
| 0.009853
| 0.232158
| 10,971
| 246
| 121
| 44.597561
| 0.822293
| 0.090329
| 0
| 0.052326
| 0
| 0
| 0.000821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.127907
| false
| 0
| 0.011628
| 0.034884
| 0.284884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
071099c9cb76fe44fe601d2109b5cad6021d0a3d
| 2,420
|
py
|
Python
|
_ar/masking_provement.py
|
TomKingsfordUoA/ResidualMaskingNetwork
|
6ce5ddf70f8ac8f1e6da2746b0bbeb9e457ceb7d
|
[
"MIT"
] | 242
|
2020-01-09T11:06:21.000Z
|
2022-03-26T14:51:48.000Z
|
_ar/masking_provement.py
|
huyhnueit68/ResidualMaskingNetwork
|
b77abb6e548b9a09b5c96b1592d71332b45d050e
|
[
"MIT"
] | 33
|
2020-01-09T08:42:10.000Z
|
2022-03-23T07:52:56.000Z
|
_ar/masking_provement.py
|
huyhnueit68/ResidualMaskingNetwork
|
b77abb6e548b9a09b5c96b1592d71332b45d050e
|
[
"MIT"
] | 61
|
2020-01-19T02:20:37.000Z
|
2022-03-25T13:08:48.000Z
|
import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
def activations_mask(tensor):
    """Collapse a feature map into a 224x224 JET heatmap."""
    tensor = torch.squeeze(tensor, 0)          # drop the batch dimension
    tensor = torch.mean(tensor, 0)             # average over channels
    tensor = tensor.detach().cpu().numpy()
    tensor = np.maximum(tensor, 0)             # keep positive activations only
    tensor = cv2.resize(tensor, (224, 224))
    tensor = tensor - np.min(tensor)
    tensor = tensor / (np.max(tensor) + 1e-8)  # avoid division by zero on empty maps
    heatmap = cv2.applyColorMap(np.uint8(255 * tensor), cv2.COLORMAP_JET)
    return heatmap
model = resmasking_dropout1(3, 7)
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33')
state = torch.load("./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32")
model.load_state_dict(state["net"])
model.cuda()
model.eval()
for image_path in natsorted(
glob.glob("/home/z/research/bkemo/images/**/*.png", recursive=True)
):
image_name = os.path.basename(image_path)
print(image_name)
# image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png'
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224))
tensor = transform(image)
tensor = torch.unsqueeze(tensor, 0)
tensor = tensor.cuda()
# output = model(tensor)
x = model.conv1(tensor) # 112
x = model.bn1(x)
x = model.relu(x)
x = model.maxpool(x) # 56
x = model.layer1(x) # 56
m = model.mask1(x)
x = x * (1 + m)
x = model.layer2(x) # 28
m = model.mask2(x)
x = x * (1 + m)
x = model.layer3(x) # 14
heat_1 = activations_mask(x)
m = model.mask3(x)
x = x * (1 + m)
# heat_2 = activations_mask(m)
x = model.layer4(x) # 7
m = model.mask4(x)
x = x * (1 + m)
x = model.avgpool(x)
x = torch.flatten(x, 1)
output = model.fc(x)
# print(np.sum(heat_1 - heat_2))
# show(np.concatenate((image, heat_1, heat_2), axis=1))
cv2.imwrite(
"./masking_provements/{}".format(image_name),
np.concatenate((image, heat_1), axis=1),
)
# np.concatenate((image, heat_1, heat_2), axis=1))
# output = output.cpu().numpy()
# print(EMOTION_DICT[torch.argmax(output, 1).item()])
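The demo saves the image and heatmap side by side; a common alternative is to blend them. A minimal sketch using OpenCV's weighted sum (the blend ratio is an arbitrary choice, not from the original script):

import cv2

def overlay_heatmap(image, heatmap, alpha=0.5):
    """Blend a color-mapped heatmap onto the source image."""
    assert image.shape == heatmap.shape, "resize the heatmap to the image first"
    return cv2.addWeighted(image, 1.0 - alpha, heatmap, alpha, 0)

# usage: blended = overlay_heatmap(image, heat_1)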
| 26.021505
| 85
| 0.647934
| 343
| 2,420
| 4.457726
| 0.344023
| 0.014388
| 0.034009
| 0.010464
| 0.153695
| 0.064748
| 0.064748
| 0.043165
| 0.043165
| 0
| 0
| 0.057903
| 0.207851
| 2,420
| 92
| 86
| 26.304348
| 0.739697
| 0.183471
| 0
| 0.061538
| 0
| 0
| 0.064796
| 0.063265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015385
| false
| 0
| 0.153846
| 0
| 0.184615
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07112b5b2ca5ebda12c4c78461b67e41243aa4a8
| 1,727
|
py
|
Python
|
Python/Gerenciador de pagamentos.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | 1
|
2022-03-03T23:19:57.000Z
|
2022-03-03T23:19:57.000Z
|
Python/Gerenciador de pagamentos.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | null | null | null |
Python/Gerenciador de pagamentos.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | null | null | null |
import time
def gerenciador_de_pagamento():
    preço = float(input('Purchase total: R$'))
    print('''Payment options:
    [ 1 ] Cash/cheque up front: 10% discount.
    [ 2 ] Card, single payment: 5% discount.
    [ 3 ] Card, up to 2 installments: list price.
    [ 4 ] Card, 3 or more installments: 20% interest.''')
    opção = int(input('Payment option: '))
    print('processing...')
    time.sleep(2)
    if opção == 1:
        print('You get a 10% discount!')
        print(f'The purchase will therefore cost R${preço - (preço * 10 / 100):.2f}.')
    elif opção == 2:
        print('You get a 5% discount!')
        print(f'The purchase will therefore cost R${preço - (preço * 5 / 100):.2f}.')
    elif opção == 3:
        print(f'The purchase comes to 2 installments of R${preço / 2:.2f}.')
        print(f'So it costs the list price of R${preço:.2f} in the end.')
    elif opção == 4:
        parcelas = int(input('How many installments: '))
        if parcelas >= 3:
            print('Purchase with 20% interest')
            print(f'The purchase comes to {parcelas} installments of R${(preço + (preço * 20 / 100)) / parcelas:.2f}.')
            print(f'So the purchase will cost R${preço + (preço * 20 / 100):.2f} in the end.')
        else:
            print('Installment count not understood, TRY AGAIN...')
    else:
        print('Value not understood, TRY AGAIN...')
        gerenciador_de_pagamento()
    return opção
while True:
    consulta = gerenciador_de_pagamento()  # the returned option is not used further
    consulta = str(input('Check again? '))
    if consulta.lower() in ['yes', 'sim']:
        pass
    else:
        break
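The if/elif ladder maps each option to a price adjustment; a dict of multipliers can express the same rules more compactly. A sketch under that assumption (the table structure is illustrative, not from the original):

# option -> (label, price multiplier); option 4 adds 20% interest before splitting
PAYMENT_RULES = {
    1: ('cash/cheque, 10% discount', 0.90),
    2: ('card in one payment, 5% discount', 0.95),
    3: ('card in 2 installments, list price', 1.00),
    4: ('card in 3+ installments, 20% interest', 1.20),
}

def final_price(preço: float, opção: int) -> float:
    label, factor = PAYMENT_RULES[opção]
    return preço * factor

print(f'R${final_price(100.0, 1):.2f}')  # R$90.00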
| 38.377778
| 105
| 0.59062
| 237
| 1,727
| 4.278481
| 0.341772
| 0.04142
| 0.043393
| 0.063116
| 0.2357
| 0.195266
| 0.149901
| 0.149901
| 0.149901
| 0.149901
| 0
| 0.038613
| 0.2652
| 1,727
| 44
| 106
| 39.25
| 0.760441
| 0
| 0
| 0.119048
| 0
| 0.047619
| 0.522319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0.02381
| 0.047619
| 0
| 0.095238
| 0.309524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0714065ddc085782b982ec392f121b65f95bc048
| 911
|
py
|
Python
|
mod/tools/ccmake.py
|
mattiasljungstrom/fips
|
8775e299f710ae5b977d49dc0672b607f2a10378
|
[
"MIT"
] | 429
|
2015-01-06T18:44:20.000Z
|
2022-03-19T22:22:11.000Z
|
mod/tools/ccmake.py
|
mattiasljungstrom/fips
|
8775e299f710ae5b977d49dc0672b607f2a10378
|
[
"MIT"
] | 254
|
2015-01-01T18:11:57.000Z
|
2022-03-22T09:55:51.000Z
|
mod/tools/ccmake.py
|
mattiasljungstrom/fips
|
8775e299f710ae5b977d49dc0672b607f2a10378
|
[
"MIT"
] | 102
|
2015-01-17T11:41:16.000Z
|
2022-02-24T23:47:30.000Z
|
"""
wrapper for ccmake command line tool
"""
import subprocess
name = 'ccmake'
platforms = ['linux', 'osx']
optional = True
not_found = "required for 'fips config' functionality"
#-------------------------------------------------------------------------------
def check_exists(fips_dir):
"""test if ccmake is in the path
:returns: True if ccmake is in the path
"""
    try:
        subprocess.check_output(['ccmake', '--version'])
        return True
    except (OSError, subprocess.CalledProcessError):
        return False
#-------------------------------------------------------------------------------
def run(build_dir):
"""run ccmake to configure cmake project
:param build_dir: directory where ccmake should run
:returns: True if ccmake returns successful
"""
res = subprocess.call('ccmake .', cwd=build_dir, shell=True)
return res == 0
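Probing the binary with `--version` works; the standard library can also answer "is this tool on PATH" directly. A sketch of an equivalent check via `shutil.which` (a design alternative, not how fips itself does it):

import shutil

def check_exists_which() -> bool:
    """True if a ccmake executable is found on PATH."""
    return shutil.which('ccmake') is not None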
| 26.794118
| 80
| 0.535675
| 93
| 911
| 5.172043
| 0.602151
| 0.049896
| 0.04158
| 0.049896
| 0.079002
| 0.079002
| 0
| 0
| 0
| 0
| 0
| 0.001376
| 0.201976
| 911
| 33
| 81
| 27.606061
| 0.660248
| 0.453348
| 0
| 0
| 0
| 0
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07167e515430a27837434e8e166dc173dffdcc37
| 1,914
|
py
|
Python
|
codewars/4 kyu/strip-comments.py
|
sirken/coding-practice
|
9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab
|
[
"MIT"
] | null | null | null |
codewars/4 kyu/strip-comments.py
|
sirken/coding-practice
|
9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab
|
[
"MIT"
] | null | null | null |
codewars/4 kyu/strip-comments.py
|
sirken/coding-practice
|
9c5e23b2c24f525a89a5e1d15ce3aec3ad1a01ab
|
[
"MIT"
] | null | null | null |
from Test import Test
'''
Complete the solution so that it strips all text that follows any of a set of comment markers passed in. Any whitespace at the end of the line should also be stripped out.
Example:
Given an input string of:
apples, pears # and bananas
grapes
bananas !apples
The output expected would be:
apples, pears
grapes
bananas
The code would be called like so:
result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# result should == "apples, pears\ngrapes\nbananas"
'''
# Split by rows, then find earliest marker and extract string before it
def solution(string, markers):
strings = string.split('\n')
l = []
for line in strings:
pos = len(line)
for m in markers:
if m in line:
if line.index(m) < pos:
pos = line.index(m)
l.append(line[:pos].rstrip())
return '\n'.join(l)
# Top solution, split list by \n, edit in place
def solution(string, markers):
parts = string.split('\n')
for s in markers:
parts = [v.split(s)[0].rstrip() for v in parts]
return '\n'.join(parts)
# Top solution expanded
def solution(string, markers):
# split by lines
parts = string.split('\n')
# Loop through markers
for s in markers:
# Loop through all lines, check for any markers
# Split by marker, grab first item, and rstrip whitespace
for num, v in enumerate(parts):
parts[num] = v.split(s)[0].rstrip()
return '\n'.join(parts)
Test.assert_equals(solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]), "apples, pears\ngrapes\nbananas")
Test.assert_equals(solution("a #b\nc\nd $e f g", ["#", "$"]), "a\nc\nd")
Test.assert_equals(solution('= - avocados oranges pears cherries\nlemons apples\n- watermelons strawberries', ['#', '?', '=', ',', '.', '-', '!']), '\nlemons apples\n')
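Splitting once per marker in a loop works; a single pass with a compiled alternation of escaped markers finds the earliest marker directly. A regex-based sketch (an extra variant, not one of the kata solutions above):

import re

def solution_re(string, markers):
    if not markers:
        return '\n'.join(line.rstrip() for line in string.split('\n'))
    # escape each marker so characters like '$' are matched literally
    pattern = re.compile('|'.join(re.escape(m) for m in markers))
    return '\n'.join(pattern.split(line, 1)[0].rstrip() for line in string.split('\n'))

assert solution_re("a #b\nc\nd $e f g", ["#", "$"]) == "a\nc\nd"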
| 31.9
| 171
| 0.640021
| 273
| 1,914
| 4.47619
| 0.384615
| 0.05401
| 0.03437
| 0.051555
| 0.104746
| 0.081833
| 0.081833
| 0.081833
| 0
| 0
| 0
| 0.001341
| 0.221003
| 1,914
| 59
| 172
| 32.440678
| 0.818243
| 0.143678
| 0
| 0.346154
| 0
| 0
| 0.198055
| 0.040672
| 0
| 0
| 0
| 0
| 0.115385
| 1
| 0.115385
| false
| 0
| 0.038462
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0718f25c782fcd74f5e9c8f0ae638c3321dd5b08
| 6,221
|
py
|
Python
|
qat/interop/qiskit/quantum_channels.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | 5
|
2020-09-09T09:44:31.000Z
|
2021-07-02T09:49:21.000Z
|
qat/interop/qiskit/quantum_channels.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | null | null | null |
qat/interop/qiskit/quantum_channels.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | 3
|
2020-07-10T17:51:47.000Z
|
2021-04-13T16:33:44.000Z
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp
import numpy as np
from qat.comm.quops.ttypes import QuantumChannel, RepresentationType
from qat.comm.datamodel.ttypes import Matrix, ComplexNumber
def array_to_matrix(array):
"""
Transform a two dimmentional numpy array to a myqlm Matrix.
Args:
array: (ndarray) a two dimmentional numpy array
Returns:
(Matrix): a myqlm Matrix
"""
assert len(array.shape) == 2, "The array must be two dimmentional"
data = []
for arr in array:
for elem in arr:
data.append(ComplexNumber(np.real(elem), np.imag(elem)))
matri = Matrix(array.shape[0], array.shape[1], data)
return matri
def qiskit_to_qchannel(representation):
"""
Create a myqlm representation of quantum channel from a qiskit representation
of a quantum channel.
Args:
representation: (Kraus|Choi|Chi|SuperOp|PTM) qiskit representation of a quantum channel.
Returns:
(QuantumChannel): myqlm representation of a quantum channel.
"""
qchannel = None
qiskit_data = representation.data
# Find what representation it is.
# Then create the corresponding matrix (kraus_ops|basis|matrix)from the data
# of the representation.
# Finally, create the QuantumChannel with the RepresentationType, the arity
# (got from the qiskit representation) and the matrix.
if isinstance(representation, Kraus):
kraus_ops = []
for arr in qiskit_data:
kraus_ops.append(array_to_matrix(arr))
qchannel = QuantumChannel(
representation=RepresentationType.KRAUS,
arity=representation.num_qubits,
kraus_ops=kraus_ops)
elif isinstance(representation, Chi):
basis = []
basis.append(array_to_matrix(qiskit_data))
qchannel = QuantumChannel(
representation=RepresentationType.CHI,
arity=representation.num_qubits,
basis=basis)
elif isinstance(representation, SuperOp):
basis = []
basis.append(array_to_matrix(qiskit_data))
qchannel = QuantumChannel(
representation=RepresentationType.SUPEROP,
arity=representation.num_qubits,
basis=basis)
elif isinstance(representation, PTM):
matri = array_to_matrix(qiskit_data)
qchannel = QuantumChannel(
representation=RepresentationType.PTM,
arity=representation.num_qubits,
matrix=matri)
elif isinstance(representation, Choi):
matri = array_to_matrix(qiskit_data)
qchannel = QuantumChannel(
representation=RepresentationType.CHOI,
arity=representation.num_qubits,
matrix=matri)
return qchannel
def qchannel_to_qiskit(representation):
"""
Create a qiskit representation of quantum channel from a myqlm representation
of a quantum channel.
Args:
representation: (QuantumChannel) myqlm representation of a quantum channel.
Returns:
(Kraus|Choi|Chi|SuperOp|PTM): qiskit representation of a quantum channel.
"""
rep = representation.representation
# Find what representation it is.
# Then create the corresponding matrix and shape it like qiskit is expecting it.
# Finally, create the qiskit representation from that matrix.
if rep in (RepresentationType.PTM, RepresentationType.CHOI):
matri = representation.matrix
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
return PTM(data) if (rep == RepresentationType.PTM) else Choi(data)
if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):
final_data = []
for matri in representation.basis:
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
final_data.append(data)
if rep == RepresentationType.CHI:
return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])
return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])
if rep == RepresentationType.KRAUS:
final_data = []
for matri in representation.kraus_ops:
data_re = []
data_im = []
for i in range(matri.nRows):
for j in range(matri.nCols):
data_re.append(matri.data[i * matri.nRows + j].re + 0.j)
data_im.append(matri.data[i * matri.nRows + j].im)
data = np.array(data_re)
data.imag = np.array(data_im)
data = data.reshape((matri.nRows, matri.nCols))
final_data.append(data)
return Kraus(final_data)
return None
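`qchannel_to_qiskit` repeats the same Matrix-to-ndarray loop three times; the conversion factors naturally into a helper that inverts `array_to_matrix`. A sketch of that refactor (the helper name is illustrative, not part of the module):

import numpy as np

def matrix_to_array(matri):
    """Convert a myqlm Matrix back into a complex two-dimensional numpy array."""
    data = np.array([c.re + 1j * c.im for c in matri.data])
    return data.reshape((matri.nRows, matri.nCols))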
| 37.70303
| 96
| 0.649735
| 758
| 6,221
| 5.24934
| 0.201847
| 0.030158
| 0.019603
| 0.03619
| 0.476502
| 0.463433
| 0.403368
| 0.353606
| 0.339533
| 0.306358
| 0
| 0.003289
| 0.266999
| 6,221
| 164
| 97
| 37.932927
| 0.869298
| 0.307185
| 0
| 0.536842
| 0
| 0
| 0.008219
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 1
| 0.031579
| false
| 0
| 0.042105
| 0
| 0.147368
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0719b950e4a48282eaf1194cb80f0583e44f000f
| 2,061
|
py
|
Python
|
mne_nirs/simulation/_simulation.py
|
mshader/mne-nirs
|
d59a5436d162108226f31b33b194dfecada40d72
|
[
"BSD-3-Clause"
] | null | null | null |
mne_nirs/simulation/_simulation.py
|
mshader/mne-nirs
|
d59a5436d162108226f31b33b194dfecada40d72
|
[
"BSD-3-Clause"
] | null | null | null |
mne_nirs/simulation/_simulation.py
|
mshader/mne-nirs
|
d59a5436d162108226f31b33b194dfecada40d72
|
[
"BSD-3-Clause"
] | null | null | null |
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
import numpy as np
from mne import Annotations, create_info
from mne.io import RawArray
def simulate_nirs_raw(sfreq=3., amplitude=1.,
sig_dur=300., stim_dur=5.,
isi_min=15., isi_max=45.):
"""
Create simulated data.
.. warning:: Work in progress: I am trying to think on the best API.
Parameters
----------
sfreq : Number
The sample rate.
amplitude : Number
The amplitude of the signal to simulate in uM.
sig_dur : Number
The length of the signal to generate in seconds.
stim_dur : Number
The length of the stimulus to generate in seconds.
isi_min : Number
The minimum duration of the inter stimulus interval in seconds.
isi_max : Number
The maximum duration of the inter stimulus interval in seconds.
Returns
-------
raw : instance of Raw
The generated raw instance.
"""
from nilearn.stats.first_level_model import make_first_level_design_matrix
from pandas import DataFrame
frame_times = np.arange(sig_dur * sfreq) / sfreq
onset = 0.
onsets = []
conditions = []
durations = []
while onset < sig_dur - 60:
onset += np.random.uniform(isi_min, isi_max) + stim_dur
onsets.append(onset)
conditions.append("A")
durations.append(stim_dur)
events = DataFrame({'trial_type': conditions,
'onset': onsets,
'duration': durations})
dm = make_first_level_design_matrix(frame_times, events,
drift_model='polynomial',
drift_order=0)
annotations = Annotations(onsets, durations, conditions)
info = create_info(ch_names=['Simulated'], sfreq=sfreq, ch_types=['hbo'])
raw = RawArray(dm[["A"]].to_numpy().T * amplitude * 1.e-6,
info, verbose=False)
raw.set_annotations(annotations)
return raw
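A short usage sketch of the simulator above (parameter values mirror the defaults; nilearn and pandas must be installed since they are imported at call time):

raw = simulate_nirs_raw(sfreq=3., amplitude=1., sig_dur=300.,
                        stim_dur=5., isi_min=15., isi_max=45.)
print(raw.ch_names)          # ['Simulated']
print(len(raw.annotations))  # one annotation per generated stimulus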
| 29.442857
| 78
| 0.606016
| 251
| 2,061
| 4.828685
| 0.454183
| 0.044554
| 0.018152
| 0.021452
| 0.151815
| 0.108911
| 0.070957
| 0.070957
| 0
| 0
| 0
| 0.011822
| 0.30228
| 2,061
| 69
| 79
| 29.869565
| 0.831015
| 0.327511
| 0
| 0
| 0
| 0
| 0.036378
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.166667
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
071b7fe4a170335142cb957704dfc31f09df575c
| 1,125
|
py
|
Python
|
FeView/pstaticwidget.py
|
motiurce/FeView
|
8897b37062be88dd5ead2c8524f6b3b73451e25d
|
[
"MIT"
] | 10
|
2021-04-09T02:32:23.000Z
|
2022-03-12T15:21:41.000Z
|
FeView/pstaticwidget.py
|
ElsevierSoftwareX/SOFTX-D-21-00063
|
50eca2a003e6281dea3f1cf43fee221b61f53978
|
[
"MIT"
] | 2
|
2021-08-07T09:02:21.000Z
|
2022-02-25T09:30:22.000Z
|
FeView/pstaticwidget.py
|
motiurce/FeView
|
8897b37062be88dd5ead2c8524f6b3b73451e25d
|
[
"MIT"
] | 7
|
2021-04-09T02:32:25.000Z
|
2022-03-12T15:21:45.000Z
|
from PyQt5.QtWidgets import QWidget, QVBoxLayout
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
class PstaticWidget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.fig_pstatic = Figure()
self.fig_pstatic.set_facecolor('#ffffff')
self.canvas_pstatic = FigureCanvas(self.fig_pstatic)
vertical_layout = QVBoxLayout()
vertical_layout.addWidget(self.canvas_pstatic)
self.canvas_pstatic.axes_pstatic = self.canvas_pstatic.figure.add_subplot(111)
self.setLayout(vertical_layout)
self.canvas_pstatic.axes_pstatic.set_xticks([])
self.canvas_pstatic.axes_pstatic.set_yticks([])
self.canvas_pstatic.axes_pstatic.axis('off')
self.fig_pstatic.subplots_adjust(left=0.12, bottom=0.15, right=0.985, top=0.95)
self.toolbar = NavigationToolbar(self.canvas_pstatic, self)
self.toolbar.setFixedHeight(25)
vertical_layout.addWidget(self.toolbar)
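A minimal sketch of embedding the widget in an application and drawing into its axes (the QApplication scaffolding is illustrative, not part of this module):

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
w = PstaticWidget()
w.canvas_pstatic.axes_pstatic.plot([0, 1, 2], [0, 1, 4])
w.show()
sys.exit(app.exec_())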
| 46.875
| 89
| 0.728889
| 133
| 1,125
| 5.902256
| 0.406015
| 0.101911
| 0.173248
| 0.107006
| 0.254777
| 0.183439
| 0
| 0
| 0
| 0
| 0
| 0.023835
| 0.179556
| 1,125
| 24
| 90
| 46.875
| 0.826652
| 0
| 0
| 0
| 0
| 0
| 0.009066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
071b9acd086c7ba6412ea5c6a8e8d3fc44d05f5c
| 1,719
|
py
|
Python
|
pyallocation/solvers/exhaustive.py
|
julesy89/pyallocation
|
af80a8e2367a006121dd0702b55efa7b954bb039
|
[
"Apache-2.0"
] | null | null | null |
pyallocation/solvers/exhaustive.py
|
julesy89/pyallocation
|
af80a8e2367a006121dd0702b55efa7b954bb039
|
[
"Apache-2.0"
] | null | null | null |
pyallocation/solvers/exhaustive.py
|
julesy89/pyallocation
|
af80a8e2367a006121dd0702b55efa7b954bb039
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from pymoo.core.algorithm import Algorithm
from pymoo.core.population import Population
from pymoo.util.termination.no_termination import NoTermination
from pyallocation.allocation import FastAllocation
from pyallocation.problem import AllocationProblem
def exhaustively(problem):
    alloc = FastAllocation(problem, debug=False)
    k = 0
    sols = []
    rec_exhaustively(problem, alloc, k, sols)
    # rank by constraint violation first, then by weighted objective; keep the best 100
    sols.sort(key=lambda x: (x[1], x[2]))
    return sols[:100]
def rec_exhaustively(problem, alloc, k, sols):
if not alloc.feas:
return
if k == problem.n_var:
x, cv, f = np.copy(alloc.x), alloc.CV, (alloc.F * problem.w).sum()
sols.append((x, cv, f))
if len(sols) > 1000:
sols.sort(key=lambda x: (x[1], x[2]))
while len(sols) > 100:
sols.pop()
else:
for val in range(problem.xl[k], problem.xu[k] + 1):
alloc.set(k, val)
rec_exhaustively(problem, alloc, k + 1, sols)
alloc.set(k, -1)
class ExhaustiveAlgorithm(Algorithm):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.default_termination = NoTermination()
def setup(self, problem, **kwargs):
super().setup(problem, **kwargs)
assert isinstance(problem, AllocationProblem)
return self
def _initialize(self):
self._next()
def _next(self):
solutions = exhaustively(self.problem)
self.pop = Population.new(X=np.array([x for x, _, _ in solutions]))
self.evaluator.eval(self.problem, self.pop)
for ind in self.pop:
print(ind.F[0], ind.X)
self.termination.force_termination = True
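`rec_exhaustively` is a depth-first enumeration that prunes as soon as the partial assignment turns infeasible. The same pattern in a generic, self-contained form (toy feasibility check; not the pyallocation API):

def enumerate_assignments(domains, feasible, partial=None, out=None):
    """Depth-first search over variable domains with early pruning."""
    partial = [] if partial is None else partial
    out = [] if out is None else out
    if not feasible(partial):          # prune: no completion can repair this
        return out
    if len(partial) == len(domains):   # complete assignment
        out.append(list(partial))
        return out
    for value in domains[len(partial)]:
        partial.append(value)
        enumerate_assignments(domains, feasible, partial, out)
        partial.pop()
    return out

# toy run: pairs from {0,1} x {0,1} whose sum stays below 2
print(enumerate_assignments([[0, 1], [0, 1]], lambda p: sum(p) < 2))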
| 26.859375
| 75
| 0.623618
| 221
| 1,719
| 4.760181
| 0.357466
| 0.072243
| 0.091255
| 0.076996
| 0.129278
| 0.102662
| 0.041825
| 0.041825
| 0.041825
| 0
| 0
| 0.014809
| 0.253636
| 1,719
| 63
| 76
| 27.285714
| 0.805144
| 0
| 0
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.355556
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
071ec6aa5cdf0ac5081a189dd02a7abf4954448d
| 3,571
|
py
|
Python
|
pykrev/formula/find_intersections.py
|
Kzra/pykrev
|
1a328fccded962f309e951c8509b87a82c3d3ae6
|
[
"MIT"
] | 4
|
2021-02-18T10:19:13.000Z
|
2021-10-04T16:17:30.000Z
|
pykrev/formula/find_intersections.py
|
erikafreeman/pykrev
|
1a328fccded962f309e951c8509b87a82c3d3ae6
|
[
"MIT"
] | null | null | null |
pykrev/formula/find_intersections.py
|
erikafreeman/pykrev
|
1a328fccded962f309e951c8509b87a82c3d3ae6
|
[
"MIT"
] | 1
|
2021-09-23T16:03:03.000Z
|
2021-09-23T16:03:03.000Z
|
import itertools
import numpy as np
import pandas as pd
def find_intersections(formula_lists,group_labels,exclusive = True):
"""
Docstring for function pyKrev.find_intersections
====================
This function compares n lists of molecular formula and outputs a dictionary containing the intersections between each list.
Use
----
find_intersections([list_1,..,list_n],['group_1',...,'group_n'])
Returns a dictionary in which each key corresponds to a combination of group labels
and the corresponding value is a set containing the intersections between the groups in that combination.
Parameters
----------
formula_lists: a list containing n lists of molecular formula. Each item in the sub list should be a formula string.
group_labels: a list containing n strings of corresponding group labels.
exclusive: True or False, depending on whether you want the intersections to contain only unique values.
"""
    if len(formula_lists) != len(group_labels):
        raise ValueError('formula_lists and group_labels must be of equal length')  # InputError was undefined
combinations = [seq for i in range(0,len(group_labels)+1) for seq in itertools.combinations(group_labels,i) if len(seq) > 0]
combinations = sorted(combinations,key = lambda c : len(c),reverse = True) # sort combinations by length
    if exclusive:
assigned_formula = set() #create a set that will hold all the formula already assigned to a group
amb = pd.DataFrame(data = formula_lists).T
amb.columns = group_labels
intersections = dict()
for combo in combinations:
queries = []
for c in combo:
formula = list(filter(None,amb[c])) #Remove None entries introduced by dataframe
queries.append(set(formula))
if len(queries) == 1: #if there is only one query find the unique elements in it
q_set = frozenset(queries[0]) #qset is a frozen set, so it will not be mutated by changes to queries[0]
for f_list in formula_lists: #cycle all formula in formula_lists
set_f = frozenset(f_list) #convert f_list to sets, must be frozen so type matches q_set
if set_f == q_set: # ignore the set that corresponds to the query
pass
else:
queries[0] = queries[0] - set_f #delete any repeated elements in fset
intersections[combo] = queries[0]
elif len(queries) > 1:
            if exclusive:
q_intersect = intersect(queries)
intersections[combo] = q_intersect - assigned_formula #remove any elements from q_intersect that have already been assigned
assigned_formula.update(q_intersect) #update the assigned_set with q_intersect
else:
intersections[combo] = intersect(queries)
return intersections
def intersect(samples,counter=0):
""" This command uses recursion to find the intersections between a variable number of sets given in samples.
Where samples = [set_1,set_2,...,set_n] """
if len(samples) == 1:
return samples[0]
a = samples[counter]
b = samples[counter+1::]
if len(b) == 1: #check to see whether the recursion has reached the final element
return a & b[0]
else:
counter += 1
return a & intersect(samples,counter)
| 46.376623
| 143
| 0.633436
| 463
| 3,571
| 4.794816
| 0.336933
| 0.044595
| 0.031081
| 0.021622
| 0.021622
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008297
| 0.291235
| 3,571
| 77
| 144
| 46.376623
| 0.868827
| 0.434332
| 0
| 0.111111
| 0
| 0
| 0.027806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0.022222
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
071ee3300e784ba72ea76c1cd34d240a111eb588
| 5,386
|
py
|
Python
|
Create Playlist.py
|
j4ck64/PlaylistDirectories
|
4a7caf0923620a84aea9bb91e643011e7ee118db
|
[
"MIT"
] | null | null | null |
Create Playlist.py
|
j4ck64/PlaylistDirectories
|
4a7caf0923620a84aea9bb91e643011e7ee118db
|
[
"MIT"
] | null | null | null |
Create Playlist.py
|
j4ck64/PlaylistDirectories
|
4a7caf0923620a84aea9bb91e643011e7ee118db
|
[
"MIT"
] | null | null | null |
import os
import glob
import shutil
from tinytag import TinyTag
""" root = 'C:/'
copy_to = '/copy to/folder'
tag = TinyTag.get('C:/Users/jchap/OneDrive/Pictures/(VERYRAREBOYZ) (feat. $ki Mask The Slump God and Drugz).mp3')
print(tag.artist)
print('song duration: '+str(tag.duration))
"""
f = []
f=glob.glob('C:/Users/jchap/OneDrive/*.mp3')
print(f)
musicDirectory=[]
musicFiles =[]
# tag = TinyTag.get(f[0])
# print(tag.artist)
# for root, dirs, files in os.walk("C:/Users/jchap/OneDrive/"):
for root, dirs, files in os.walk("C:/"):
for file in files:
if file.endswith(".mp3"):
musicFiles.append(file)
musicDirectory.append(os.path.join(root, file))
#print(os.path.join(root, file))
print('files'+str(musicFiles))
tag = TinyTag.get(musicDirectory[0])
print('Artist',tag.artist)
print('Album Artist',tag.albumartist)
print('Title',tag.title)
print('Bitrate', tag.bitrate)
print('music directory'+str(musicDirectory))
print(len(musicDirectory))
currentDirectory =os.path.dirname(__file__)
with open(currentDirectory+'/The_Krabby_Patty Formula_.m3u', "r") as f:
content_list = [word.strip() for word in f]
""" my_file = open(currentDirectory+'/The_Krabby_Patty Formula_.m3u', "r")
content_list = my_file. readlines() """
# print('playlist contents')
# print(content_list)
musicWithoutDuplicates = []
duplicatesList = []
# check for tags equal to none
# remove tracks without album artist or title
for track in reversed(range(len(musicDirectory))):
try:
trackTag = TinyTag.get(musicDirectory[track])
if str(trackTag.albumartist)== 'None' or str(trackTag.title)=='None':
print('albumArtist = none',musicDirectory[track])
print('removing track and adding to log file')
musicDirectory.remove(musicDirectory[track])
except IndexError:
break
#check for duplicates
for j in range(len(musicDirectory)):
musicDtag = TinyTag.get(musicDirectory[j])
duplicateL=[]
duplicateLBiterate=[]
for duplicate in range(len(musicDirectory)):
duplicateTag = TinyTag.get(musicDirectory[duplicate])
musicWithoutDuplicates.append(musicDirectory[j])
if duplicateTag.albumartist == musicDtag.albumartist or duplicateTag.albumartist in musicDtag.albumartist:
if duplicateTag.title == musicDtag.title or duplicateTag.title in musicDtag.title :
#check if last iteration
if duplicate>=len(musicDirectory)-1:
print("found a duplicate!",musicDirectory[duplicate],duplicateTag.albumartist,duplicateTag.title)
                    if len(duplicateLBiterate) == 1:  # TODO: only handles a single competing bitrate; the condition may need revisiting
print('biterate')
#[x for x in duplicateL if TinyTag.get(musicDirectory[x]).bitrate > musicDirectory[x]]
print("Current duplicate Bite rate", duplicateLBiterate)
for x in range(len(duplicateL)):
if TinyTag.get(duplicateL[x]).bitrate == max(duplicateLBiterate):
                                # keep the copy with the best bitrate by taking it off the removal list
duplicateL.remove(duplicateL[x])
print('duplicate list',duplicateL)
#Add
duplicatesList = duplicatesList + duplicateL
else:
print("found a duplicate!",musicDirectory[duplicate],duplicateTag.albumartist,duplicateTag.title)
duplicateL.append(musicDirectory[duplicate])
duplicateLBiterate.append(duplicateTag.bitrate)
print('dup ',duplicatesList)
#remove duplicates from list
for u in range(len(duplicatesList)):
for i in range(len(musicDirectory)):
if duplicatesList[u]==musicDirectory[i]:
musicDirectory.remove(musicDirectory[i])
print('music ',musicDirectory)
#create playlist
newPlaylist = open("Test.m3u", "w")
#add file path to the respective track in the new playlist
for content in enumerate(content_list):
# split strings into artist and title
trackNumber=content[0]
trackArray =str(content[1]).split('-')
albumArtist= trackArray[0].strip()
title=trackArray[1].strip()
print('title:',title)
print('albumArtist:',albumArtist)
for trackDirectory in range(len(musicDirectory)):
trackTag = TinyTag.get(musicDirectory[trackDirectory])
if trackTag.albumartist == albumArtist or trackTag.albumartist in albumArtist:
if trackTag.title == title or trackTag.title in title:
                # write the resolved file path next to the original playlist entry
                newPlaylist.write(musicDirectory[trackDirectory] + " " + str(content[1]) + "\n")
                break
# close once, after every entry has been processed
newPlaylist.close()
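The nested duplicate scan above is quadratic and hard to follow; keying tracks by (album artist, title) and keeping the highest bitrate expresses the same intent directly. A sketch under that assumption (uses TinyTag as above, but not the original control flow):

from tinytag import TinyTag

def dedupe_by_tag(paths):
    """Keep one path per (albumartist, title), preferring the highest bitrate."""
    best = {}
    for path in paths:
        tag = TinyTag.get(path)
        key = (tag.albumartist, tag.title)
        if key not in best or tag.bitrate > TinyTag.get(best[key]).bitrate:
            best[key] = path
    return list(best.values())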
| 35.668874
| 133
| 0.604716
| 553
| 5,386
| 5.858951
| 0.271248
| 0.030864
| 0.044444
| 0.02963
| 0.108642
| 0.108642
| 0.094444
| 0.094444
| 0.051235
| 0.051235
| 0
| 0.003913
| 0.28834
| 5,386
| 151
| 134
| 35.668874
| 0.841378
| 0.131823
| 0
| 0.120879
| 0
| 0
| 0.073177
| 0.006958
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.010989
| 0.043956
| 0
| 0.043956
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
071fd543532fedf42da52e8b37bdf2f56e668e0e
| 1,636
|
py
|
Python
|
PyBank/main.py
|
Alexis-Kepano/python_challenge
|
2d86e0d891c549d5fba99bd48d612be80746e34b
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
Alexis-Kepano/python_challenge
|
2d86e0d891c549d5fba99bd48d612be80746e34b
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
Alexis-Kepano/python_challenge
|
2d86e0d891c549d5fba99bd48d612be80746e34b
|
[
"ADSL"
] | null | null | null |
#import modules
import os
import csv
#input
csvpath = os.path.join('Resources', 'budget_data.csv')
#output
outfile = os.path.join('Analysis', 'pybankstatements.txt')
#declare variables
months = []
total_m = 1
net_total = 0
total_change = 0
monthly_changes = []
greatest_inc = ['', 0]
greatest_dec = ['', 0]
#open & read csv
with open(csvpath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
first_row = next(csvreader)
previous_row = int(first_row[1])
net_total = int(first_row[1])
#loop
for row in csvreader:
net_total += int(row[1])
total_m = total_m+1
current_value = int(row[1])
change_value = int(current_value-previous_row)
monthly_changes.append(change_value)
months.append(row[0])
previous_row = int(row[1])
total_change = total_change + change_value
if change_value > greatest_inc[1]:
greatest_inc[0] = str(row[0])
greatest_inc[1] = change_value
if change_value < greatest_dec[1]:
greatest_dec[0] = str(row[0])
greatest_dec[1] = change_value
avg_change = total_change/len(months)
output = (
f"\n Financial Analysis \n"
f"------------------------------\n"
f"Total Months: {total_m}\n"
f"Total: ${net_total}\n"
f"Average Change: ${avg_change:.2f}\n"
f"Greatest Increase in Profits: {greatest_inc[0]} (${greatest_inc[1]})\n"
f"Greatest Decrease in Profits: {greatest_dec[0]} (${greatest_dec[1]})\n")
with open(outfile, "w") as txt_file:
txt_file.write(output)
outfile
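The running greatest-increase/decrease bookkeeping can also be expressed with max/min over the collected changes. A sketch reusing the lists built above (equivalent result, different style):

# pair each month with its change, then let max/min pick the extremes
inc_month, inc_value = max(zip(months, monthly_changes), key=lambda p: p[1])
dec_month, dec_value = min(zip(months, monthly_changes), key=lambda p: p[1])
print(f"Greatest Increase in Profits: {inc_month} (${inc_value})")
print(f"Greatest Decrease in Profits: {dec_month} (${dec_value})")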
| 28.206897
| 127
| 0.621027
| 224
| 1,636
| 4.330357
| 0.290179
| 0.079381
| 0.037113
| 0.041237
| 0.098969
| 0.065979
| 0
| 0
| 0
| 0
| 0
| 0.019747
| 0.226161
| 1,636
| 58
| 128
| 28.206897
| 0.746446
| 0.037286
| 0
| 0
| 0
| 0
| 0.211465
| 0.048408
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07201c5460a410eeac1f4cdd74f83fabb16f4ba2
| 3,993
|
py
|
Python
|
src/interactive_conditional_samples.py
|
RanHerOver/cometaai
|
02d459da5bbc58536112cfe6343f5ceef4ff2356
|
[
"MIT"
] | null | null | null |
src/interactive_conditional_samples.py
|
RanHerOver/cometaai
|
02d459da5bbc58536112cfe6343f5ceef4ff2356
|
[
"MIT"
] | null | null | null |
src/interactive_conditional_samples.py
|
RanHerOver/cometaai
|
02d459da5bbc58536112cfe6343f5ceef4ff2356
|
[
"MIT"
] | null | null | null |
import random
import fire
import json
import os
import numpy as np
import tensorflow as tf
import pytumblr
import mysql.connector
import datetime
from random import seed
import model, sample, encoder
def interact_model(
model_name='1558M',
seed=None,
nsamples=1,
batch_size=1,
length=None,
temperature=.7,
top_k=10,
top_p=1,
models_dir='models',
):
    # Authentication
client = pytumblr.TumblrRestClient(
'',
'',
'',
''
)
    # To keep my account secure, the two key pairs for the Tumblr connection have been removed from this file.
    # Database connection
mydb = mysql.connector.connect(
host="localhost",
user="root",
password="",
database="cometa"
)
print(mydb)
cursor = mydb.cursor()
    # Build and run the query
    print("before running the query")
    cursor.execute("SELECT testo FROM prompts ORDER BY RAND() LIMIT 1")
    print("after the query")
    # Format the prompt: unpacking the one-column row directly makes the old
    # parenthesis/quote stripping of the tuple repr unnecessary
    for (testo,) in cursor:
        print(testo)
    testoBuono = testo
    print(testoBuono)
    client.info()  # fetch and hold the profile information
blogName='unlikelycrownkitty'
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
    # Load the model from the models directory
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
    # Check that the prompt length does not exceed the model window
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    # Start the model with the given sampling parameters
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
continua=True
        # Begin text generation
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
saver.restore(sess, ckpt)
while continua:
raw_text = testoBuono
# raw_text = f.read()
while not raw_text:
print('The file is empty! Write something yourself.')
raw_text = input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
generated = 0
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(text)
print("=" * 80)
                    # Publish the generated text
client.create_text(blogName, state="published", slug="testing-text-posts",title=raw_text, body=text)
print('Continue? y/n')
risposta=input()
if risposta.lower() in ['y', 'yes']:
continua=True
else:
continua=False
exit()
if __name__ == '__main__':
    fire.Fire(interact_model)  # pass the function itself so Fire can map CLI flags to its arguments
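The credential strings were blanked for safety; a common pattern is to read them from the environment instead of hard-coding them. A sketch (the variable names are illustrative):

import os

import pytumblr

client = pytumblr.TumblrRestClient(
    os.environ["TUMBLR_CONSUMER_KEY"],
    os.environ["TUMBLR_CONSUMER_SECRET"],
    os.environ["TUMBLR_OAUTH_TOKEN"],
    os.environ["TUMBLR_OAUTH_SECRET"],
)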
| 30.953488
| 143
| 0.599048
| 471
| 3,993
| 4.951168
| 0.447983
| 0.038593
| 0.046312
| 0.047599
| 0.070326
| 0.070326
| 0.070326
| 0.046312
| 0.046312
| 0
| 0
| 0.008514
| 0.294015
| 3,993
| 128
| 144
| 31.195313
| 0.81873
| 0.121463
| 0
| 0.04902
| 0
| 0
| 0.092444
| 0
| 0.009804
| 0
| 0
| 0
| 0.009804
| 1
| 0.009804
| false
| 0.009804
| 0.107843
| 0
| 0.117647
| 0.098039
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072173681d53ec2482387460364698d940573600
| 3,839
|
py
|
Python
|
src/cms/carousels/serializers.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 6
|
2021-01-26T17:22:53.000Z
|
2022-02-15T10:09:03.000Z
|
src/cms/carousels/serializers.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 5
|
2020-12-24T14:29:23.000Z
|
2021-08-10T10:32:18.000Z
|
src/cms/carousels/serializers.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 2
|
2020-12-24T14:13:39.000Z
|
2020-12-30T16:48:52.000Z
|
from rest_framework import serializers
from cms.api.serializers import UniCMSContentTypeClass, UniCMSCreateUpdateSerializer
from cms.medias.serializers import MediaSerializer
from . models import Carousel, CarouselItem, CarouselItemLink, CarouselItemLinkLocalization, CarouselItemLocalization
class CarouselForeignKey(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
request = self.context.get('request', None)
if request:
carousel_id = self.context['request'].parser_context['kwargs']['carousel_id']
return Carousel.objects.filter(pk=carousel_id)
return None # pragma: no cover
class CarouselItemForeignKey(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
request = self.context.get('request', None)
if request:
carousel_id = self.context['request'].parser_context['kwargs']['carousel_id']
item_id = self.context['request'].parser_context['kwargs']['carousel_item_id']
return CarouselItem.objects.filter(pk=item_id,
carousel__pk=carousel_id)
return None # pragma: no cover
class CarouselItemLinkForeignKey(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
request = self.context.get('request', None)
if request:
carousel_id = self.context['request'].parser_context['kwargs']['carousel_id']
item_id = self.context['request'].parser_context['kwargs']['carousel_item_id']
link_id = self.context['request'].parser_context['kwargs']['carousel_item_link_id']
return CarouselItemLink.objects.filter(pk=link_id,
carousel_item__pk=item_id,
carousel_item__carousel__pk=carousel_id)
return None # pragma: no cover
class CarouselSerializer(UniCMSCreateUpdateSerializer,
UniCMSContentTypeClass):
class Meta:
model = Carousel
fields = '__all__'
read_only_fields = ('created_by', 'modified_by')
class CarouselItemSerializer(UniCMSCreateUpdateSerializer,
UniCMSContentTypeClass):
carousel = CarouselForeignKey()
def to_representation(self, instance):
data = super().to_representation(instance)
image = MediaSerializer(instance.image)
data['image'] = image.data
return data
class Meta:
model = CarouselItem
fields = '__all__'
read_only_fields = ('created_by', 'modified_by')
class CarouselItemLocalizationSerializer(UniCMSCreateUpdateSerializer,
UniCMSContentTypeClass):
carousel_item = CarouselItemForeignKey()
class Meta:
model = CarouselItemLocalization
fields = '__all__'
read_only_fields = ('created_by', 'modified_by')
class CarouselItemLinkSerializer(UniCMSCreateUpdateSerializer,
UniCMSContentTypeClass):
carousel_item = CarouselItemForeignKey()
class Meta:
model = CarouselItemLink
fields = '__all__'
class CarouselItemLinkLocalizationSerializer(UniCMSCreateUpdateSerializer,
UniCMSContentTypeClass):
carousel_item_link = CarouselItemLinkForeignKey()
class Meta:
model = CarouselItemLinkLocalization
fields = '__all__'
read_only_fields = ('created_by', 'modified_by')
class CarouselSelectOptionsSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
data = super().to_representation(instance)
data['value'] = instance.pk
data['text'] = instance.name
return data
class Meta:
model = Carousel
fields = ()
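A sketch of wiring one of these serializers into a DRF view (the view class and queryset are illustrative, not part of this module):

from rest_framework import generics

class CarouselList(generics.ListAPIView):
    """Read-only listing of carousels using the serializer above."""
    queryset = Carousel.objects.all()
    serializer_class = CarouselSerializer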
| 35.546296
| 117
| 0.657202
| 328
| 3,839
| 7.429878
| 0.204268
| 0.040624
| 0.032007
| 0.049241
| 0.551498
| 0.517029
| 0.517029
| 0.517029
| 0.436602
| 0.399261
| 0
| 0
| 0.260745
| 3,839
| 107
| 118
| 35.878505
| 0.858703
| 0.013024
| 0
| 0.602564
| 0
| 0
| 0.084016
| 0.005548
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064103
| false
| 0
| 0.051282
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072216b7c95085e52120d7afc6bcf448dd8b5843
| 7,298
|
py
|
Python
|
demos/colorization_demo/python/colorization_demo.py
|
mzegla/open_model_zoo
|
092576b4c598c1e301ebc38ad74b323972e54f3e
|
[
"Apache-2.0"
] | null | null | null |
demos/colorization_demo/python/colorization_demo.py
|
mzegla/open_model_zoo
|
092576b4c598c1e301ebc38ad74b323972e54f3e
|
[
"Apache-2.0"
] | null | null | null |
demos/colorization_demo/python/colorization_demo.py
|
mzegla/open_model_zoo
|
092576b4c598c1e301ebc38ad74b323972e54f3e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from openvino.runtime import Core, get_version
import cv2 as cv
import numpy as np
import logging as log
from time import perf_counter
import sys
from argparse import ArgumentParser, SUPPRESS
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python'))
sys.path.append(str(Path(__file__).resolve().parents[2] / 'common/python/openvino/model_zoo'))
import monitors
from images_capture import open_images_capture
from model_api.performance_metrics import PerformanceMetrics
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.DEBUG, stream=sys.stdout)
def build_arg():
parser = ArgumentParser(add_help=False)
in_args = parser.add_argument_group('Options')
in_args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Help with the script.')
in_args.add_argument("-m", "--model", help="Required. Path to .xml file with pre-trained model.",
required=True, type=Path)
in_args.add_argument("-d", "--device",
help="Optional. Specify target device for infer: CPU, GPU, HDDL or MYRIAD. "
"Default: CPU",
default="CPU", type=str)
in_args.add_argument('-i', "--input", required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images, video file or camera id.')
in_args.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
in_args.add_argument('-o', '--output', required=False,
help='Optional. Name of the output file(s) to save.')
in_args.add_argument('-limit', '--output_limit', required=False, default=1000, type=int,
help='Optional. Number of frames to store in output. '
'If 0 is set, all frames are stored.')
in_args.add_argument("--no_show", help="Optional. Don't show output.",
action='store_true', default=False)
in_args.add_argument("-u", "--utilization_monitors", default="", type=str,
help="Optional. List of monitors to show initially.")
return parser
def main(args):
cap = open_images_capture(args.input, args.loop)
log.info('OpenVINO Inference Engine')
log.info('\tbuild: {}'.format(get_version()))
core = Core()
log.info('Reading model {}'.format(args.model))
model = core.read_model(args.model, args.model.with_suffix(".bin"))
input_tensor_name = 'data_l'
input_shape = model.input(input_tensor_name).shape
assert input_shape[1] == 1, "Expected model input shape with 1 channel"
inputs = {}
for model_input in model.inputs:
inputs[model_input.get_any_name()] = np.zeros(model_input.shape)
assert len(model.outputs) == 1, "Expected a model with exactly 1 output"
compiled_model = core.compile_model(model, device_name=args.device)
infer_request = compiled_model.create_infer_request()
log.info('The model {} is loaded to {}'.format(args.model, args.device))
_, _, h_in, w_in = input_shape
frames_processed = 0
imshow_size = (640, 480)
graph_size = (imshow_size[0] // 2, imshow_size[1] // 4)
presenter = monitors.Presenter(args.utilization_monitors, imshow_size[1] * 2 - graph_size[1], graph_size)
metrics = PerformanceMetrics()
video_writer = cv.VideoWriter()
if args.output and not video_writer.open(args.output, cv.VideoWriter_fourcc(*'MJPG'),
cap.fps(), (imshow_size[0] * 2, imshow_size[1] * 2)):
raise RuntimeError("Can't open video writer")
start_time = perf_counter()
original_frame = cap.read()
if original_frame is None:
raise RuntimeError("Can't read an image from the input")
while original_frame is not None:
(h_orig, w_orig) = original_frame.shape[:2]
if original_frame.shape[2] > 1:
frame = cv.cvtColor(cv.cvtColor(original_frame, cv.COLOR_BGR2GRAY), cv.COLOR_GRAY2RGB)
else:
frame = cv.cvtColor(original_frame, cv.COLOR_GRAY2RGB)
img_rgb = frame.astype(np.float32) / 255
img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)
img_l_rs = cv.resize(img_lab.copy(), (w_in, h_in))[:, :, 0]
inputs[input_tensor_name] = np.expand_dims(img_l_rs, axis=[0, 1])
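# The network consumes the resized L (lightness) channel and predicts the two
# a/b chrominance channels; those are upsampled below and recombined with the
# original full-resolution L channel before converting back to BGR.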
res = next(iter(infer_request.infer(inputs).values()))
update_res = np.squeeze(res)
out = update_res.transpose((1, 2, 0))
out = cv.resize(out, (w_orig, h_orig))
img_lab_out = np.concatenate((img_lab[:, :, 0][:, :, np.newaxis], out), axis=2)
img_bgr_out = np.clip(cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR), 0, 1)
original_image = cv.resize(original_frame, imshow_size)
grayscale_image = cv.resize(frame, imshow_size)
colorize_image = (cv.resize(img_bgr_out, imshow_size) * 255).astype(np.uint8)
lab_image = cv.resize(img_lab_out, imshow_size).astype(np.uint8)
original_image = cv.putText(original_image, 'Original', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
grayscale_image = cv.putText(grayscale_image, 'Grayscale', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
colorize_image = cv.putText(colorize_image, 'Colorize', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
lab_image = cv.putText(lab_image, 'LAB interpretation', (25, 50),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv.LINE_AA)
ir_image = [cv.hconcat([original_image, grayscale_image]),
cv.hconcat([lab_image, colorize_image])]
final_image = cv.vconcat(ir_image)
metrics.update(start_time, final_image)
frames_processed += 1
if video_writer.isOpened() and (args.output_limit <= 0 or frames_processed <= args.output_limit):
video_writer.write(final_image)
presenter.drawGraphs(final_image)
if not args.no_show:
cv.imshow('Colorization Demo', final_image)
key = cv.waitKey(1)
if key in {ord("q"), ord("Q"), 27}:
break
presenter.handleKey(key)
start_time = perf_counter()
original_frame = cap.read()
metrics.log_total()
for rep in presenter.reportMeans():
log.info(rep)
if __name__ == "__main__":
args = build_arg().parse_args()
sys.exit(main(args) or 0)
| 43.183432
| 109
| 0.639216
| 992
| 7,298
| 4.510081
| 0.298387
| 0.017211
| 0.018105
| 0.034198
| 0.09924
| 0.09924
| 0.085829
| 0.075548
| 0.057667
| 0.057667
| 0
| 0.021978
| 0.239381
| 7,298
| 168
| 110
| 43.440476
| 0.784003
| 0.080707
| 0
| 0.066667
| 0
| 0
| 0.152294
| 0.008071
| 0
| 0
| 0
| 0
| 0.016667
| 1
| 0.016667
| false
| 0
| 0.091667
| 0
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07223524f59210dbb5356506e6de9ffb41f47883
| 8,174
|
py
|
Python
|
swagger_client/models/transfer.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/models/transfer.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/models/transfer.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.amount import Amount # noqa: F401,E501
class Transfer(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'iban': 'str',
'bic': 'str',
'name': 'str',
'amount': 'Amount',
'purpose': 'str',
'tan_media_id': 'str',
'tan_scheme': 'str'
}
attribute_map = {
'iban': 'iban',
'bic': 'bic',
'name': 'name',
'amount': 'amount',
'purpose': 'purpose',
'tan_media_id': 'tanMediaId',
'tan_scheme': 'tanScheme'
}
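# Illustrative construction (values and Amount's constructor signature are
# assumptions, not part of this generated model):
#   transfer = Transfer(iban="DE02...", name="Jane Doe", amount=Amount(),
#                       tan_media_id="m-1", tan_scheme="901")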
def __init__(self, iban=None, bic=None, name=None, amount=None, purpose=None, tan_media_id=None, tan_scheme=None): # noqa: E501
"""Transfer - a model defined in Swagger""" # noqa: E501
self._iban = None
self._bic = None
self._name = None
self._amount = None
self._purpose = None
self._tan_media_id = None
self._tan_scheme = None
self.discriminator = None
self.iban = iban
if bic is not None:
self.bic = bic
self.name = name
self.amount = amount
if purpose is not None:
self.purpose = purpose
self.tan_media_id = tan_media_id
self.tan_scheme = tan_scheme
@property
def iban(self):
"""Gets the iban of this Transfer. # noqa: E501
IBAN - International Bank Account Number (defined in ISO 13616-1) # noqa: E501
:return: The iban of this Transfer. # noqa: E501
:rtype: str
"""
return self._iban
@iban.setter
def iban(self, iban):
"""Sets the iban of this Transfer.
IBAN - International Bank Account Number (defined in ISO 13616-1) # noqa: E501
:param iban: The iban of this Transfer. # noqa: E501
:type: str
"""
if iban is None:
raise ValueError("Invalid value for `iban`, must not be `None`") # noqa: E501
self._iban = iban
@property
def bic(self):
"""Gets the bic of this Transfer. # noqa: E501
BIC - Business Identifier Code (defined in ISO-9362) # noqa: E501
:return: The bic of this Transfer. # noqa: E501
:rtype: str
"""
return self._bic
@bic.setter
def bic(self, bic):
"""Sets the bic of this Transfer.
BIC - Business Identifier Code (defined in ISO-9362) # noqa: E501
:param bic: The bic of this Transfer. # noqa: E501
:type: str
"""
self._bic = bic
@property
def name(self):
"""Gets the name of this Transfer. # noqa: E501
Name - Name of the creditor # noqa: E501
:return: The name of this Transfer. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Transfer.
Name - Name of the creditor # noqa: E501
:param name: The name of this Transfer. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def amount(self):
"""Gets the amount of this Transfer. # noqa: E501
Amount to be transferred # noqa: E501
:return: The amount of this Transfer. # noqa: E501
:rtype: Amount
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this Transfer.
Amount to be transferred # noqa: E501
:param amount: The amount of this Transfer. # noqa: E501
:type: Amount
"""
if amount is None:
raise ValueError("Invalid value for `amount`, must not be `None`") # noqa: E501
self._amount = amount
@property
def purpose(self):
"""Gets the purpose of this Transfer. # noqa: E501
Purpose # noqa: E501
:return: The purpose of this Transfer. # noqa: E501
:rtype: str
"""
return self._purpose
@purpose.setter
def purpose(self, purpose):
"""Sets the purpose of this Transfer.
Purpose # noqa: E501
:param purpose: The purpose of this Transfer. # noqa: E501
:type: str
"""
self._purpose = purpose
@property
def tan_media_id(self):
"""Gets the tan_media_id of this Transfer. # noqa: E501
TANMediaId - The identifying ID of the TANMedia. # noqa: E501
:return: The tan_media_id of this Transfer. # noqa: E501
:rtype: str
"""
return self._tan_media_id
@tan_media_id.setter
def tan_media_id(self, tan_media_id):
"""Sets the tan_media_id of this Transfer.
TANMediaId - The identifying ID of the TANMedia. # noqa: E501
:param tan_media_id: The tan_media_id of this Transfer. # noqa: E501
:type: str
"""
if tan_media_id is None:
raise ValueError("Invalid value for `tan_media_id`, must not be `None`") # noqa: E501
self._tan_media_id = tan_media_id
@property
def tan_scheme(self):
"""Gets the tan_scheme of this Transfer. # noqa: E501
TANScheme - The scheme **id** that is used to verify this payment (e.g. \"901\") # noqa: E501
:return: The tan_scheme of this Transfer. # noqa: E501
:rtype: str
"""
return self._tan_scheme
@tan_scheme.setter
def tan_scheme(self, tan_scheme):
"""Sets the tan_scheme of this Transfer.
TANScheme - The scheme **id** that is used to verify this payment (e.g. \"901\") # noqa: E501
:param tan_scheme: The tan_scheme of this Transfer. # noqa: E501
:type: str
"""
if tan_scheme is None:
raise ValueError("Invalid value for `tan_scheme`, must not be `None`") # noqa: E501
self._tan_scheme = tan_scheme
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Transfer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.186207
| 277
| 0.565207
| 1,014
| 8,174
| 4.435897
| 0.162722
| 0.076478
| 0.08715
| 0.084037
| 0.483993
| 0.437305
| 0.417074
| 0.284349
| 0.197199
| 0.11205
| 0
| 0.032802
| 0.332395
| 8,174
| 289
| 278
| 28.283737
| 0.791461
| 0.390751
| 0
| 0.073171
| 0
| 0
| 0.101563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162602
| false
| 0
| 0.03252
| 0
| 0.325203
| 0.01626
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07224ff81e97b5ee51932d0d9bca20ab01f96757
| 10,366
|
py
|
Python
|
external/trappy/tests/test_caching.py
|
vdonnefort/lisa
|
38e5f246e6c94201a60a8698e7f29277f11c425e
|
[
"Apache-2.0"
] | 1
|
2020-11-30T16:14:02.000Z
|
2020-11-30T16:14:02.000Z
|
external/trappy/tests/test_caching.py
|
vdonnefort/lisa
|
38e5f246e6c94201a60a8698e7f29277f11c425e
|
[
"Apache-2.0"
] | null | null | null |
external/trappy/tests/test_caching.py
|
vdonnefort/lisa
|
38e5f246e6c94201a60a8698e7f29277f11c425e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015-2017 ARM Limited, Google and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from builtins import chr
import os
import json
import shutil
import sys
import unittest
import utils_tests
import trappy
from trappy.ftrace import GenericFTrace
from trappy.systrace import SysTrace
class TestCaching(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(TestCaching, self).__init__(
[("trace_sched.txt", "trace.txt"),
("trace_sched.txt", "trace.raw.txt"),
("trace_systrace.html", "trace.html")],
*args,
**kwargs)
def test_cache_created(self):
"""Test cache creation when enabled"""
GenericFTrace.disable_cache = False
traces = (trappy.FTrace(), trappy.SysTrace(path='./trace.html'))
for trace in traces:
trace_path = os.path.abspath(trace.trace_path)
trace_dir = os.path.dirname(trace_path)
trace_file = os.path.basename(trace_path)
cache_dir = '.' + trace_file + '.cache'
self.assertTrue(cache_dir in os.listdir(trace_dir))
def test_cache_not_created(self):
"""Test that cache should not be created when disabled """
GenericFTrace.disable_cache = True
traces = (trappy.FTrace(), trappy.SysTrace(path='./trace.html'))
for trace in traces:
trace_path = os.path.abspath(trace.trace_path)
trace_dir = os.path.dirname(trace_path)
trace_file = os.path.basename(trace_path)
cache_dir = '.' + trace_file + '.cache'
self.assertFalse(cache_dir in os.listdir(trace_dir))
def test_compare_cached_vs_uncached(self):
""" Test that the cached and uncached traces are same """
# Build the cache, but the actual trace will be parsed
# fresh since this is a first time parse
GenericFTrace.disable_cache = False
uncached_trace = trappy.FTrace()
uncached_dfr = uncached_trace.sched_wakeup.data_frame
# Now read from previously parsed cache by reusing the path
cached_trace = trappy.FTrace(uncached_trace.trace_path)
cached_dfr = cached_trace.sched_wakeup.data_frame
# By default, the str to float conversion done when reading from csv is
# different from the one used when reading from the trace.txt file.
#
# Here's an example:
# - trace.txt string timestamps:
# [76.402065, 80.402065, 80.001337]
# - parsed dataframe timestamps:
# [76.402065000000007, 80.402065000000007, 82.001337000000007]
#
# - csv string timestamps:
# [76.402065, 80.402065, 80.001337]
# - cached dataframe timestamps:
# [76.402064999999993, 80.402064999999993, 82.001337000000007]
#
# To fix this, the timestamps read from the cache are converted using
# the same conversion method as the trace.txt parser, which results in
# cache-read timestamps being identical to trace-read timestamps.
#
# This test ensures that this stays true.
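# (Illustrative aside, not part of the test: pandas' fast CSV float parser
# can differ from Python's float() by 1 ulp; read_csv's
# float_precision='round_trip' option forces bit-identical round-trips.)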
cached_times = [r[0] for r in cached_dfr.iterrows()]
uncached_times = [r[0] for r in uncached_dfr.iterrows()]
self.assertTrue(cached_times == uncached_times)
# compare other columns as well
self.assertTrue([r[1].pid for r in cached_dfr.iterrows()] ==
[r[1].pid for r in uncached_dfr.iterrows()])
self.assertTrue([r[1].comm for r in cached_dfr.iterrows()] ==
[r[1].comm for r in uncached_dfr.iterrows()])
self.assertTrue([r[1].prio for r in cached_dfr.iterrows()] ==
[r[1].prio for r in uncached_dfr.iterrows()])
def test_invalid_cache_overwritten(self):
"""Test a cache with a bad checksum is overwritten"""
# This is a directory so we can't use the files_to_copy arg of
# SetUpDirectory, just do it ourselves.
cache_path = ".trace.txt.cache"
src = os.path.join(utils_tests.TESTS_DIRECTORY, "trace_sched.txt.cache")
shutil.copytree(src, cache_path)
metadata_path = os.path.join(cache_path, "metadata.json")
def read_metadata():
with open(metadata_path, "r") as f:
return json.load(f)
def write_md5(md5):
metadata = read_metadata()
metadata["md5sum"] = md5
with open(metadata_path, "w") as f:
json.dump(metadata, f)
# Change 1 character of the stored checksum
md5sum = read_metadata()["md5sum"]
md5sum_inc = md5sum[:-1] + chr(ord(md5sum[-1]) + 1)
write_md5(md5sum_inc)
# Parse a trace, this should delete and overwrite the invalidated cache
GenericFTrace.disable_cache = False
trace = trappy.FTrace()
# Check that the modified md5sum was overwritten
self.assertNotEqual(read_metadata()["md5sum"], md5sum_inc,
"The invalid ftrace cache wasn't overwritten")
def test_cache_dynamic_events(self):
"""Test that caching works if new event parsers have been registered"""
# Parse the trace to create a cache
GenericFTrace.disable_cache = False
trace1 = trappy.FTrace()
# Check we're actually testing what we think we are
if hasattr(trace1, 'dynamic_event'):
raise RuntimeError('Test bug: found unexpected event in trace')
# Now register a new event type, call the constructor again, and check
# that the newly added event (which is not present in the cache) is
# parsed.
parse_class = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key")
trace2 = trappy.FTrace()
self.assertTrue(len(trace2.dynamic_event.data_frame) == 1)
trappy.unregister_dynamic_ftrace(parse_class)
def test_cache_normalize_time(self):
"""Test that caching doesn't break normalize_time"""
GenericFTrace.disable_cache = False
# Times in trace_sched.txt
start_time = 6550.018511
first_freq_event_time = 6550.056870
# Parse without normalizing time
trace1 = trappy.FTrace(events=['cpu_frequency', 'sched_wakeup'],
normalize_time=False)
self.assertEqual(trace1.cpu_frequency.data_frame.index[0],
first_freq_event_time)
# Parse with normalized time
trace2 = trappy.FTrace(events=['cpu_frequency', 'sched_wakeup'],
normalize_time=True)
self.assertEqual(trace2.cpu_frequency.data_frame.index[0],
first_freq_event_time - start_time)
def test_cache_window_broad(self):
"""Test that caching doesn't break the 'window' parameter"""
GenericFTrace.disable_cache = False
trace1 = trappy.FTrace(
events=['sched_wakeup'],
window=(0, 1))
# Check that we're testing what we think we're testing. The trace
# contains 2 sched_wakeup events; this window should get rid of one of
# them.
if len(trace1.sched_wakeup.data_frame) != 1:
raise RuntimeError('Test bug: bad sched_wakeup event count')
# Parse again without the window
trace1 = trappy.FTrace(
events=['sched_wakeup'],
window=(0, None))
self.assertEqual(len(trace1.sched_wakeup.data_frame), 2)
def test_cache_window_narrow(self):
"""
Test that applying a window to a cached trace returns EXACTLY what is expected
"""
# As described in test_compare_cached_vs_uncached, reading from cache
# results in slightly different timestamps
#
# This test verifies that applying windows results in identical
# dataframes whether cache is used or not.
GenericFTrace.disable_cache = False
uncached_trace = trappy.FTrace()
trace = trappy.FTrace(uncached_trace.trace_path,
normalize_time=False,
abs_window=(6550.100000, 6552.000002))
self.assertAlmostEqual(trace.get_duration(), 1.900002)
self.assertEqual(len(trace.sched_wakeup.data_frame), 2)
self.assertEqual(len(trace.sched_wakeup_new.data_frame), 1)
def test_ftrace_metadata(self):
"""Test that caching keeps trace metadata"""
GenericFTrace.disable_cache = False
self.test_cache_created()
trace = trappy.FTrace()
version = int(trace._version)
cpus = int(trace._cpus)
self.assertEqual(version, 6)
self.assertEqual(cpus, 6)
def test_cache_delete_single(self):
GenericFTrace.disable_cache = False
trace = trappy.FTrace()
trace_path = os.path.abspath(trace.trace_path)
trace_dir = os.path.dirname(trace_path)
trace_file = os.path.basename(trace_path)
cache_dir = '.' + trace_file + '.cache'
number_of_trace_categories = 31
self.assertEqual(len(os.listdir(cache_dir)), number_of_trace_categories)
os.remove(os.path.join(cache_dir, 'SchedWakeup.csv'))
self.assertEqual(len(os.listdir(cache_dir)), number_of_trace_categories - 1)
# Generate trace again, should regenerate only the missing item
trace = trappy.FTrace()
self.assertEqual(len(os.listdir(cache_dir)), number_of_trace_categories)
for c in trace.trace_classes:
if isinstance(c, trace.class_definitions['sched_wakeup']):
self.assertEqual(c.cached, False)
continue
self.assertEqual(c.cached, True)
| 38.535316
| 88
| 0.644125
| 1,295
| 10,366
| 4.987645
| 0.255598
| 0.031584
| 0.038706
| 0.041802
| 0.323115
| 0.297569
| 0.264747
| 0.216752
| 0.156681
| 0.129432
| 0
| 0.034637
| 0.267509
| 10,366
| 268
| 89
| 38.679104
| 0.816015
| 0.285067
| 0
| 0.262411
| 0
| 0
| 0.063126
| 0.002882
| 0
| 0
| 0
| 0
| 0.148936
| 1
| 0.092199
| false
| 0
| 0.092199
| 0
| 0.198582
| 0.007092
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072578f31e8482a3127fc3b417aa642b8388a425
| 2,343
|
py
|
Python
|
ce_vae_test/main_cetrainer.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
ce_vae_test/main_cetrainer.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | 6
|
2021-02-02T23:00:02.000Z
|
2022-01-13T03:13:51.000Z
|
ce_vae_test/main_cetrainer.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import torch
import torch.utils.data
import matplotlib.pyplot as plt
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
from ce_vae_test.networks.min_vae import MinVae
from ce_vae_test.trainer.ce_trainer import CeVaeTrainer
from ce_vae_test.sampler.dataset_sampler import SamplerDatasetWithReplacement
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
writer = SummaryWriter()
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_sampler = SamplerDatasetWithReplacement(
dataset=datasets.MNIST('../data',
train=True,
download=True,
transform=transforms.ToTensor()),
batch_size=args.batch_size
)
test_sampler = SamplerDatasetWithReplacement(
dataset=datasets.MNIST('../data',
train=False,
transform=transforms.ToTensor()),
batch_size=args.batch_size * 10
)
cevae = MinVae(
input_size=28 * 28,
output_size=10,
latent_dim=2,
hidden_sizes_dec=[5],
device=device
).to(device)
trainer = CeVaeTrainer(
vae=cevae,
num_epochs=300,
train_loader=train_sampler,
test_loader=test_sampler,
writer=writer,
device=device,
alpha=0.90,
lamda=0.22
)
trainer.run()
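# SummaryWriter logs to ./runs by default; the training curves can be viewed
# with:  tensorboard --logdir runs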
| 32.09589
| 83
| 0.681605
| 294
| 2,343
| 5.285714
| 0.401361
| 0.034749
| 0.054698
| 0.025097
| 0.182754
| 0.182754
| 0.182754
| 0.063063
| 0
| 0
| 0
| 0.018359
| 0.20956
| 2,343
| 72
| 84
| 32.541667
| 0.820734
| 0
| 0
| 0.065574
| 0
| 0
| 0.130231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.213115
| 0
| 0.213115
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07257aac63bf6240cc82f0f082448d6a6953f3dc
| 1,567
|
py
|
Python
|
appr/commands/logout.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 31
|
2017-07-05T07:25:31.000Z
|
2021-01-18T22:21:57.000Z
|
appr/commands/logout.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 48
|
2017-06-27T15:48:29.000Z
|
2021-01-26T21:02:27.000Z
|
appr/commands/logout.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 17
|
2017-07-05T07:25:38.000Z
|
2021-01-20T14:52:29.000Z
|
from __future__ import absolute_import, division, print_function
from appr.auth import ApprAuth
from appr.commands.command_base import CommandBase, PackageSplit
class LogoutCmd(CommandBase):
name = 'logout'
help_message = "logout"
def __init__(self, options):
super(LogoutCmd, self).__init__(options)
self.status = None
self.registry_host = options.registry_host
self.package_parts = options.package_parts
pname = self.package_parts.get('package', None)
namespace = self.package_parts.get('namespace', None)
self.package = None
if pname:
self.package = "%s/%s" % (namespace, pname)
elif namespace:
self.package = namespace
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
parser.add_argument('registry', nargs='?', default=None, action=PackageSplit,
help="registry url: quay.io[/namespace][/repo]\n" +
"If namespace and/or repo are passed, creds only logout for them")
def _call(self):
client = self.RegistryClient(self.registry_host)
ApprAuth().delete_token(client.host, scope=self.package)
self.status = "Logout complete"
if self.registry_host != '*':
self.status += " from %s" % self.registry_host
def _render_dict(self):
return {"status": self.status, 'host': self.registry_host, "scope": self.package}
def _render_console(self):
return " >>> %s" % self.status
| 36.44186
| 94
| 0.640077
| 178
| 1,567
| 5.421348
| 0.410112
| 0.091192
| 0.082902
| 0.039378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25016
| 1,567
| 42
| 95
| 37.309524
| 0.821277
| 0
| 0
| 0
| 0
| 0
| 0.123165
| 0.017869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0.029412
| 0.088235
| 0.058824
| 0.382353
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072775cafe9ec9921c429b5df6eb75f74e95605d
| 10,370
|
py
|
Python
|
tzwhere/tzwhere.py
|
tuxiqae/pytzwhere
|
32d2bef9ff2d784741471fddb35fbb6732f556d5
|
[
"MIT"
] | 115
|
2015-01-09T06:18:19.000Z
|
2021-12-28T07:07:45.000Z
|
tzwhere/tzwhere.py
|
tuxiqae/pytzwhere
|
32d2bef9ff2d784741471fddb35fbb6732f556d5
|
[
"MIT"
] | 47
|
2015-04-15T20:23:44.000Z
|
2022-03-22T11:25:01.000Z
|
tzwhere/tzwhere.py
|
tuxiqae/pytzwhere
|
32d2bef9ff2d784741471fddb35fbb6732f556d5
|
[
"MIT"
] | 46
|
2015-01-26T16:42:10.000Z
|
2022-01-04T15:26:57.000Z
|
#!/usr/bin/env python
'''tzwhere.py - time zone computation from latitude/longitude.
Ordinarily this is loaded as a module and instances of the tzwhere
class are instantiated and queried directly
'''
import collections
try:
import ujson as json # loads 2 seconds faster than normal json
except ImportError:
try:
import json
except ImportError:
import simplejson as json
import math
import gzip
import os
import shapely.geometry as geometry
import shapely.prepared as prepared
# We can save about 222MB of RAM by turning our polygon lists into
# numpy arrays rather than tuples, if numpy is installed.
try:
import numpy
WRAP = numpy.asarray
COLLECTION_TYPE = numpy.ndarray
except ImportError:
WRAP = tuple
COLLECTION_TYPE = tuple
# for navigation and pulling values/files
this_dir, this_filename = os.path.split(__file__)
BASE_DIR = os.path.dirname(this_dir)
class tzwhere(object):
SHORTCUT_DEGREES_LATITUDE = 1.0
SHORTCUT_DEGREES_LONGITUDE = 1.0
# By default, use the data file in our package directory
DEFAULT_SHORTCUTS = os.path.join(os.path.dirname(__file__),
'tz_world_shortcuts.json')
DEFAULT_POLYGONS = os.path.join(os.path.dirname(__file__),
'tz_world.json.gz')
def __init__(self, forceTZ=False):
'''
Initializes the tzwhere class.
@forceTZ: If you want to force the lookup method to return a
timezone even if the point you are looking up is slightly outside its
bounds, you need to specify this during initialization already
'''
featureCollection = read_tzworld(tzwhere.DEFAULT_POLYGONS)
pgen = feature_collection_polygons(featureCollection)
self.timezoneNamesToPolygons = collections.defaultdict(list)
self.unprepTimezoneNamesToPolygons = collections.defaultdict(list)
for tzname, poly in pgen:
self.timezoneNamesToPolygons[tzname].append(poly)
for tzname, polys in self.timezoneNamesToPolygons.items():
self.timezoneNamesToPolygons[tzname] = WRAP(polys)
if forceTZ:
self.unprepTimezoneNamesToPolygons[tzname] = WRAP(polys)
with open(tzwhere.DEFAULT_SHORTCUTS, 'r') as f:
self.timezoneLongitudeShortcuts, self.timezoneLatitudeShortcuts = json.load(f)
self.forceTZ = forceTZ
# Convert the shortcut index lists to tuples to save memory
for degree in self.timezoneLatitudeShortcuts:
for tzname in self.timezoneLatitudeShortcuts[degree].keys():
self.timezoneLatitudeShortcuts[degree][tzname] = \
tuple(self.timezoneLatitudeShortcuts[degree][tzname])
for degree in self.timezoneLongitudeShortcuts.keys():
for tzname in self.timezoneLongitudeShortcuts[degree].keys():
self.timezoneLongitudeShortcuts[degree][tzname] = \
tuple(self.timezoneLongitudeShortcuts[degree][tzname])
def tzNameAt(self, latitude, longitude, forceTZ=False):
'''
Looks up the appropriate timezone for a given latitude and
longitude.
@latitude: latitude
@longitude: longitude
@forceTZ: If forceTZ is true and no valid timezone is found, return
the closest timezone found instead. Only works if the point has
the same integer degree value as the timezone
'''
if forceTZ:
assert self.forceTZ, 'You need to initialize tzwhere with forceTZ'
latTzOptions = self.timezoneLatitudeShortcuts[str(
(math.floor(latitude / self.SHORTCUT_DEGREES_LATITUDE) *
self.SHORTCUT_DEGREES_LATITUDE)
)]
latSet = set(latTzOptions.keys())
lngTzOptions = self.timezoneLongitudeShortcuts[str(
(math.floor(longitude / self.SHORTCUT_DEGREES_LONGITUDE) *
self.SHORTCUT_DEGREES_LONGITUDE)
)]
lngSet = set(lngTzOptions.keys())
possibleTimezones = lngSet.intersection(latSet)
queryPoint = geometry.Point(longitude, latitude)
if possibleTimezones:
for tzname in possibleTimezones:
if isinstance(self.timezoneNamesToPolygons[tzname], COLLECTION_TYPE):
self.timezoneNamesToPolygons[tzname] = list(
map(lambda p: prepared.prep(
geometry.Polygon(p[0], p[1])
), self.timezoneNamesToPolygons[tzname]))
polyIndices = set(latTzOptions[tzname]).intersection(set(
lngTzOptions[tzname]
))
for polyIndex in polyIndices:
poly = self.timezoneNamesToPolygons[tzname][polyIndex]
if poly.contains_properly(queryPoint):
return tzname
if forceTZ:
return self.__forceTZ__(possibleTimezones, latTzOptions,
lngTzOptions, queryPoint)
def __forceTZ__(self, possibleTimezones, latTzOptions,
lngTzOptions, queryPoint):
distances = []
if possibleTimezones:
if len(possibleTimezones) == 1:
return possibleTimezones.pop()
else:
for tzname in possibleTimezones:
if isinstance(self.unprepTimezoneNamesToPolygons[tzname],
COLLECTION_TYPE):
self.unprepTimezoneNamesToPolygons[tzname] = list(
map(lambda p: p.context if isinstance(p, prepared.PreparedGeometry) else geometry.Polygon(p[0], p[1]),
self.timezoneNamesToPolygons[tzname]))
polyIndices = set(latTzOptions[tzname]).intersection(
set(lngTzOptions[tzname]))
for polyIndex in polyIndices:
poly = self.unprepTimezoneNamesToPolygons[
tzname][polyIndex]
d = poly.distance(queryPoint)
distances.append((d, tzname))
if len(distances) > 0:
return sorted(distances, key=lambda x: x[0])[0][1]
class prepareMap(object):
def __init__(self):
DEFAULT_SHORTCUTS = os.path.join(os.path.dirname(__file__),
'tz_world_shortcuts.json')
DEFAULT_POLYGONS = os.path.join(os.path.dirname(__file__),
'tz_world.json.gz')
featureCollection = read_tzworld(DEFAULT_POLYGONS)
pgen = feature_collection_polygons(featureCollection)
tzNamesToPolygons = collections.defaultdict(list)
for tzname, poly in pgen:
tzNamesToPolygons[tzname].append(poly)
for tzname, polys in tzNamesToPolygons.items():
tzNamesToPolygons[tzname] = \
WRAP(tzNamesToPolygons[tzname])
timezoneLongitudeShortcuts,\
timezoneLatitudeShortcuts = self.construct_shortcuts(
tzNamesToPolygons, tzwhere.SHORTCUT_DEGREES_LONGITUDE,
tzwhere.SHORTCUT_DEGREES_LATITUDE)
with open(DEFAULT_SHORTCUTS, 'w') as f:
json.dump(
(timezoneLongitudeShortcuts, timezoneLatitudeShortcuts), f)
@staticmethod
def construct_shortcuts(timezoneNamesToPolygons,
shortcut_long, shortcut_lat):
''' Construct our shortcuts for looking up polygons. Much faster
than using an r-tree '''
def find_min_max(ls, gridSize):
minLs = (math.floor(min(ls) / gridSize) *
gridSize)
maxLs = (math.floor(max(ls) / gridSize) *
gridSize)
return minLs, maxLs
timezoneLongitudeShortcuts = {}
timezoneLatitudeShortcuts = {}
for tzname in timezoneNamesToPolygons:
tzLngs = []
tzLats = []
for polyIndex, poly in enumerate(timezoneNamesToPolygons[tzname]):
lngs = [x[0] for x in poly[0]]
lats = [x[1] for x in poly[0]]
tzLngs.extend(lngs)
tzLats.extend(lats)
minLng, maxLng = find_min_max(
lngs, shortcut_long)
minLat, maxLat = find_min_max(
lats, shortcut_lat)
degree = minLng
while degree <= maxLng:
if degree not in timezoneLongitudeShortcuts:
timezoneLongitudeShortcuts[degree] =\
collections.defaultdict(list)
timezoneLongitudeShortcuts[degree][tzname].append(polyIndex)
degree = degree + shortcut_long
degree = minLat
while degree <= maxLat:
if degree not in timezoneLatitudeShortcuts:
timezoneLatitudeShortcuts[degree] =\
collections.defaultdict(list)
timezoneLatitudeShortcuts[degree][tzname].append(polyIndex)
degree = degree + shortcut_lat
return timezoneLongitudeShortcuts, timezoneLatitudeShortcuts
def read_tzworld(path):
reader = read_json
return reader(path)
def read_json(path):
with gzip.open(path, "rb") as f:
featureCollection = json.loads(f.read().decode("utf-8"))
return featureCollection
def feature_collection_polygons(featureCollection):
"""Turn a feature collection
into an iterator over polygons.
Given a featureCollection of the kind loaded from the json
input, unpack it to an iterator which produces a series of
(tzname, polygon) pairs, one for every polygon in the
featureCollection. Here tzname is a string and polygon is a
list of floats.
"""
for feature in featureCollection['features']:
tzname = feature['properties']['TZID']
if feature['geometry']['type'] == 'Polygon':
exterior = feature['geometry']['coordinates'][0]
interior = feature['geometry']['coordinates'][1:]
yield (tzname, (exterior, interior))
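# Example lookup (illustrative coordinates):
#   w = tzwhere()
#   w.tzNameAt(40.7128, -74.0060)  # -> 'America/New_York'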
if __name__ == "__main__":
prepareMap()
| 39.884615
| 130
| 0.610993
| 985
| 10,370
| 6.322843
| 0.26802
| 0.009634
| 0.037091
| 0.007707
| 0.189949
| 0.159602
| 0.159602
| 0.100514
| 0.086063
| 0.086063
| 0
| 0.003374
| 0.314079
| 10,370
| 259
| 131
| 40.03861
| 0.872206
| 0.137608
| 0
| 0.176796
| 0
| 0
| 0.024763
| 0.005249
| 0
| 0
| 0
| 0
| 0.005525
| 1
| 0.049724
| false
| 0
| 0.066298
| 0
| 0.19337
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072aa22d56a355822d78b2d3df97e983fe4fb836
| 4,783
|
py
|
Python
|
source/statuscodes.py
|
woody2371/fishbowl-api
|
f34ff9267436b1278985870fbf19863febdb391b
|
[
"MIT"
] | 6
|
2016-04-26T01:24:21.000Z
|
2021-05-13T07:48:15.000Z
|
source/statuscodes.py
|
USDev01/fishbowl-api
|
4d47e20d3385d5ebc001feec44aad321467a6d92
|
[
"MIT"
] | 3
|
2015-10-29T21:34:39.000Z
|
2021-11-08T15:22:30.000Z
|
source/statuscodes.py
|
USDev01/fishbowl-api
|
4d47e20d3385d5ebc001feec44aad321467a6d92
|
[
"MIT"
] | 12
|
2015-02-20T08:21:05.000Z
|
2021-11-06T22:27:04.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def getstatus(code):
if code == "1000":
value = "Success!"
elif code == "1001":
value = "Unknown Message Received"
elif code == "1002":
value = "Connection to Fishbowl Server was lost"
elif code == "1003":
value = "Some Requests had errors -- now isn't that helpful..."
elif code == "1004":
value = "There was an error with the database."
elif code == "1009":
value = "Fishbowl Server has been shut down."
elif code == "1010":
value = "You have been logged off the server by an administrator."
elif code == "1012":
value = "Unknown request function."
elif code == "1100":
value = "Unknown login error occurred."
elif code == "1110":
value = "A new Integrated Application has been added to Fishbowl Inventory. Please contact your Fishbowl Inventory Administrator to approve this Integrated Application."
elif code == "1111":
value = "This Integrated Application registration key does not match."
elif code == "1112":
value = "This Integrated Application has not been approved by the Fishbowl Inventory Administrator."
elif code == "1120":
value = "Invalid Username or Password."
elif code == "1130":
value = "Invalid Ticket passed to Fishbowl Inventory Server."
elif code == "1131":
value = "Invalid Key value."
elif code == "1140":
value = "Initialization token is not correct type."
elif code == "1150":
value = "Request was invalid"
elif code == "1160":
value = "Response was invalid."
elif code == "1162":
value = "The login limit has been reached for the server's key."
elif code == "1200":
value = "Custom Field is invalid."
elif code == "1500":
value = "The import was not properly formed."
elif code == "1501":
value = "That import type is not supported"
elif code == "1502":
value = "File not found."
elif code == "1503":
value = "That export type is not supported."
elif code == "1504":
value = "File could not be written to."
elif code == "1505":
value = "The import data was of the wrong type."
elif code == "2000":
value = "Was not able to find the Part {0}."
elif code == "2001":
value = "The part was invalid."
elif code == "2100":
value = "Was not able to find the Product {0}."
elif code == "2101":
value = "The product was invalid."
elif code == "2200":
value = "The yield failed."
elif code == "2201":
value = "Commit failed."
elif code == "2202":
value = "Add initial inventory failed."
elif code == "2203":
value = "Can not adjust committed inventory."
elif code == "2300":
value = "Was not able to find the Tag number {0}."
elif code == "2301":
value = "The tag is invalid."
elif code == "2302":
value = "The tag move failed."
elif code == "2303":
value = "Was not able to save Tag number {0}."
elif code == "2304":
value = "Not enough available inventory in Tagnumber {0}."
elif code == "2305":
value = "Tag number {0} is a location."
elif code == "2400":
value = "Invalid UOM."
elif code == "2401":
value = "UOM {0} not found."
elif code == "2402":
value = "Integer UOM {0} cannot have non-integer quantity."
elif code == "2500":
value = "The Tracking is not valid."
elif code == "2510":
value = "Serial number is missing."
elif code == "2511":
value = "Serial number is null."
elif code == "2512":
value = "Serial number is duplicate."
elif code == "2513":
value = "Serial number is not valid."
elif code == "2600":
value = "Location not found."
elif code == "2601":
value = "Invalid location."
elif code == "2602":
value = "Location Group {0} not found."
elif code == "3000":
value = "Customer {0} not found."
elif code == "3001":
value = "Customer is invalid."
elif code == "3100":
value = "Vendor {0} not found."
elif code == "3101":
value = "Vendor is invalid."
elif code == "4000":
value = "There was an error load PO {0}."
elif code == "4001":
value = "Unknow status {0}."
elif code == "4002":
value = "Unknown carrier {0}."
elif code == "4003":
value = "Unknown QuickBooks class {0}."
elif code == "4004":
value = "PO does not have a PO number. Please turn on the auto-assign PO number option in the purchase order module options."
else:
value = 'Unknown status'
return value
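# A table-driven sketch of the same mapping (illustrative subset only; a dict
# lookup avoids the long elif chain):
#   STATUS = {"1000": "Success!", "1120": "Invalid Username or Password."}
#   def getstatus_dict(code):
#       return STATUS.get(code, "Unknown status")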
| 37.367188
| 177
| 0.572653
| 596
| 4,783
| 4.595638
| 0.357383
| 0.172326
| 0.029573
| 0.035049
| 0.117196
| 0.045272
| 0.026287
| 0
| 0
| 0
| 0
| 0.076739
| 0.30253
| 4,783
| 127
| 178
| 37.661417
| 0.744305
| 0.007945
| 0
| 0
| 0
| 0.016129
| 0.480287
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008065
| false
| 0.016129
| 0.024194
| 0
| 0.040323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072b648fd224e151f6b9509016ac18b01f0c89c9
| 2,383
|
py
|
Python
|
preinstall_setup/makedeb-11.0.1-1-stable/src/makedeb/utils/missing_apt_dependencies.py
|
chipbuster/Energy-Languages-Setup
|
5b6192e1cc73f701a2310ac72520ed540d86c1ae
|
[
"BSD-3-Clause"
] | null | null | null |
preinstall_setup/makedeb-11.0.1-1-stable/src/makedeb/utils/missing_apt_dependencies.py
|
chipbuster/Energy-Languages-Setup
|
5b6192e1cc73f701a2310ac72520ed540d86c1ae
|
[
"BSD-3-Clause"
] | null | null | null |
preinstall_setup/makedeb-11.0.1-1-stable/src/makedeb/utils/missing_apt_dependencies.py
|
chipbuster/Energy-Languages-Setup
|
5b6192e1cc73f701a2310ac72520ed540d86c1ae
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import apt_pkg
import sys
from apt_pkg import CURSTATE_INSTALLED, version_compare
from operator import lt, le, eq, ge, gt
# Function mappings for relationship operators.
relation_operators = {"<<": lt, "<=": le, "=": eq, ">=": ge, ">>": gt}
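# apt_pkg.version_compare returns <0, 0, or >0 (strcmp-style); e.g. for a
# dependency "pkg >= 2.0" with 2.1 installed, ge(version_compare("2.1", "2.0"), 0)
# evaluates to True.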
# Set up APT cache.
apt_pkg.init()
cache = apt_pkg.Cache(None)
missing_packages = []
for i in sys.argv[1:]:
# Build the package relationship string for use by 'apt-get satisfy'.
relationship_operator = None
for j in ["<=", ">=", "<", ">", "="]:
if j in i:
relationship_operator = j
break
if relationship_operator is not None:
if relationship_operator in ["<", ">"]:
relationship_operator_formatted = relationship_operator * 2
else:
relationship_operator_formatted = relationship_operator
package = i.split(relationship_operator)
pkgname = package[0]
pkgver = package[1]
package_string = f"{pkgname} ({relationship_operator_formatted} {pkgver})"
else:
pkgname = i
pkgver = None
package_string = pkgname
# Check if the package is in the cache.
try:
pkg = cache[pkgname]
except KeyError:
missing_packages += [package_string]
continue
# Get the list of installed and provided packages that are currently installed.
installed_pkg_versions = []
if pkg.current_state == CURSTATE_INSTALLED:
installed_pkg_versions += [pkg]
for i in pkg.provides_list:
parent_pkg = i[2].parent_pkg
if parent_pkg.current_state == CURSTATE_INSTALLED:
installed_pkg_versions += [parent_pkg]
# If an installed package was found and no relationship operators were used, the dependency has been satisfied.
if (len(installed_pkg_versions) != 0) and (relationship_operator is None):
continue
# Otherwise, check all matching installed packages and see if any of them fit the specified relationship operator.
matched_pkg = False
for i in installed_pkg_versions:
installed_version = i.current_ver.ver_str
version_result = version_compare(installed_version, pkgver)
if relation_operators[relationship_operator_formatted](version_result, 0):
matched_pkg = True
if not matched_pkg:
missing_packages += [package_string]
for i in missing_packages:
print(i)
sys.exit(0)
| 29.419753
| 118
| 0.661771
| 295
| 2,383
| 5.145763
| 0.342373
| 0.144928
| 0.065876
| 0.057312
| 0.081686
| 0.068511
| 0.068511
| 0.068511
| 0
| 0
| 0
| 0.004474
| 0.249685
| 2,383
| 80
| 119
| 29.7875
| 0.844519
| 0.206462
| 0
| 0.115385
| 0
| 0
| 0.038237
| 0.018056
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072cc767332977c77810de1909be8f9a35cce2f6
| 3,784
|
py
|
Python
|
tasks/views.py
|
TheDim0n/ProjectManager
|
50d36e7e3fc71655aa5a82bb19eacc07172ba5e4
|
[
"MIT"
] | null | null | null |
tasks/views.py
|
TheDim0n/ProjectManager
|
50d36e7e3fc71655aa5a82bb19eacc07172ba5e4
|
[
"MIT"
] | 1
|
2020-09-08T11:10:53.000Z
|
2020-09-08T11:10:53.000Z
|
tasks/views.py
|
TheDim0n/ProjectManager
|
50d36e7e3fc71655aa5a82bb19eacc07172ba5e4
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import DetailView, ListView
from projects.models import Project
from status.models import Status
from .models import Task
from .forms import TaskForm, FilterForm
def _get_projects(user):
projects = [("All", "All"), ('---', '---')]
for item in Project.objects.filter(created_by=user):
projects.append((item.name, item.name))
return projects
def _get_statuses():
statuses = [("All", "All")]
for item in Status.objects.all():
statuses.append((item.text, item.text))
return statuses
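# e.g. _get_projects(user) -> [("All", "All"), ("---", "---"), ("Site", "Site")]
# ("Site" is an illustrative project name)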
class TaskListView(LoginRequiredMixin, ListView):
login_url = '/users/register'
model = Task
context_object_name = 'tasks'
template_name = 'tasks/index.html'
ordering = ['finish_date']
def get_queryset(self):
queryset = super().get_queryset()
for obj in queryset:
obj.check_expired()
return queryset
def get_context_data(self, *args, **kwargs):
try:
project_name = self.request.GET['project']
except KeyError:
project_name = ''
try:
status_name = self.request.GET['status']
except KeyError:
status_name = ''
if self.request.user.is_authenticated:
tasks = Task.objects.filter(created_by=self.request.user)
if project_name and project_name != "All":
if project_name == '---':
tasks = tasks.filter(level=None)
else:
tasks = tasks.filter(level__project__name=project_name)
if status_name and status_name != "All":
tasks = tasks.filter(status__text=status_name)
status_list = Status.objects.all()
last_initial = {
'status': status_name,
'project': project_name,
}
form = FilterForm(initial=last_initial)
form.fields['project'].choices = _get_projects(user=self.request.user)
form.fields['status'].choices = _get_statuses()
context = super(TaskListView, self).get_context_data(*args, **kwargs)
context['status_list'] = status_list
context['tasks'] = tasks
context['filter_form'] = form
context['task_form'] = TaskForm
return context
class TaskDetailView(DetailView):
model = Task
template_name = 'tasks/details.html'
def get_object(self):
obj = super().get_object()
obj.check_expired()
return obj
def get_context_data(self, *args, **kwargs):
initial_content = {
'name': self.object.name,
'start_date': self.object.start_date,
'finish_date': self.object.finish_date,
'status': self.object.status,
'description': self.object.description,
}
context = super(TaskDetailView, self).get_context_data(*args, **kwargs)
context['task_form'] = TaskForm(initial=initial_content)
return context
class TaskCreateView(LoginRequiredMixin, CreateView):
login_url = '/users/register'
model = Task
form_class = TaskForm
template_name = 'tasks/index.html'
def form_valid(self, form):
form.instance.created_by = self.request.user
return super().form_valid(form)
class TaskUpdateView(LoginRequiredMixin, UpdateView):
login_url = '/users/register'
model = Task
form_class = TaskForm
template_name = "tasks/update_task.html"
def form_valid(self, form):
self.object.check_expired()
return super().form_valid(form)
class TaskDeleteView(DeleteView):
model = Task
template_name = "tasks/delete_task.html"
| 31.798319
| 79
| 0.636628
| 421
| 3,784
| 5.534442
| 0.220903
| 0.037768
| 0.036481
| 0.027039
| 0.239056
| 0.169957
| 0.111588
| 0.054936
| 0.054936
| 0.054936
| 0
| 0
| 0.252907
| 3,784
| 118
| 80
| 32.067797
| 0.824195
| 0
| 0
| 0.268041
| 0
| 0
| 0.081395
| 0.011628
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082474
| false
| 0
| 0.072165
| 0
| 0.463918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072d2f9675748ff1a2131801c4afa2c1d8506223
| 2,083
|
py
|
Python
|
smoke/noaa/get_smokeplume_counts.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
smoke/noaa/get_smokeplume_counts.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
smoke/noaa/get_smokeplume_counts.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
import os
import math
import time
import geohash
import geojson
from geojson import MultiLineString
from shapely import geometry
import shapefile
import numpy
import datetime as dt
import pandas as pd
import logging
logger = logging.getLogger(__name__)
source_shape_file_path = "C:/temp/2018/"
threshold = 60*60
cols = ['start', 'end','start_epoch_round','end_epoch_round','start_epoch_round_dt','end_epoch_round_dt']
times = []
for root,dirs,files in os.walk(source_shape_file_path):
for file in files:
if not file.endswith(".shp"):
continue
try:
filename = file.replace(".shp","")
shape=shapefile.Reader(source_shape_file_path+filename+"/"+file)
for r in shape.iterRecords():
start_time = dt.datetime.strptime(r[1], '%Y%j %H%M')
end_time = dt.datetime.strptime(r[2], '%Y%j %H%M')
epoch_s = dt.datetime.timestamp(dt.datetime.strptime(r[1], '%Y%j %H%M'))
epoch_e = dt.datetime.timestamp(dt.datetime.strptime(r[2], '%Y%j %H%M'))
# sometimes start is later than end time, we'll assume the earlier time is start
epoch_end_round = round(max(epoch_s,epoch_e) / threshold) * threshold
epoch_start_round = round(min(epoch_s,epoch_e) / threshold) * threshold
epoch_end_round_dt = dt.datetime.utcfromtimestamp(3600 * ((max(epoch_s,epoch_e) + 1800) // 3600))
epoch_start_round_dt = dt.datetime.utcfromtimestamp(3600 * ((min(epoch_s,epoch_e) + 1800) // 3600))
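# 3600 * ((t + 1800) // 3600) rounds an epoch t to the nearest hour:
# e.g. t=5500 (1h31m40s) -> 7200, and t=7300 (2h01m40s) -> 7200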
times.append([start_time,end_time,epoch_start_round,epoch_end_round,epoch_start_round_dt,epoch_end_round_dt])
break
except Exception:
logger.error('failed to parse file:'+source_shape_file_path+filename+"/")
continue
df = pd.DataFrame(times, columns=cols)
df.to_csv('noaa_times.csv')
| 45.282609
| 133
| 0.610178
| 275
| 2,083
| 4.396364
| 0.334545
| 0.06617
| 0.049628
| 0.062862
| 0.332506
| 0.263027
| 0.168734
| 0.079404
| 0.079404
| 0
| 0
| 0.023889
| 0.276524
| 2,083
| 46
| 134
| 45.282609
| 0.778368
| 0.037446
| 0
| 0
| 0
| 0
| 0.086327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072d38a7e1316c182e6d46a18839cb0047e95249
| 3,965
|
py
|
Python
|
notes/OOBall/OOBall/main-demo.py
|
KRHS-GameProgramming-2015/Manpac
|
959bf7f5195a4edb528fbbf25b8896fcb28d5327
|
[
"BSD-2-Clause"
] | null | null | null |
notes/OOBall/OOBall/main-demo.py
|
KRHS-GameProgramming-2015/Manpac
|
959bf7f5195a4edb528fbbf25b8896fcb28d5327
|
[
"BSD-2-Clause"
] | 3
|
2016-01-19T17:26:16.000Z
|
2016-02-10T16:59:25.000Z
|
notes/OOBall/main-demo.py
|
KRHS-GameProgramming-2015/Manpac
|
959bf7f5195a4edb528fbbf25b8896fcb28d5327
|
[
"BSD-2-Clause"
] | null | null | null |
import pygame_sdl2
pygame_sdl2.import_as_pygame()
import pygame
import os
import random
import math
from Ball import Ball
def save_state(balls):
"""
Saves the game state.
"""
stateString = ""
with open("state.txt", "w") as f:
for ball in balls:
stateString += "{} {} {} {} {}".format(ball.imageFile,
ball.speedx,
ball.speedy,
ball.rect.centerx,
ball.rect.centery)
stateString += '\n'
f.write(stateString)
def load_state():
try:
objects = []
with open("state.txt", "r") as f:
for line in f.read().splitlines():
img, sx, sy, x, y = line.split()
objects.append(Ball(img, [int(sx), int(sy)], [int(x), int(y)]))
return objects
except Exception:
return None
def delete_state():
if os.path.exists("state.txt"):
os.unlink("state.txt")
def main():
pygame.init()
clock = pygame.time.Clock()
infoObject = pygame.display.Info()
#print infoObject.current_w
width = infoObject.current_w
height = infoObject.current_h
size = width, height
bgColor = r,g,b = 0, 0, 0
screen = pygame.display.set_mode(size)
pygame.display.set_mode()
balls = load_state()
delete_state()
if balls is None:
balls = []
ballTimer = 0
ballTimerMax = .75 * 60
done = False
sleeping = False
font = pygame.font.Font("DejaVuSans.ttf", 124)
text = font.render("Start", True, (255, 255, 255, 255))
textRect = text.get_rect(center = (width/2, height/2))
while not done:
for event in pygame.event.get():
text = font.render(str(event.type), True, (255, 255, 255, 255))
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN and event.key == pygame.K_AC_BACK:
done = True
elif event.type == pygame.APP_WILLENTERBACKGROUND:
# The app is about to go to sleep. It should save state, cancel
# any timers, and stop drawing the screen until an APP_DIDENTERFOREGROUND
# event shows up.
save_state(balls)
sleeping = True
elif event.type == pygame.APP_DIDENTERFOREGROUND:
# The app woke back up. Delete the saved state (we don't need it),
# restore any times, and start drawing the screen again.
delete_state()
sleeping = False
# For now, we have to re-open the window when entering the
# foreground.
screen = pygame.display.set_mode((1280, 720))
if not sleeping:
ballTimer += 1
if ballTimer >= ballTimerMax:
ballTimer = 0
ballSpeed = [random.randint(-5, 5),
random.randint(-5, 5)]
ballPos = [random.randint(100, width-100),
random.randint(100, height-100)]
balls += [Ball("ball.png",ballSpeed,ballPos)]
save_state(balls)
for ball in balls:
ball.move()
ball.collideScreen(size)
for first in balls:
for second in balls:
if first != second:
first.collideBall(second)
bgColor = r,g,b
screen.fill(bgColor)
for ball in balls:
screen.blit(ball.image, ball.rect)
screen.blit(text, textRect)
pygame.display.flip()
clock.tick(60)
if done:
break
if __name__ == "__main__":
main()
| 28.941606
| 89
| 0.49256
| 427
| 3,965
| 4.498829
| 0.379391
| 0.01874
| 0.01874
| 0.021864
| 0.086934
| 0.043207
| 0
| 0
| 0
| 0
| 0
| 0.028145
| 0.408575
| 3,965
| 136
| 90
| 29.154412
| 0.791045
| 0.097604
| 0
| 0.138298
| 0
| 0
| 0.025092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.074468
| 0
| 0.138298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072ddb9bbab8925228b0922af5e12f46301684b7
| 6,408
|
py
|
Python
|
sprt.py
|
vdbergh/pentanomial
|
d046e74acde3f961c7afd22fc4f82fa5aeb4c0fd
|
[
"MIT"
] | 3
|
2020-02-05T12:39:59.000Z
|
2021-01-04T15:41:40.000Z
|
sprt.py
|
vdbergh/pentanomial
|
d046e74acde3f961c7afd22fc4f82fa5aeb4c0fd
|
[
"MIT"
] | 2
|
2020-02-17T20:09:56.000Z
|
2021-11-21T12:47:33.000Z
|
sprt.py
|
vdbergh/pentanomial
|
d046e74acde3f961c7afd22fc4f82fa5aeb4c0fd
|
[
"MIT"
] | null | null | null |
from __future__ import division
import math, copy
import argparse
from brownian import Brownian
import scipy
import LLRcalc
class sprt:
def __init__(self, alpha=0.05, beta=0.05, elo0=0, elo1=5, elo_model="logistic"):
assert elo_model in ("logistic", "normalized")
self.elo_model = elo_model
self.a = math.log(beta / (1 - alpha))
self.b = math.log((1 - beta) / alpha)
self.elo0 = elo0
self.elo1 = elo1
self.clamped = False
self.LLR_drift_variance = LLRcalc.LLR_drift_variance_alt2
def elo_to_score(self, elo):
"""
"elo" is expressed in our current elo_model.
"""
if self.elo_model == "normalized":
nt = elo / LLRcalc.nelo_divided_by_nt
return nt * self.sigma_pg + 0.5
else:
return LLRcalc.L_(elo)
def lelo_to_elo(self, lelo):
"""
For external use. "elo" is expressed in our current elo_model.
"lelo" is logistic.
"""
if self.elo_model == "logistic":
return lelo
score = LLRcalc.L_(lelo)
nt = (score - 0.5) / self.sigma_pg
return nt * LLRcalc.nelo_divided_by_nt
def set_state(self, results):
N, self.pdf = LLRcalc.results_to_pdf(results)
if self.elo_model == "normalized":
mu, var = LLRcalc.stats(self.pdf) # code duplication with LLRcalc
if len(results) == 5:
self.sigma_pg = (2 * var) ** 0.5
elif len(results) == 3:
self.sigma_pg = var ** 0.5
else:
assert False
self.s0, self.s1 = [self.elo_to_score(elo) for elo in (self.elo0, self.elo1)]
mu_LLR, var_LLR = self.LLR_drift_variance(self.pdf, self.s0, self.s1, None)
# llr estimate
self.llr = N * mu_LLR
self.T = N
# now normalize llr (if llr is not legal then the implications
# of this are unclear)
slope = self.llr / N
if self.llr > 1.03 * self.b or self.llr < 1.03 * self.a:
self.clamped = True
if self.llr < self.a:
self.T = self.a / slope
self.llr = self.a
elif self.llr > self.b:
self.T = self.b / slope
self.llr = self.b
def outcome_prob(self, elo):
"""
The probability of a test with the given elo with worse outcome
(faster fail, slower pass or a pass changed into a fail).
"""
s = LLRcalc.L_(elo)
mu_LLR, var_LLR = self.LLR_drift_variance(self.pdf, self.s0, self.s1, s)
sigma_LLR = math.sqrt(var_LLR)
return Brownian(a=self.a, b=self.b, mu=mu_LLR, sigma=sigma_LLR).outcome_cdf(
T=self.T, y=self.llr
)
def lower_cb(self, p):
"""
Maximal elo value such that the observed outcome of the test has probability
less than p.
"""
avg_elo = (self.elo0 + self.elo1) / 2
delta = self.elo1 - self.elo0
N = 30
# Various error conditions must be handled better here!
while True:
elo0 = max(avg_elo - N * delta, -1000)
elo1 = min(avg_elo + N * delta, 1000)
try:
sol, res = scipy.optimize.brentq(
lambda elo: self.outcome_prob(elo) - (1 - p),
elo0,
elo1,
full_output=True,
disp=False,
)
except ValueError:
if elo0 > -1000 or elo1 < 1000:
N *= 2
continue
else:
if self.outcome_prob(elo0) - (1 - p) > 0:
return elo1
else:
return elo0
assert res.converged
break
return sol
def analytics(self, p=0.05):
ret = {}
ret["clamped"] = self.clamped
ret["a"] = self.a
ret["b"] = self.b
ret["elo"] = self.lower_cb(0.5)
ret["ci"] = [self.lower_cb(p / 2), self.lower_cb(1 - p / 2)]
ret["LOS"] = self.outcome_prob(0)
ret["LLR"] = self.llr
return ret
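# Illustrative usage sketch (not part of the original module; the counts below
# are made up): programmatic use with pentanomial frequencies, low to high.
#   s = sprt(alpha=0.05, beta=0.05, elo0=0, elo1=5)
#   s.set_state([10, 25, 40, 25, 10])
#   print(s.analytics(0.05)["elo"])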
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--alpha", help="probability of a false positve", type=float, default=0.05
)
parser.add_argument(
"--beta", help="probability of a false negative", type=float, default=0.05
)
parser.add_argument(
"--elo0", help="H0 (expressed in LogisticElo)", type=float, default=0.0
)
parser.add_argument(
"--elo1", help="H1 (expressed in LogisticElo)", type=float, default=5.0
)
parser.add_argument("--level", help="confidence level", type=float, default=0.95)
parser.add_argument(
"--elo-model",
help="logistic or normalized",
choices=['logistic', 'normalized'],
default='logistic',
)
parser.add_argument(
"--results",
help="trinomial of pentanomial frequencies, low to high",
nargs="*",
type=int,
required=True,
)
args = parser.parse_args()
results = args.results
if len(results) != 3 and len(results) != 5:
parser.error("argument --results: expected 3 or 5 arguments")
alpha = args.alpha
beta = args.beta
elo0 = args.elo0
elo1 = args.elo1
elo_model = args.elo_model
p = 1 - args.level
s = sprt(alpha=alpha, beta=beta, elo0=elo0, elo1=elo1, elo_model=elo_model)
s.set_state(results)
a = s.analytics(p)
print("Design parameters")
print("=================")
print("False positives : %4.2f%%" % (100 * alpha,))
print("False negatives : %4.2f%%" % (100 * beta,))
print("[Elo0,Elo1] : [%.2f,%.2f]" % (elo0, elo1))
print("Confidence level : %4.2f%%" % (100 * (1 - p),))
print("Elo model : %s" % elo_model)
print("Estimates")
print("=========")
print("Elo : %.2f" % a["elo"])
print(
"Confidence interval : [%.2f,%.2f] (%4.2f%%)"
% (a["ci"][0], a["ci"][1], 100 * (1 - p))
)
print("LOS : %4.2f%%" % (100 * a["LOS"],))
print("Context")
print("=======")
print(
"LLR [u,l] : %.2f %s [%.2f,%.2f]"
% (a["LLR"], "(clamped)" if a["clamped"] else "", a["a"], a["b"])
)
| 33.726316
| 85
| 0.523096
| 821
| 6,408
| 3.970767
| 0.239951
| 0.039264
| 0.036503
| 0.020859
| 0.157669
| 0.096933
| 0.07362
| 0.07362
| 0.030675
| 0.030675
| 0
| 0.037343
| 0.339732
| 6,408
| 189
| 86
| 33.904762
| 0.73316
| 0.082553
| 0
| 0.089172
| 0
| 0
| 0.149307
| 0
| 0
| 0
| 0
| 0
| 0.019108
| 1
| 0.044586
| false
| 0
| 0.038217
| 0
| 0.146497
| 0.095541
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072e395e8cbf167e556a1f0e76894f388e49246e
| 17,956
|
py
|
Python
|
tools/hci_throughput/hci.py
|
t3zeng/mynewt-nimble
|
e910132947d6b3cd61ef4732867382634178aa08
|
[
"Apache-2.0"
] | null | null | null |
tools/hci_throughput/hci.py
|
t3zeng/mynewt-nimble
|
e910132947d6b3cd61ef4732867382634178aa08
|
[
"Apache-2.0"
] | null | null | null |
tools/hci_throughput/hci.py
|
t3zeng/mynewt-nimble
|
e910132947d6b3cd61ef4732867382634178aa08
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from dataclasses import dataclass
import struct
from binascii import unhexlify
import random
############
# DEFINES
############
AF_BLUETOOTH = 31
HCI_CHANNEL_USER = 1
HCI_COMMAND_PACKET = 0x01
HCI_ACL_DATA_PACKET = 0x02
HCI_EVENT_PACKET = 0x04
HCI_EV_CODE_DISCONN_CMP = 0x05
HCI_EV_CODE_CMD_CMP = 0x0e
HCI_EV_CODE_CMD_STATUS = 0x0f
HCI_EV_CODE_LE_META_EVENT = 0x3e
HCI_SUBEV_CODE_LE_ENHANCED_CONN_CMP = 0x0a
HCI_SUBEV_CODE_LE_DATA_LEN_CHANGE = 0x07
HCI_SUBEV_CODE_LE_PHY_UPDATE_CMP = 0x0c
HCI_SUBEV_CODE_LE_CHAN_SEL_ALG = 0x14
HCI_EV_NUM_COMP_PKTS = 0x13
CONN_FAILED_TO_BE_ESTABLISHED = 0x3e
CONN_TIMEOUT = 0x08
OGF_HOST_CTL = 0x03
OCF_SET_EVENT_MASK = 0x0001
OCF_RESET = 0x0003
OGF_INFO_PARAM = 0x04
OCF_READ_LOCAL_COMMANDS = 0x0002
OCF_READ_BD_ADDR = 0x0009
OGF_LE_CTL = 0x08
OCF_LE_SET_EVENT_MASK = 0x0001
OCF_LE_READ_BUFFER_SIZE_V1 = 0x0002
OCF_LE_READ_BUFFER_SIZE_V2 = 0x0060
OCF_LE_SET_RANDOM_ADDRESS = 0x0005
OCF_LE_SET_ADVERTISING_PARAMETERS = 0x0006
OCF_LE_SET_ADVERTISE_ENABLE = 0x000a
OCF_LE_SET_SCAN_PARAMETERS = 0x000b
OCF_LE_SET_SCAN_ENABLE = 0x000c
OCF_LE_CREATE_CONN = 0x000d
OCF_LE_SET_DATA_LEN = 0x0022
OCF_LE_READ_SUGGESTED_DFLT_DATA_LEN = 0x0023
OCF_LE_READ_MAX_DATA_LEN = 0x002f
OCF_LE_READ_PHY = 0x0030
OCF_LE_SET_DFLT_PHY = 0x0031
OCF_LE_SET_PHY = 0x0032
OGF_VENDOR_SPECIFIC = 0x003f
BLE_HCI_OCF_VS_RD_STATIC_ADDR = 0x0001
PUBLIC_ADDRESS_TYPE = 0
STATIC_RANDOM_ADDRESS_TYPE = 1
WAIT_FOR_EVENT_TIMEOUT = 5
WAIT_FOR_EVENT_CONN_TIMEOUT = 25
############
# GLOBAL VAR
############
num_of_bytes_to_send = None # based on supported_max_tx_octets
num_of_packets_to_send = None
events_list = []
bdaddr = '00:00:00:00:00:00'
static_addr = '00:00:00:00:00:00'
le_read_buffer_size = None
conn_handle = 0
requested_tx_octets = 1
requested_tx_time = 1
suggested_dflt_data_len = None
max_data_len = None
phy = None
ev_num_comp_pkts = None
num_of_completed_packets_cnt = 0
num_of_completed_packets_time = 0
############
# FUNCTIONS
############
def get_opcode(ogf: int, ocf: int):
    return (ocf & 0x03ff) | (ogf << 10)
def get_ogf_ocf(opcode: int):
ogf = opcode >> 10
ocf = opcode & 0x03ff
return ogf, ocf
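# Example (illustrative): opcode round trip for HCI_Reset (OGF_HOST_CTL, OCF_RESET).
#   opcode = get_opcode(OGF_HOST_CTL, OCF_RESET)            # -> 0x0c03
#   assert get_ogf_ocf(opcode) == (OGF_HOST_CTL, OCF_RESET)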
def cmd_addr_to_ba(addr_str: str):
return unhexlify("".join(addr_str.split(':')))[::-1]
def ba_addr_to_str(addr_ba: bytearray):
addr_str = addr_ba.hex().upper()
return ':'.join(addr_str[i:i+2] for i in range(len(addr_str), -2, -2))[1:]
def gen_static_rand_addr():
while True:
x = [random.randint(0,1) for _ in range(0,48)]
if 0 in x[:-2] and 1 in x[:-2]:
x[0] = 1
x[1] = 1
break
addr_int = int("".join([str(x[i]) for i in range(0,len(x))]), 2)
addr_hex = "{0:0{1}x}".format(addr_int, 12)
addr = ":".join(addr_hex[i:i+2] for i in range(0, len(addr_hex), 2))
return addr.upper()
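# Sanity check (illustrative): the two most significant bits of a static random
# address are forced to 1 above, so the first octet is always >= 0xC0.
#   addr = gen_static_rand_addr()
#   assert int(addr.split(':')[0], 16) & 0xC0 == 0xC0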
############
# GLOBAL VAR CLASSES
############
@dataclass
class Suggested_Dflt_Data_Length():
status: int
suggested_max_tx_octets: int
suggested_max_tx_time: int
def __init__(self):
self.set()
def set(self, status=0, suggested_max_tx_octets=0, suggested_max_tx_time=0):
self.status = status
self.suggested_max_tx_octets = suggested_max_tx_octets
self.suggested_max_tx_time = suggested_max_tx_time
@dataclass
class Max_Data_Length():
status: int
supported_max_tx_octets: int
supported_max_tx_time: int
supported_max_rx_octets: int
supported_max_rx_time: int
def __init__(self):
self.set()
def set(self, status=0, supported_max_tx_octets=0, supported_max_tx_time=0,
supported_max_rx_octets=0, supported_max_rx_time=0):
self.status = status
self.supported_max_tx_octets = supported_max_tx_octets
self.supported_max_tx_time = supported_max_tx_time
self.supported_max_rx_octets = supported_max_rx_octets
self.supported_max_rx_time = supported_max_rx_time
@dataclass
class LE_Read_Buffer_Size:
status: int
le_acl_data_packet_length: int
total_num_le_acl_data_packets: int
iso_data_packet_len: int
total_num_iso_data_packets: int
def __init__(self):
self.set()
def set(self, status=0, le_acl_data_packet_length=0,
total_num_le_acl_data_packets=0, iso_data_packet_len=0,
total_num_iso_data_packets=0):
self.status = status
self.le_acl_data_packet_length = le_acl_data_packet_length
self.total_num_le_acl_data_packets = total_num_le_acl_data_packets
self.iso_data_packet_len = iso_data_packet_len
self.total_num_iso_data_packets = total_num_iso_data_packets
@dataclass
class LE_Read_PHY:
status: int
connection_handle: int
tx_phy: int
rx_phy: int
def __init__(self):
self.set()
def set(self, status=0, connection_handle=0, tx_phy=0, rx_phy=0):
self.status = status
self.connection_handle = connection_handle
self.tx_phy = tx_phy
self.rx_phy = rx_phy
############
# EVENTS
############
@dataclass
class HCI_Ev_Disconn_Complete:
status: int
connection_handle: int
reason: int
def __init__(self):
self.set()
def set(self, status=0, connection_handle=0, reason=0):
self.status = status
self.connection_handle = connection_handle
self.reason = reason
@dataclass
class HCI_Ev_Cmd_Complete:
num_hci_command_packets: int
opcode: int
return_parameters: int
def __init__(self):
self.set()
def set(self, num_hci_cmd_packets=0, opcode=0, return_parameters=b''):
self.num_hci_command_packets = num_hci_cmd_packets
self.opcode = opcode
self.return_parameters = return_parameters
@dataclass
class HCI_Ev_Cmd_Status:
status: int
num_hci_command_packets: int
opcode: int
def __init__(self):
self.set()
def set(self, status = 0, num_hci_cmd_packets=0, opcode=0):
self.status = status
self.num_hci_command_packets = num_hci_cmd_packets
self.opcode = opcode
@dataclass
class HCI_Ev_LE_Meta:
subevent_code: int
def __init__(self):
self.set()
def set(self, subevent_code=0):
self.subevent_code = subevent_code
@dataclass
class HCI_Ev_LE_Enhanced_Connection_Complete(HCI_Ev_LE_Meta):
status: int
connection_handle: int
role: int
peer_address_type: int
peer_address: str
local_resolvable_private_address: int
peer_resolvable_private_address: int
connection_interval: int
peripheral_latency: int
supervision_timeout: int
central_clock_accuracy: int
def __init__(self):
self.set()
def set(self, subevent_code=0, status=0, connection_handle=0, role=0,
peer_address_type=0, peer_address='00:00:00:00:00:00',
local_resolvable_private_address='00:00:00:00:00:00',
peer_resolvable_private_address='00:00:00:00:00:00',
connection_interval=0, peripheral_latency=0, supervision_timeout=0,
central_clock_accuracy=0):
super().set(subevent_code)
self.status = status
self.connection_handle = connection_handle
self.role = role
self.peer_address_type = peer_address_type
self.peer_address = peer_address
self.local_resolvable_private_address = local_resolvable_private_address
self.peer_resolvable_private_address = peer_resolvable_private_address
self.connection_interval = connection_interval
self.peripheral_latency = peripheral_latency
self.supervision_timeout = supervision_timeout
self.central_clock_accuracy = central_clock_accuracy
@dataclass
class HCI_Ev_LE_Data_Length_Change(HCI_Ev_LE_Meta):
conn_handle: int
max_tx_octets: int
max_tx_time: int
max_rx_octets: int
max_rx_time: int
triggered: int
def __init__(self):
self.set()
def set(self, subevent_code=0, conn_handle=0, max_tx_octets=0,
max_tx_time=0, max_rx_octets=0, max_rx_time=0, triggered=0):
super().set(subevent_code)
self.conn_handle = conn_handle
self.max_tx_octets = max_tx_octets
self.max_tx_time = max_tx_time
self.max_rx_octets = max_rx_octets
self.max_rx_time = max_rx_time
self.triggered = triggered
@dataclass
class HCI_Ev_LE_PHY_Update_Complete(HCI_Ev_LE_Meta):
status: int
connection_handle: int
tx_phy: int
rx_phy: int
def __init__(self):
self.set()
def set(self, subevent_code=0, status=0, connection_handle=0,
tx_phy=0, rx_phy=0):
super().set(subevent_code)
self.status = status
self.connection_handle = connection_handle
self.tx_phy = tx_phy
self.rx_phy = rx_phy
@dataclass
class HCI_Number_Of_Completed_Packets:
num_handles: int
connection_handle: int
num_completed_packets: int
def __init__(self):
self.set()
def set(self, num_handles=0, connection_handle=0, num_completed_packets=0):
self.num_handles = num_handles
self.connection_handle = connection_handle
self.num_completed_packets = num_completed_packets
@dataclass
class HCI_Ev_LE_Chan_Sel_Alg(HCI_Ev_LE_Meta):
connection_handle: int
algorithm: int
def __init__(self):
self.set()
def set(self, subevent_code=0, connection_handle=0, algorithm=0):
super().set(subevent_code)
self.connection_handle = connection_handle
self.algorithm = algorithm
############
# PARAMETERS
############
@dataclass
class HCI_Advertising:
advertising_interval_min: int
advertising_interval_max: int
advertising_type: int
own_address_type: int
peer_address_type: int
peer_address: str
advertising_channel_map: int
advertising_filter_policy: int
ba_full_message: bytearray
def __init__(self):
self.set()
def set(self, advertising_interval_min=0, advertising_interval_max=0, \
advertising_type=0, own_address_type=0, peer_address_type=0, \
peer_address='00:00:00:00:00:00', advertising_channel_map=0, \
advertising_filter_policy=0):
self.advertising_interval_min = advertising_interval_min
self.advertising_interval_max = advertising_interval_max
self.advertising_type = advertising_type
self.own_address_type = own_address_type
self.peer_address_type = peer_address_type
self.peer_address = peer_address
self.advertising_channel_map = advertising_channel_map
self.advertising_filter_policy = advertising_filter_policy
self.ba_full_message = bytearray(struct.pack('<HHBBBBB',
advertising_interval_min, advertising_interval_max,
advertising_type, own_address_type, peer_address_type,
advertising_channel_map, advertising_filter_policy))
peer_addr_ba = cmd_addr_to_ba(peer_address)
self.ba_full_message[7:7] = peer_addr_ba
@dataclass
class HCI_Scan:
le_scan_type: int
le_scan_interval: int
le_scan_window: int
own_address_type: int
scanning_filter_policy: int
ba_full_message: bytearray
def __init__(self):
self.set()
def set(self, le_scan_type=0, le_scan_interval=0, le_scan_window=0,
own_address_type=0, scanning_filter_policy=0):
self.le_scan_type = le_scan_type
self.le_scan_interval = le_scan_interval
self.le_scan_window = le_scan_window
self.own_address_type = own_address_type
self.scanning_filter_policy = scanning_filter_policy
self.ba_full_message = bytearray(struct.pack('<BHHBB',le_scan_type,
le_scan_interval, le_scan_window, own_address_type,
scanning_filter_policy))
@dataclass
class HCI_Connect:
le_scan_interval: int
le_scan_window: int
initiator_filter_policy: int
peer_address_type: int
peer_address: str
own_address_type: int
connection_interval_min: int
connection_interval_max: int
max_latency: int
supervision_timeout: int
min_ce_length: int
max_ce_length: int
ba_full_message: bytearray
def __init__(self):
self.set()
def set(self, le_scan_interval=0, le_scan_window=0, \
initiator_filter_policy=0, peer_address_type=0, \
peer_address='00:00:00:00:00:00', own_address_type=0, \
connection_interval_min=0, connection_interval_max=0, \
max_latency=0, supervision_timeout=0, min_ce_length=0, \
max_ce_length=0):
self.le_scan_interval = le_scan_interval
self.le_scan_window = le_scan_window
self.initiator_filter_policy = initiator_filter_policy
self.peer_address_type = peer_address_type
self.peer_address = peer_address
self.own_address_type = own_address_type
self.connection_interval_min = connection_interval_min
self.connection_interval_max = connection_interval_max
self.max_latency = max_latency
self.supervision_timeout = supervision_timeout
self.min_ce_length = min_ce_length
self.max_ce_length = max_ce_length
self.ba_full_message = bytearray(struct.pack('<HHBBBHHHHHH',
le_scan_interval, le_scan_window, initiator_filter_policy,
peer_address_type, own_address_type, connection_interval_min,
connection_interval_max, max_latency,supervision_timeout,
min_ce_length, max_ce_length))
peer_addr_ba = cmd_addr_to_ba(peer_address)
self.ba_full_message[6:6] = peer_addr_ba
############
# RX / TX
############
@dataclass
class HCI_Receive:
packet_type: int
def __init__(self):
self.set()
def set(self,packet_type=0):
self.packet_type = packet_type
@dataclass
class HCI_Recv_Event_Packet(HCI_Receive):
ev_code: int
packet_len: int
recv_data: bytearray
current_event: None
def __init__(self):
self.set()
def set(self,packet_type=0, ev_code=0, packet_len=0,
recv_data=bytearray(256)):
super().set(packet_type)
self.ev_code = ev_code
self.packet_len = packet_len
self.recv_data = recv_data
self.recv_data = recv_data[:packet_len]
@dataclass
class HCI_Recv_ACL_Data_Packet(HCI_Receive):
connection_handle: int
pb_flag: int
bc_flag: int
data_total_len: int
data: bytearray
def __init__(self):
self.set()
def set(self, packet_type=0, connection_handle=0,
pb_flag=0, bc_flag=0, total_data_len=0, data=b''):
super().set(packet_type)
self.connection_handle = connection_handle
self.pb_flag = pb_flag
self.bc_flag = bc_flag
self.data_total_len = total_data_len
self.data = data
@dataclass
class HCI_Recv_L2CAP_Data:
pdu_length: int
channel_id: int
data: bytearray
def __init__(self):
self.set()
def set(self, pdu_length=0, channel_id=0, data=b''):
self.pdu_length = pdu_length
self.channel_id = channel_id
self.data = data
@dataclass
class HCI_Cmd_Send:
packet_type: int
ogf: int
ocf: int
packet_len: int
data: bytearray
ba_full_message: bytearray
def __init__(self):
self.set()
def set(self, ogf=0, ocf=0, data=b''):
self.packet_type = HCI_COMMAND_PACKET
self.ogf = ogf
self.ocf = ocf
self.opcode = get_opcode(ogf, ocf)
self.packet_len = len(data)
self.data = data
self.ba_full_message = bytearray(struct.pack('<BHB',
self.packet_type, self.opcode, self.packet_len))
self.ba_full_message.extend(self.data)
@dataclass
class HCI_ACL_Data_Send:
packet_type: int
connection_handle: int
pb_flag: int
bc_flag: int
data_total_length: int
data: bytearray
ba_full_message: bytearray
def __init__(self):
self.set()
def set(self, connection_handle=0, pb_flag=0b00, bc_flag=0b00, data=b''):
self.packet_type = HCI_ACL_DATA_PACKET
self.connection_handle = connection_handle
self.pb_flag = pb_flag
self.bc_flag = bc_flag
self.data_total_length = len(data)
self.data = data
self.ba_full_message = bytearray(struct.pack('<BHH',
self.packet_type,
((self.connection_handle & 0x0eff) |
(self.pb_flag << 12) |
(self.bc_flag << 14)),
self.data_total_length))
self.ba_full_message.extend(self.data)
@dataclass
class L2CAP_Data_Send:
pdu_length: int
channel_id: int
data: bytearray
ba_full_message: bytearray
def __init__(self):
self.set()
def set(self, pdu_length=0, channel_id=0, data=b''):
if not pdu_length:
self.pdu_length = len(data)
else:
self.pdu_length = pdu_length
self.channel_id = channel_id
self.data = data
fmt_conf = "<HH"
self.ba_full_message = bytearray(struct.pack(fmt_conf,
self.pdu_length, self.channel_id))
self.ba_full_message.extend(data)
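# Example (illustrative): build the HCI_Reset command and inspect its wire bytes.
#   cmd = HCI_Cmd_Send()
#   cmd.set(ogf=OGF_HOST_CTL, ocf=OCF_RESET)
#   cmd.ba_full_message.hex()   # -> '01030c00' (packet type, opcode LE, length 0)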
| 29.630363
| 80
| 0.695868
| 2,576
| 17,956
| 4.428183
| 0.117236
| 0.012273
| 0.014728
| 0.030245
| 0.467432
| 0.371263
| 0.3312
| 0.304199
| 0.272903
| 0.250022
| 0
| 0.028414
| 0.219926
| 17,956
| 605
| 81
| 29.679339
| 0.785964
| 0.047783
| 0
| 0.409664
| 0
| 0
| 0.009939
| 0
| 0
| 0
| 0.012542
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.008403
| 0.004202
| 0.407563
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072e3ac42c4ae28edac6abdd5c5b9e36d1f69c84
| 1,253
|
py
|
Python
|
examples/dataproc/query.py
|
populationgenomics/analysis-runner
|
f42bedb1dc430a813350fb4b5514bcc7b845f0fc
|
[
"MIT"
] | null | null | null |
examples/dataproc/query.py
|
populationgenomics/analysis-runner
|
f42bedb1dc430a813350fb4b5514bcc7b845f0fc
|
[
"MIT"
] | 51
|
2021-01-26T07:09:54.000Z
|
2022-03-29T03:44:01.000Z
|
examples/dataproc/query.py
|
populationgenomics/analysis-runner
|
f42bedb1dc430a813350fb4b5514bcc7b845f0fc
|
[
"MIT"
] | 2
|
2021-12-07T17:12:07.000Z
|
2022-03-23T00:50:44.000Z
|
"""Simple Hail query example."""
import click
import hail as hl
from bokeh.io.export import get_screenshot_as_png
from analysis_runner import output_path
GNOMAD_HGDP_1KG_MT = (
'gs://gcp-public-data--gnomad/release/3.1/mt/genomes/'
'gnomad.genomes.v3.1.hgdp_1kg_subset_dense.mt'
)
@click.command()
@click.option('--rerun', help='Whether to overwrite cached files', is_flag=True, default=False)
def query(rerun):
"""Query script entry point."""
hl.init(default_reference='GRCh38')
sample_qc_path = output_path('sample_qc.mt')
if rerun or not hl.hadoop_exists(sample_qc_path):
mt = hl.read_matrix_table(GNOMAD_HGDP_1KG_MT)
mt = mt.head(100, n_cols=100)
mt_qc = hl.sample_qc(mt)
mt_qc.write(sample_qc_path)
mt_qc = hl.read_matrix_table(sample_qc_path)
plot_filename = output_path('call_rate_plot.png', 'web')
if rerun or not hl.hadoop_exists(plot_filename):
call_rate_plot = hl.plot.histogram(
mt_qc.sample_qc.call_rate, range=(0, 1), legend='Call rate'
)
with hl.hadoop_open(plot_filename, 'wb') as f:
get_screenshot_as_png(call_rate_plot).save(f, format='PNG')
if __name__ == '__main__':
query() # pylint: disable=no-value-for-parameter
| 30.560976
| 81
| 0.695132
| 194
| 1,253
| 4.175258
| 0.463918
| 0.069136
| 0.059259
| 0.044444
| 0.064198
| 0.064198
| 0.064198
| 0
| 0
| 0
| 0
| 0.016537
| 0.179569
| 1,253
| 40
| 82
| 31.325
| 0.771401
| 0.073424
| 0
| 0
| 0
| 0
| 0.171304
| 0.083478
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.142857
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072e6fc797520341c47d9f0dd007069870cb1147
| 17,420
|
py
|
Python
|
ptpip/ptpip.py
|
darkarnium/ptpip
|
c54eed4d7509ecfc6973a00496a9e80fb7473fa2
|
[
"Apache-2.0"
] | null | null | null |
ptpip/ptpip.py
|
darkarnium/ptpip
|
c54eed4d7509ecfc6973a00496a9e80fb7473fa2
|
[
"Apache-2.0"
] | null | null | null |
ptpip/ptpip.py
|
darkarnium/ptpip
|
c54eed4d7509ecfc6973a00496a9e80fb7473fa2
|
[
"Apache-2.0"
] | null | null | null |
import uuid
import time
import socket
import struct
class PtpIpConnection(object):
"""docstring for PtpIP"""
def __init__(self):
super(PtpIpConnection, self).__init__()
self.session = None
self.session_events = None
self.session_id = None
self.cmd_queue = []
self.event_queue = []
self.object_queue = []
def open(self, host='192.168.1.1', port=15740):
        # Open both sessions: the first for commands, the second for events
self.session = self.connect(host=host, port=port)
self.send_recieve_ptpip_packet(PtpIpInitCmdReq(), self.session)
self.session_events = self.connect(host=host, port=port)
self.send_recieve_ptpip_packet(PtpIpEventReq(), self.session_events)
# 0x1002 OpenSession
        ptip_cmd = PtpIpCmdRequest(cmd=0x1002, param1=struct.unpack('<L', self.session_id)[0])
self.send_recieve_ptpip_packet(ptip_cmd, self.session)
def communication_thread(self):
while True:
if len(self.cmd_queue) == 0:
                # send a ping and receive a pong (same as a ping) as a reply to
                # keep the connection alive; we couldn't get any reply to a proper
                # PtpIpPing packet, so we query the status of the device instead
ptpip_packet_reply = self.send_recieve_ptpip_packet(PtpIpCmdRequest(cmd=0x90C8),
self.session)
if isinstance(ptpip_packet_reply, PtpIpCmdResponse):
time.sleep(1)
continue
else:
# get the next command from command the queue
ptip_cmd = self.cmd_queue.pop()
ptpip_packet_reply = self.send_recieve_ptpip_packet(ptip_cmd, self.session)
                if (ptpip_packet_reply.ptp_response_code == 0x2001 or
                        ptpip_packet_reply.ptp_response_code == 0x2019):
                    print("Cmd sent successfully")
else:
print(f"cmd reply is: {ptpip_packet_reply.ptp_response_code}")
                # wait 1 second before new packets are processed/sent to the camera
time.sleep(1)
pass
def send_ptpip_cmd(self, ptpip_packet):
self.cmd_queue.append(ptpip_packet)
def connect(self, host='192.168.1.1', port=15740):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
s.connect((host, port))
except socket.error as message:
if s:
s.close()
print(f"Could not open socket: {message}")
return s
def send_recieve_ptpip_packet(self, ptpip_packet, session):
if isinstance(ptpip_packet, PtpIpInitCmdReq):
self.send_data(ptpip_packet.data(), session)
# set the session id of the object if the reply is of type PtpIpInitCmdAck
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpInitCmdAck):
self.session_id = ptpip_packet_reply.session_id
elif isinstance(ptpip_packet, PtpIpEventReq):
self.send_ptpip_event_req(ptpip_packet, session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
elif isinstance(ptpip_packet, PtpIpCmdRequest) and ptpip_packet.ptp_cmd == 0x90C7:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpStartDataPacket):
data_length = struct.unpack('I', ptpip_packet_reply.length)[0]
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
data = ptpip_packet_reply.data
while isinstance(ptpip_packet_reply, PtpIpDataPacket):
data = data + ptpip_packet_reply.data
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if data_length == len(data):
events = PtpIpEventFactory(data).get_events()
for event in events:
self.event_queue.append(event)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
elif isinstance(ptpip_packet, PtpIpCmdRequest) and ptpip_packet.ptp_cmd == 0x1009:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpStartDataPacket):
data_length = struct.unpack('I', ptpip_packet_reply.length)[0]
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
data = ptpip_packet_reply.data
while isinstance(ptpip_packet_reply, PtpIpDataPacket):
data = data + ptpip_packet_reply.data
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if data_length == len(data):
self.object_queue.append(PtpIpDataObject(ptpip_packet.param1, data))
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
else:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
return ptpip_packet_reply
def send_ptpip_event_req(self, ptpip_packet, session):
        # add the session id of the object itself if it is not specified in the packet
if ptpip_packet.session_id is None:
ptpip_packet.session_id = self.session_id
self.send_data(ptpip_packet.data(), session)
def send_data(self, data, session):
session.send(struct.pack('I', len(data) + 4) + data)
def recieve_data(self, session):
data = session.recv(4)
(data_length,) = struct.unpack('I', data)
print(f"Packet length: {data_length}")
while (data_length) > len(data):
data += session.recv(data_length - len(data))
return data[4:]
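# Wire framing note (illustrative): every PTP/IP packet starts with a 4-byte
# length field that counts itself, hence the len(data) + 4 in send_data() and
# the data[4:] slice in recieve_data().
#   payload = struct.pack('I', 13)                          # PtpIpPing cmdtype
#   frame = struct.pack('I', len(payload) + 4) + payload    # 8 bytes on the wire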
class PtpIpPacket(object):
"""docstring for PtpIpCmd"""
def __init__(self):
super(PtpIpPacket, self).__init__()
def factory(self, data=None):
if data is None:
self.cmdtype = None
else:
print(f"Cmd Type: {struct.unpack('I', data[0:4])[0]}")
self.cmdtype = struct.unpack('I', data[0:4])[0]
if self.cmdtype == 1:
return PtpIpInitCmdReq(data[4:])
elif self.cmdtype == 2:
return PtpIpInitCmdAck(data[4:])
elif self.cmdtype == 3:
return PtpIpEventReq(data[4:])
elif self.cmdtype == 4:
return PtpIpEventAck(data[4:])
elif self.cmdtype == 5:
return PtpIpInitFail(data[4:])
elif self.cmdtype == 6:
return PtpIpCmdRequest(data[4:])
elif self.cmdtype == 7:
return PtpIpCmdResponse(data[4:])
elif self.cmdtype == 9:
return PtpIpStartDataPacket(data[4:])
elif self.cmdtype == 10:
return PtpIpDataPacket(data[4:])
elif self.cmdtype == 12:
return PtpIpEndDataPacket(data[4:])
elif self.cmdtype == 13:
return PtpIpPing(data[4:])
def data(self):
pass
class PtpIpInitCmdReq(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitCmdReq, self).__init__()
self.cmdtype = struct.pack('I', 0x01)
self.version = struct.pack('>I', 0x0100)
if data is None:
guid = uuid.uuid4()
self.guid = guid.bytes
self.hostname = socket.gethostname() + '\x00'
self.hostname = self.hostname.encode('utf-16-le')
else:
self.guid = data[0:16]
            self.hostname = data[16:]
def data(self):
return self.cmdtype + self.guid + self.hostname + self.version
class PtpIpInitCmdAck(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitCmdAck, self).__init__()
self.cmdtype = struct.pack('I', 0x02)
if data is not None:
self.session_id = data[0:4]
self.guid = data[4:20]
self.hostname = data[20:]
class PtpIpEventReq(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None, session_id=None):
super(PtpIpEventReq, self).__init__()
self.cmdtype = struct.pack('I', 0x03)
self.session_id = None
if data is not None:
self.session_id = data[0:4]
elif session_id is not None:
self.session_id = session_id
def data(self):
if self.session_id:
return self.cmdtype + self.session_id
return self.cmdtype
class PtpIpEventAck(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpEventAck, self).__init__()
self.cmdtype = struct.pack('I', 0x04)
class PtpIpInitFail(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitFail, self).__init__()
self.cmdtype = struct.pack('I', 0x05)
class PtpIpCmdRequest(PtpIpPacket):
"""
Operation Code Description
0x1001 GetDeviceInfo
0x1002 OpenSession
0x1003 CloseSession
0x1004 GetStorageIDs
0x1005 GetStorageInfo
0x1006 GetNumObjects
0x1007 GetObjectHandles
0x1008 GetObjectInfo
0x1009 GetObject
0x100A GetThumb
0x100B DeleteObject
0x100C SendObjectInfo
0x100D SendObject
0x100E InitiateCapture
0x100F FormatStore
0x1014 GetDevicePropDesc
0x1015 GetDevicePropValue
0x1016 SetDevicePropValue
0x101B GetPartialObject
0x90C0 InitiateCaptureRecInSdram
0x90C1 AfDrive
0x90C2 ChangeCameraMode
0x90C3 DeleteImagesInSdram
0x90C4 GetLargeThumb
0x90C7 GetEvent
0x90C8 DeviceReady
0x90C9 SetPreWbData
0x90CA GetVendorPropCodes
0x90CB AfAndCaptureRecInSdram
0x90CC GetPicCtrlData
0x90CD SetPicCtrlData
0x90CE DeleteCustomPicCtrl
0x90CF GetPicCtrlCapability
0x9201 StartLiveView
0x9202 EndLiveView
0x9203 GetLiveViewImage
0x9204 MfDrive
0x9205 ChangeAfArea
0x9206 AfDriveCancel
0x9207 InitiateCaptureRecInMedia
0x9209 GetVendorStorageIDs
0x920A StartMovieRecInCard
0x920B EndMovieRec
0x920C TerminateCapture
0x9400 GetPartialObjectHighSpeed
0x9407 SetTransferListLock
0x9408 GetTransferList
0x9409 NotifyFileAcquisitionStart
0x940A NotifyFileAcquisitionEnd
0x940B GetSpecificSizeObject
0x9801 GetObjectPropsSupported
0x9802 GetObjectPropDesc
0x9803 GetObjectPropValue
0x9805 GetObjectPropList
"""
def __init__(self, data=None, cmd=None, param1=None, param2=None, param3=None, param4=None,
param5=None):
super(PtpIpCmdRequest, self).__init__()
self.cmdtype = struct.pack('I', 0x06)
self.unkown = struct.pack('I', 0x01)
self.ptp_cmd = cmd
self.param1 = param1
self.param2 = param2
self.param3 = param3
self.param4 = param4
self.param5 = param5
        # TODO: generate a transaction ID
self.transaction_id = struct.pack('I', 0x06)
        self.args = b''
        # pack any supplied parameters as little-endian 32-bit unsigned values
        for param in (self.param1, self.param2, self.param3,
                      self.param4, self.param5):
            if param is not None:
                self.args = self.args + struct.pack('<L', param)
def data(self):
return self.cmdtype + self.unkown + struct.pack('H', self.ptp_cmd) + \
self.transaction_id + self.args
class PtpIpCmdResponse(PtpIpPacket):
"""
ResponseCode Description
0x2000 Undefined
0x2001 OK
0x2002 General Error
0x2003 Session Not Open
0x2004 Invalid TransactionID
0x2005 Operation Not Supported
0x2006 Parameter Not Supported
0x2007 Incomplete Transfer
0x2008 Invalid StorageID
0x2009 Invalid ObjectHandle
0x200A DeviceProp Not Supported
0x200B Invalid ObjectFormatCode
0x200C Store Full
0x200D Object WriteProtected
0x200E Store Read-Only
0x200F Access Denied
0x2010 No Thumbnail Present
0x2011 SelfTest Failed
0x2012 Partial Deletion
0x2013 Store Not Available
0x2014 Specification By Format Unsupported
0x2015 No Valid ObjectInfo
0x2016 Invalid Code Format
0x2017 Unknown Vendor Code
0x2018 Capture Already Terminated
0x2019 Device Busy
0x201A Invalid ParentObject
0x201B Invalid DeviceProp Format
0x201C Invalid DeviceProp Value
0x201D Invalid Parameter
0x201E Session Already Open
0x201F Transaction Cancelled
0x2020 Specification of Destination Unsupported
"""
def __init__(self, data=None):
super(PtpIpCmdResponse, self).__init__()
self.cmdtype = struct.pack('I', 0x07)
if data is not None:
self.ptp_response_code = struct.unpack('H', data[0:2])[0]
self.transaction_id = data[2:6]
self.args = data[6:]
class PtpIpStartDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x09)
super(PtpIpStartDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
self.length = data[4:8]
class PtpIpDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x10)
super(PtpIpDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
self.data = data[4:]
class PtpIpCancelTransaction(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x11)
super(PtpIpCancelTransaction, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
class PtpIpEndDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x12)
super(PtpIpEndDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
print(f"transaction_id: {struct.unpack('I', self.transaction_id)[0]}")
self.data = data[4:]
class PtpIpPing(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x13)
super(PtpIpPing, self).__init__()
if data is not None:
self.data = ''
def data(self):
return self.cmdtype
class PtpIpEvent(object):
"""
EventCode Description
0x4001 CancelTransaction
0x4002 ObjectAdded
0x4003 ObjectRemoved
0x4004 StoreAdded
0x4005 StoreRemoved
0x4006 DevicePropChanged
0x4007 ObjectInfoChanged
0x4008 DeviceInfoChanged
0x4009 RequestObjectTransfer
0x400A StoreFull
0x400C StorageInfoChanged
0x400D CaptureComplete
0xC101 ObjectAddedInSdram
0xC102 CaptureCompleteRecInSdram
0xC105 RecordingInterrupted
"""
def __init__(self, event_code, event_parameter):
super(PtpIpEvent, self).__init__()
self.event_code = int(event_code)
self.event_parameter = int(event_parameter)
class PtpIpEventFactory(object):
"""
    A factory that produces a list of PtpIpEvent objects when passed the data
    reply of a GetEvent request (0x90C7).
"""
def __init__(self, data):
super(PtpIpEventFactory, self).__init__()
        # create an empty list for the PtpIpEvent objects that will be returned
self.events = []
        # get the number of events from the data passed to the factory
amount_of_events = struct.unpack('H', data[0:2])[0]
        # set a counter and an offset of 2, as the first two bytes are already processed
counter = 1
offset = 2
while counter <= amount_of_events:
# get the event_code which consists of two bytes
event_code = str(struct.unpack('H', data[offset:offset+2])[0])
# get the event_parameter which consists of 4 bytes
event_parameter = str(struct.unpack('I', data[offset+2:offset+6])[0])
self.events.append(PtpIpEvent(event_code, event_parameter))
# increase the offset by 6 to get to the next event_code and event_parameter pair
offset = offset + 6
counter = counter + 1
def get_events(self):
return self.events
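# Example (illustrative): parse a reply carrying one ObjectAdded (0x4002) event
# with parameter 0x42.
#   data = struct.pack('H', 1) + struct.pack('H', 0x4002) + struct.pack('I', 0x42)
#   events = PtpIpEventFactory(data).get_events()
#   assert (events[0].event_code, events[0].event_parameter) == (0x4002, 0x42)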
class PtpIpDataObject(object):
"""docstring for PtpIpDataObject"""
def __init__(self, object_handle, data):
super(PtpIpDataObject, self).__init__()
self.object_handle = object_handle
self.data = data
| 34.701195
| 99
| 0.644259
| 1,991
| 17,420
| 5.457559
| 0.220994
| 0.055678
| 0.044174
| 0.016749
| 0.361495
| 0.32818
| 0.298086
| 0.259249
| 0.246825
| 0.233757
| 0
| 0.054413
| 0.268886
| 17,420
| 501
| 100
| 34.770459
| 0.798759
| 0.231745
| 0
| 0.274368
| 0
| 0
| 0.023696
| 0.004833
| 0
| 0
| 0.007639
| 0.001996
| 0
| 1
| 0.115523
| false
| 0.00722
| 0.01444
| 0.01444
| 0.263538
| 0.021661
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
072f5247503c271ee10d989b45781d7bce312d75
| 19,888
|
py
|
Python
|
tensorflow/python/compiler/tensorrt/model_tests/model_handler.py
|
sboshin/tensorflow
|
77689016fb4c1373abeca36360f7b2dd9434c547
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/compiler/tensorrt/model_tests/model_handler.py
|
sboshin/tensorflow
|
77689016fb4c1373abeca36360f7b2dd9434c547
|
[
"Apache-2.0"
] | 88
|
2020-11-24T08:18:10.000Z
|
2022-03-25T20:28:30.000Z
|
tensorflow/python/compiler/tensorrt/model_tests/model_handler.py
|
sboshin/tensorflow
|
77689016fb4c1373abeca36360f7b2dd9434c547
|
[
"Apache-2.0"
] | 1
|
2020-12-18T08:51:32.000Z
|
2020-12-18T08:51:32.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loads, converts, and runs sample models."""
import abc
import collections
import functools
import tempfile
import time
from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Union
from absl import logging
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.saved_model import load as saved_model_load
from tensorflow.python.saved_model import loader as saved_model_loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
# pylint: disable=bad-whitespace
### Helper Functions
def _get_concrete_tensor_shape(
tensor_shape: tensor_shape_pb2.TensorShapeProto,
batch_size: Optional[int] = None) -> Sequence[int]:
"""Gets a concrete tensor shape without dynamic dimensions."""
if tensor_shape.unknown_rank:
raise ValueError("Cannot generates random tensors for unknown rank!")
shape = [dim.size for dim in tensor_shape.dim]
if not shape:
raise ValueError("The tensor cannot have a rank of 0!")
if shape[0] < 0:
if batch_size is None or batch_size <= 0:
raise ValueError("Must provide a valid batch size "
"as the tensor has a dynamic batch size!")
shape[0] = batch_size
if any(filter(lambda x: x < 0, shape)):
raise ValueError("Cannot have dynamic dimensions except for batch size!")
return shape
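# Example (illustrative): a shape proto for [-1, 224, 224, 3] with batch_size=8
# yields the concrete shape [8, 224, 224, 3]; any other dynamic dimension raises.
#   proto = tensor_shape_pb2.TensorShapeProto(
#       dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=s) for s in (-1, 224, 224, 3)])
#   assert _get_concrete_tensor_shape(proto, batch_size=8) == [8, 224, 224, 3]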
def _generate_random_tensor_v1(tensor_info: meta_graph_pb2.TensorInfo,
batch_size: Optional[int] = None) -> np.ndarray:
"""Generates a random tensor based on the data type and tensor shape."""
dtype = tf_dtypes.as_dtype(tensor_info.dtype)
shape = _get_concrete_tensor_shape(tensor_info.tensor_shape, batch_size)
with session.Session():
return random_ops.random_uniform(
shape=shape, dtype=dtype, name=tensor_info.name.split(":")[0]).eval()
def _generate_random_tensor_v2(
tensor: framework_ops.Tensor,
batch_size: Optional[int] = None) -> framework_ops.Tensor:
"""Generates a random tensor based on the data type and tensor shape."""
shape = _get_concrete_tensor_shape(tensor.shape.as_proto(), batch_size)
return random_ops.random_uniform(
shape=shape, dtype=tensor.dtype, name=tensor.name)
# Models are repeatedly loaded for different TensorRT conversion settings.
# Using cache can reduce I/O.
@functools.lru_cache()
def load_meta_graph(
saved_model_dir: str, saved_model_tags: str,
saved_model_signature_key: str) -> meta_graph_pb2.MetaGraphDef:
"""Loads a `tf.MetaGraphDef` in TF1."""
with session.Session() as sess:
meta_graph = saved_model_loader.load(
sess=sess,
export_dir=saved_model_dir,
tags=saved_model_tags,
)
output_node_names = [
tensor.name.split(":")[0] for tensor in
meta_graph.signature_def[saved_model_signature_key].outputs.values()
]
graph_def = (
convert_to_constants.convert_variables_to_constants_from_session_graph(
sess, meta_graph.graph_def, output_node_names))
meta_graph.graph_def.CopyFrom(graph_def)
return meta_graph
@functools.lru_cache()
def load_graph_func(saved_model_dir: str, saved_model_tags: str,
saved_model_signature_key: str):
"""Loads a graph function in TF2."""
imported = saved_model_load.load(
export_dir=saved_model_dir, tags=saved_model_tags)
graph_func = imported.signatures[saved_model_signature_key]
return convert_to_constants.convert_variables_to_constants_v2(graph_func)
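# Note (illustrative; "/tmp/model" is a hypothetical path): functools.lru_cache
# makes repeated loads of the same model free after the first call.
#   f1 = load_graph_func("/tmp/model", (tag_constants.SERVING,), "serving_default")
#   f2 = load_graph_func("/tmp/model", (tag_constants.SERVING,), "serving_default")
#   assert f1 is f2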
### Test Classes
class TestResult(
collections.namedtuple("TestResult",
["outputs", "latency", "trt_convert_params"])):
def __new__(cls,
outputs: Mapping[str, np.ndarray],
latency: List[float],
trt_convert_params: trt.TrtConversionParams = None):
return super(TestResult, cls).__new__(cls, outputs, latency,
trt_convert_params)
class ModelConfig(
collections.namedtuple("ModelConfig", [
"saved_model_dir", "saved_model_tags", "saved_model_signature_key",
"default_batch_size"
])):
"""Configurations for test models."""
def __new__(cls,
saved_model_dir: str,
saved_model_tags: Sequence[str] = (tag_constants.SERVING,),
saved_model_signature_key: str = (
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY),
default_batch_size: int = 1):
return super(ModelConfig,
cls).__new__(cls, saved_model_dir, saved_model_tags,
saved_model_signature_key, default_batch_size)
class TestResultCollection(
collections.namedtuple("TestResultCollection", ["results", "config"])):
def __new__(cls, config: ModelConfig,
results: Sequence[TestResult] = tuple()):
return super(TestResultCollection, cls).__new__(cls, config, results)
class _ModelHandlerBase(metaclass=abc.ABCMeta):
"""Base class for running a model."""
def __init__(self, model_config: ModelConfig):
self._model_config = model_config
def __str__(self) -> str:
return str(self._model_config)
def __repr__(self) -> str:
return "{}({})".format(self.__class__.__name__, str(self))
@property
def model_config(self) -> ModelConfig:
return self._model_config
@property
def input_tensort_names(self) -> Sequence[str]:
"""Names of input tensors."""
@property
def output_tensor_names(self) -> Sequence[str]:
"""Names of output tensors."""
@abc.abstractmethod
def generate_random_inputs(
self,
batch_size: Optional[int] = None
) -> Mapping[str, Union[np.ndarray, framework_ops.Tensor]]:
"""Generates mapping from names to input tensors."""
@abc.abstractmethod
def run(self,
inputs=None,
warmup_iterations: int = 10,
benchmark_iterations: int = 100,
allow_to_use_gpu: bool = False) -> TestResult:
"""Runs the model with provided or randomly generated input tensors.
Args:
inputs: Mapping from names to input ndarrays in TF1, or a sequence of
        tensors in TF2. If `None`, randomly generated inputs will be used
instead.
warmup_iterations: Number of inferences to warm up the runtime.
benchmark_iterations: Number of inferences to measure the latency.
allow_to_use_gpu: Whether it is allowed to use GPU or not.
Returns:
`TestResult` summarizing timing and numerics information.
"""
class ModelHandlerV1(_ModelHandlerBase):
"""Runs a model in TF1."""
@property
def meta_graph(self) -> meta_graph_pb2.MetaGraphDef:
return load_meta_graph(
saved_model_dir=self.model_config.saved_model_dir,
saved_model_tags=self.model_config.saved_model_tags,
saved_model_signature_key=self.model_config.saved_model_signature_key)
@property
def input_tensor_info(self) -> Mapping[str, meta_graph_pb2.TensorInfo]:
return self.meta_graph.signature_def[
self.model_config.saved_model_signature_key].inputs
@property
def output_tensor_info(self) -> Mapping[str, meta_graph_pb2.TensorInfo]:
return self.meta_graph.signature_def[
self.model_config.saved_model_signature_key].outputs
@property
def input_tensort_names(self) -> Sequence[str]:
return [info.name for info in self.input_tensor_info.values()]
@property
def output_tensor_names(self) -> Sequence[str]:
return [info.name for info in self.output_tensor_info.values()]
def generate_random_inputs(self,
batch_size: Optional[int] = None
) -> Mapping[str, np.ndarray]:
batch_size = batch_size or self.model_config.default_batch_size
return {
tensor_info.name: _generate_random_tensor_v1(tensor_info, batch_size)
for tensor_info in self.input_tensor_info.values()
}
def run(self,
inputs: Optional[Mapping[str, np.ndarray]] = None,
warmup_iterations=10,
benchmark_iterations=100,
allow_to_use_gpu=False) -> TestResult:
inputs = inputs or self.generate_random_inputs()
config_proto = None
if not allow_to_use_gpu:
config_proto = config_pb2.ConfigProto(device_count={"CPU": 1, "GPU": 0})
with session.Session(config=config_proto) as sess:
importer.import_graph_def(self.meta_graph.graph_def)
try:
for _ in range(warmup_iterations):
sess.run(fetches=self.output_tensor_names, feed_dict=inputs)
latency = []
for _ in range(benchmark_iterations):
before = time.time()
outputs = sess.run(fetches=self.output_tensor_names, feed_dict=inputs)
latency.append(time.time() - before)
except Exception as exc:
raise RuntimeError("Failed to run model inference! "
"Model information: {}".format(str(self))) from exc
outputs = dict(zip(self.output_tensor_names, outputs))
return TestResult(latency=latency, outputs=outputs if inputs else None)
class ModelHandlerV2(_ModelHandlerBase):
"""Runs a model in TF2."""
@property
def graph_func(self):
graph_func = load_graph_func(
saved_model_dir=self.model_config.saved_model_dir,
saved_model_tags=self.model_config.saved_model_tags,
saved_model_signature_key=self.model_config.saved_model_signature_key)
return convert_to_constants.convert_variables_to_constants_v2(graph_func)
@property
def input_tensor_names(self):
return [tensor.name for tensor in self.graph_func.inputs]
@property
def output_tensor_names(self):
return [tensor.name for tensor in self.graph_func.outputs]
def generate_random_inputs(self,
batch_size: Optional[int] = None
) -> Sequence[framework_ops.Tensor]:
batch_size = batch_size or self.model_config.default_batch_size
return [
_generate_random_tensor_v2(tensor, batch_size)
for tensor in self.graph_func.inputs
]
def run(self,
inputs: Optional[Sequence[framework_ops.Tensor]] = None,
warmup_iterations=10,
benchmark_iterations=100,
allow_to_use_gpu=False) -> TestResult:
inputs = inputs or self.generate_random_inputs()
try:
device = "/device:gpu:0" if allow_to_use_gpu else "/device:cpu:0"
with framework_ops.device(device):
for _ in range(warmup_iterations):
self.graph_func(*inputs)
latency = []
for _ in range(benchmark_iterations):
before = time.time()
outputs = self.graph_func(*inputs)
latency.append(time.time() - before)
except Exception as exc:
raise RuntimeError("Failed to run model inference! "
"Model information: {}".format(str(self))) from exc
outputs = dict(zip(self.output_tensor_names, outputs))
return TestResult(latency=latency, outputs=outputs if inputs else None)
class _TrtModelHandlerBase(_ModelHandlerBase):
"""Base class for converting and running a model."""
def __init__(
self,
model_config: ModelConfig,
trt_convert_params: trt.TrtConversionParams,
):
super(_TrtModelHandlerBase, self).__init__(model_config)
self._trt_convert_params = trt_convert_params
self._converter = self._create_converter(trt_convert_params)
logging.info("Converting to TensorRT!")
self._check_conversion(self._converter.convert())
self._conversion_is_saved = False
@abc.abstractmethod
def _create_converter(self, trt_convert_params: trt.TrtConversionParams):
"""Creates a converter for the corresponding TF version."""
@abc.abstractmethod
def _check_conversion(self, conversion_output):
"""Checks if conversion output has any TensorRT engines."""
def _check_contains_trt_engine(self, graph_def: graph_pb2.GraphDef):
if "TRTEngineOp" not in [node.op for node in graph_def.node]:
raise RuntimeError("Failed to convert to TensorRT! "
"Model Information: {}".format(str(self)))
def __str__(self) -> str:
base = super(_TrtModelHandlerBase, self).__str__()
return "{}, TrtConversionParams: {}".format(base,
str(self._trt_convert_params))
@property
def trt_convert_params(self) -> trt.TrtConversionParams:
return self._trt_convert_params
def save(self,
output_saved_model_dir: Optional[str] = None,
overwrite=True) -> None:
"""Saves a TensorRT converted model."""
if self._conversion_is_saved and not overwrite:
return
output_saved_model_dir = output_saved_model_dir or tempfile.mkdtemp()
logging.info("Saving TensorRT model to %s!", output_saved_model_dir)
self._converter.save(output_saved_model_dir)
self._model_config = self.model_config._replace(
saved_model_dir=output_saved_model_dir)
self._conversion_is_saved = True
class TrtModelHandlerV1(_TrtModelHandlerBase, ModelHandlerV1):
"""Converts a TF1 model with TensorRT and runs the converted model."""
def _create_converter(self, trt_convert_params: trt.TrtConversionParams):
conversion_nodes_denylist = self.output_tensor_names
return trt.TrtGraphConverter(
input_saved_model_dir=self.model_config.saved_model_dir,
input_saved_model_tags=self.model_config.saved_model_tags,
input_saved_model_signature_key=(
self.model_config.saved_model_signature_key),
nodes_denylist=conversion_nodes_denylist,
max_batch_size=trt_convert_params.max_batch_size,
max_workspace_size_bytes=trt_convert_params.max_workspace_size_bytes,
precision_mode=trt_convert_params.precision_mode,
minimum_segment_size=trt_convert_params.minimum_segment_size,
is_dynamic_op=trt_convert_params.is_dynamic_op,
maximum_cached_engines=trt_convert_params.maximum_cached_engines,
use_calibration=trt_convert_params.use_calibration,
)
_check_conversion = _TrtModelHandlerBase._check_contains_trt_engine
def run(self,
inputs: Optional[Mapping[str, np.ndarray]] = None,
warmup_iterations=10,
benchmark_iterations=100) -> TestResult:
self.save(overwrite=False)
logging.info("Running with TensorRT!")
test_result = ModelHandlerV1.run(
self,
inputs,
warmup_iterations,
benchmark_iterations,
allow_to_use_gpu=True)
return test_result._replace(trt_convert_params=self._trt_convert_params)
class TrtModelHandlerV2(_TrtModelHandlerBase, ModelHandlerV2):
"""Converts a TF2 model with TensorRT and runs the converted model."""
def _create_converter(self, trt_convert_params: trt.TrtConversionParams):
return trt.TrtGraphConverterV2(
input_saved_model_dir=self.model_config.saved_model_dir,
input_saved_model_tags=self.model_config.saved_model_tags,
input_saved_model_signature_key=(
self.model_config.saved_model_signature_key),
conversion_params=trt_convert_params)
def _check_conversion(self, graph_func):
graph_def = graph_func.graph.as_graph_def()
self._check_contains_trt_engine(graph_def)
def run(self,
inputs: Optional[Sequence[framework_ops.Tensor]] = None,
warmup_iterations=10,
benchmark_iterations=100) -> TestResult:
self.save(overwrite=False)
logging.info("Running with TensorRT!")
test_result = ModelHandlerV2.run(
self,
inputs,
warmup_iterations,
benchmark_iterations,
allow_to_use_gpu=True)
return test_result._replace(trt_convert_params=self._trt_convert_params)
class _ModelHandlerManagerBase(metaclass=abc.ABCMeta):
"""Manages a series of ModelHandlers for aggregrated testing/benchmarking."""
def __init__(
self, model_config: ModelConfig,
default_trt_convert_params: trt.TrtConversionParams,
trt_convert_params_updater: Callable[[trt.TrtConversionParams],
Iterable[trt.TrtConversionParams]]):
self._ori_model = self.model_handler_cls(model_config)
self._trt_models = []
for trt_convert_params in trt_convert_params_updater(
default_trt_convert_params):
trt_model = self.trt_model_handler_cls(
model_config, trt_convert_params=trt_convert_params)
self._trt_models.append(trt_model)
self._result_collection = TestResultCollection(
results=[], config=model_config)
def __str__(self) -> str:
return "Input Model: {}".format(str(self._ori_model))
def __repr__(self) -> str:
return "{}({})".format(self.__class__.__name__, str(self))
@property
@classmethod
@abc.abstractmethod
def model_handler_cls(cls):
"""The modle handler class. ModelHandleV1/ModelHandlerV2."""
@property
@classmethod
@abc.abstractmethod
def trt_model_handler_cls(cls):
"""The TensorRTmodle handler class. TrtModelHandleV1/TrtModelHandlerV2."""
@property
def model_config(self):
return self._ori_model.model_config
def generate_random_inputs(self, batch_size: Optional[int] = None):
return self._ori_model.generate_random_inputs(batch_size)
def run(self,
inputs=None,
warmup_iterations: int = 10,
benchmark_iterations: int = 100) -> TestResultCollection:
"""Runs model inference with provided or randomly generated input tensors.
Args:
inputs: Mapping from names to input ndarrays in TF1. Or a sequence of
        tensors in TF2. If `None`, randomly generated input tensors will be used
instead.
warmup_iterations: Number of inferences to warm up the runtime.
benchmark_iterations: Number of inferences to measure the latency.
Returns:
`TestResultCollection` summarizing timing and numerics information for
different TensorRT conversion settings.
"""
inputs = inputs or self.generate_random_inputs()
results = [
model.run(inputs, warmup_iterations, benchmark_iterations)
for model in [self._ori_model] + self._trt_models
]
return self._result_collection._replace(results=results)
class ModelHandlerManagerV1(_ModelHandlerManagerBase):
"""Manages a series of ModelHandlers for aggregrated testing/benchmarking in TF1."""
model_handler_cls = ModelHandlerV1
trt_model_handler_cls = TrtModelHandlerV1
class ModelHandlerManagerV2(_ModelHandlerManagerBase):
"""Manages a series of ModelHandlers for aggregrated testing/benchmarking in TF2."""
model_handler_cls = ModelHandlerV2
trt_model_handler_cls = TrtModelHandlerV2
| 38.026769
| 86
| 0.71611
| 2,473
| 19,888
| 5.446826
| 0.140315
| 0.046028
| 0.03801
| 0.027765
| 0.552116
| 0.451596
| 0.409577
| 0.384558
| 0.363029
| 0.341203
| 0
| 0.006251
| 0.195646
| 19,888
| 522
| 87
| 38.099617
| 0.835782
| 0.150342
| 0
| 0.372283
| 0
| 0
| 0.043089
| 0.0015
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.070652
| 0.05163
| 0.342391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
073032049203bfdc6f84f748cd2128bbc2872806
| 2,959
|
py
|
Python
|
kpca_iris.py
|
syamkakarla98/Kernel-PCA-Using-Different-Kernels-With-Classification
|
03302843bff9b0d87e2983bed1f37bc329e716c1
|
[
"MIT"
] | 10
|
2018-07-12T11:46:21.000Z
|
2021-03-13T06:47:01.000Z
|
kpca_iris.py
|
syamkakarla98/Kernel-PCA-Using-Different-Kernels-With-Classification
|
03302843bff9b0d87e2983bed1f37bc329e716c1
|
[
"MIT"
] | null | null | null |
kpca_iris.py
|
syamkakarla98/Kernel-PCA-Using-Different-Kernels-With-Classification
|
03302843bff9b0d87e2983bed1f37bc329e716c1
|
[
"MIT"
] | 9
|
2018-09-19T11:57:44.000Z
|
2021-03-13T06:47:04.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# load dataset into Pandas DataFrame
df = pd.read_csv(r"D:\Python_programs\ML\Iris Data\KPCA\iris.csv")
#df.to_csv('iris.csv')
from sklearn.preprocessing import StandardScaler
features = ['sepal length', 'sepal width', 'petal length', 'petal width']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
y = df.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
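# Quick check (illustrative): after standardization every feature column has
# zero mean and unit variance.
#   assert np.allclose(x.mean(axis=0), 0, atol=1e-9)
#   assert np.allclose(x.std(axis=0), 1)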
from sklearn.decomposition import KernelPCA
## Finding the principal components
# KERNELS : linear,rbf,poly
#
def Kernel_Pca(ker):
    kpca = KernelPCA(n_components=4, kernel=ker, gamma=15)
    kpca_transform = kpca.fit_transform(x)  # fit once; a duplicate fit_transform was removed
explained_variance = np.var(kpca_transform, axis=0)
ev = explained_variance / np.sum(explained_variance)
#--------- Bar Graph for Explained Variance Ratio ------------
plt.bar([1,2,3,4],list(ev*100),label='Principal Components',color='b')
plt.legend()
plt.xlabel('Principal Components ')
#----------------------
n=list(ev*100)
pc=[]
for i in range(len(n)):
n[i]=round(n[i],4)
pc.append('PC-'+str(i+1)+'('+str(n[i])+')')
#----------------------
plt.xticks([1,2,3,4],pc, fontsize=7, rotation=30)
plt.ylabel('Variance Ratio')
plt.title('Variance Ratio of IRIS Dataset using kernel:'+str(ker))
plt.show()
#---------------------------------------------------
    # Since the first two principal components capture most of the variance,
    # we select PC-1 and PC-2.
#---------------------------------------------------
    kpca = KernelPCA(n_components=2, kernel=ker, gamma=15)
    principalComponents = kpca.fit_transform(x)  # fit once; a duplicate fit_transform was removed
principalDf = pd.DataFrame(data = principalComponents
, columns = ['PC-1', 'PC-2'])
    # Adding labels
finalDf = pd.concat([principalDf, df[['target']]], axis = 1)
# Plotting pc1 & pc2
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('PC-1', fontsize = 15)
ax.set_ylabel('PC-2', fontsize = 15)
ax.set_title('KPCA on IRIS Dataset using kernel:'+str(ker), fontsize = 20)
targets = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
colors = ['r', 'g', 'b']
for target, color in zip(targets,colors):
indicesToKeep = finalDf['target'] == target
ax.scatter(finalDf.loc[indicesToKeep, 'PC-1']
, finalDf.loc[indicesToKeep, 'PC-2']
, c = color
, s = 30)
ax.legend(targets)
ax.grid()
plt.show() # FOR SHOWING THE PLOT
#------------------- SAVING DATA INTO CSV FILE ------------
finalDf.to_csv('iris_after_KPCA_using_'+str(ker)+'.csv')
#------------------------------------------------------
k=['linear','rbf','poly']
for i in k:
Kernel_Pca(i)
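# Self-contained sketch (not in the original script) of the explained-variance
# ratio computed above: KernelPCA does not expose explained_variance_ratio_
# directly, so the script derives it as the per-component variance of the
# projected data divided by the total variance. Reproduced here on random data.
import numpy as np
from sklearn.decomposition import KernelPCA

rng = np.random.RandomState(0)
demo_x = rng.normal(size=(50, 4))
projected = KernelPCA(n_components=4, kernel='rbf', gamma=15).fit_transform(demo_x)
explained_variance = np.var(projected, axis=0)
ev_ratio = explained_variance / np.sum(explained_variance)
print(ev_ratio, ev_ratio.sum())  # the ratios sum to 1 by construction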
| 34.406977
| 78
| 0.584657
| 384
| 2,959
| 4.432292
| 0.385417
| 0.035253
| 0.03819
| 0.039953
| 0.077556
| 0.077556
| 0.044653
| 0.044653
| 0.044653
| 0.044653
| 0
| 0.021375
| 0.193647
| 2,959
| 85
| 79
| 34.811765
| 0.691953
| 0.224062
| 0
| 0.074074
| 0
| 0
| 0.155673
| 0.021108
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0.092593
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0730d1a99c54c1eeab8095b4f4102da12e701b30
| 4,704
|
py
|
Python
|
pydbrepo/drivers/sqlite.py
|
danteay/pydbrepo
|
665ad5fe64a00697128f9943e0fc831ae485f136
|
[
"MIT"
] | 2
|
2021-09-03T10:54:01.000Z
|
2022-01-08T18:48:20.000Z
|
pydbrepo/drivers/sqlite.py
|
danteay/pydbrepo
|
665ad5fe64a00697128f9943e0fc831ae485f136
|
[
"MIT"
] | null | null | null |
pydbrepo/drivers/sqlite.py
|
danteay/pydbrepo
|
665ad5fe64a00697128f9943e0fc831ae485f136
|
[
"MIT"
] | 1
|
2021-12-28T17:34:40.000Z
|
2021-12-28T17:34:40.000Z
|
"""SQLite Driver implementation."""
# pylint: disable=R0201
import os
import sqlite3
from typing import Any, AnyStr, List, NoReturn, Optional, Tuple
from pydbrepo.drivers.driver import Driver
class SQLite(Driver):
"""SQLite Driver connection class.
Environment variables:
    DATABASE_URL: Database file url on the system. If it's an in-memory database the url should
        be None or the `:memory:` string
    DATABASE_COMMIT: default('false') Auto commit transaction flag
    :type url: Optional[AnyStr]
:param url: Database connection url
:param autocommit: Auto commit transactions
"""
def __init__(
self,
url: Optional[AnyStr] = None,
autocommit: Optional[bool] = None,
):
super().__init__()
self.__build_connection(url, autocommit)
def __build_connection(
self,
url: Optional[AnyStr] = None,
autocommit: Optional[bool] = None,
) -> NoReturn:
"""Start real driver connection from parameters.
:param url: Database connection url
:param autocommit: Auto commit transactions
"""
if url is None:
url = ':memory:'
if autocommit is None:
autocommit = False
if os.getenv('DATABASE_URL', None) is not None:
url = os.getenv('DATABASE_URL')
if os.getenv('DATABASE_COMMIT', None) is not None:
autocommit = os.getenv('DATABASE_COMMIT').lower() == "true"
self.__url = url
self.__conn = sqlite3.connect(url)
self.__commit = autocommit
@staticmethod
def __execute(cursor, sql: AnyStr, *args) -> Any:
"""Execute query and attempt to replace with arguments.
:param cursor: Connection cursor statement
:param sql: Raw query to be executed
:param args: List of arguments passed to be replaced in query
"""
if not args:
return cursor.execute(sql)
return cursor.execute(sql, tuple(args))
def query(self, **kwargs) -> List[Tuple]:
"""Execute a query and return all values.
:param kwargs: Parameters to execute query statement.
sql: AnyStr -> SQL query statement
args: Optional[Iterable[Any]] -> Object with query replacement values
:return List[Tuple]: List of tuple records found by query
"""
self._validate_params({'sql'}, set(kwargs.keys()))
cursor = self.__conn.cursor()
_ = self.__execute(cursor, kwargs['sql'], *kwargs.get('args', []))
self.__commit_transaction()
res = cursor.fetchall()
cursor.close()
return res
def query_one(self, **kwargs) -> Tuple[Any, ...]:
"""Execute a query and do not return any result value.
:param kwargs: Parameters to execute query statement.
sql: AnyStr -> SQL query statement
args: Optional[Iterable[Any]] -> Object with query replacement values
:return Tuple: Found record
"""
self._validate_params({'sql'}, set(kwargs.keys()))
cursor = self.__conn.cursor()
_ = self.__execute(cursor, kwargs['sql'], *kwargs.get('args', []))
self.__commit_transaction()
res = cursor.fetchone()
cursor.close()
return res
def query_none(self, **kwargs) -> NoReturn:
"""Execute a query and do not return any result value.
:param kwargs: Parameters to execute query statement.
sql: AnyStr -> SQL query statement
args: Optional[Iterable[Any]] -> Object with query replacement values
"""
self._validate_params({'sql'}, set(kwargs.keys()))
cursor = self.__conn.cursor()
_ = self.__execute(cursor, kwargs['sql'], *kwargs.get('args', []))
self.__commit_transaction()
cursor.close()
def commit(self) -> NoReturn:
"""Commit transaction."""
self.__conn.commit()
def rollback(self) -> NoReturn:
self.__conn.rollback()
def close(self) -> NoReturn:
"""Close current connection."""
self.__conn.close()
def get_real_driver(self) -> Any:
"""Return real mysql driver connection."""
return self.__conn
def placeholder(self, **kwargs) -> AnyStr:
"""Return query place holder."""
return '?'
def reset_placeholder(self) -> NoReturn:
"""Reset place holder status (do nothing)"""
def __repr__(self):
"""Mysql driver representation."""
return f"SQLite({self.__url})"
def __commit_transaction(self):
"""Execute commit operation if the __commit flag is True."""
if self.__commit:
self.commit()
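# A minimal usage sketch (not part of the original module), exercising only the
# methods defined above against an in-memory database. The `?` placeholder
# matches what placeholder() returns for this driver; _validate_params is
# inherited from the Driver base class, which is not shown here.
if __name__ == '__main__':
    db = SQLite(url=':memory:', autocommit=True)
    db.query_none(sql='CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)')
    db.query_none(sql='INSERT INTO users (name) VALUES (?)', args=['ada'])
    print(db.query_one(sql='SELECT id, name FROM users WHERE name = ?', args=['ada']))
    db.close()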
| 28.682927
| 99
| 0.605655
| 524
| 4,704
| 5.276718
| 0.234733
| 0.023146
| 0.023146
| 0.01736
| 0.404702
| 0.404702
| 0.384448
| 0.384448
| 0.384448
| 0.347559
| 0
| 0.001775
| 0.281463
| 4,704
| 163
| 100
| 28.858896
| 0.816272
| 0.359694
| 0
| 0.323944
| 0
| 0
| 0.042779
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.197183
| false
| 0
| 0.056338
| 0
| 0.366197
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07311f534338364dbf730b4dc400d2a729b73016
| 3,036
|
py
|
Python
|
Modules/BatchNormND.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 52
|
2020-02-28T20:40:15.000Z
|
2021-08-25T05:35:17.000Z
|
Modules/BatchNormND.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 2
|
2021-02-14T15:57:03.000Z
|
2021-10-05T12:21:34.000Z
|
Modules/BatchNormND.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 8
|
2020-02-28T20:40:11.000Z
|
2020-07-09T13:27:23.000Z
|
import numpy as np
from PuzzleLib import Config
from PuzzleLib.Backend import gpuarray, Blas
from PuzzleLib.Backend.Dnn import batchNormNd, batchNormNdBackward
from PuzzleLib.Variable import Variable
from PuzzleLib.Modules.Module import ModuleError, Module
class BatchNormND(Module):
def __init__(self, nd, maps, epsilon=1e-5, initFactor=1.0, minFactor=0.1, sscale=0.01, affine=True, name=None,
empty=False, inplace=False):
super().__init__(name)
self.inplace = inplace
if inplace and Config.showWarnings:
Config.getLogger().info("Warning: %s is using inplace flag", self)
self.maps = maps
self.epsilon = epsilon
self.initFactor = initFactor
self.minFactor = minFactor
self.numOfProps = 0
self.affine = affine
self.scale, self.bias, self.mean, self.var = None, None, None, None
self.savemean, self.saveinvvar, self.scalegrad, self.biasgrad = None, None, None, None
if empty:
return
shape = (1, maps) + self.repeat(1, nd)
scale = np.random.normal(1.0, sscale if affine else 0.0, shape).astype(self.calctype)
var = np.ones(shape, dtype=self.calctype)
self.setVar("scale", Variable(gpuarray.to_gpu(scale)))
self.setVar("bias", Variable(gpuarray.zeros(shape, dtype=self.calctype)))
self.setAttr("mean", gpuarray.zeros(shape, dtype=self.calctype))
self.setAttr("var", gpuarray.to_gpu(var))
def updateData(self, data):
if self.train:
if self.inplace:
raise ModuleError("%s: using inplace flag in train mode is prohibited" % self)
self.numOfProps += 1
factor = max(self.initFactor / self.numOfProps, self.minFactor)
self.data, self.savemean, self.saveinvvar = batchNormNd(
data, self.scale, self.bias, self.mean, self.var, self.epsilon, factor, False
)
else:
self.data = batchNormNd(
data, self.scale, self.bias, self.mean, self.var, self.epsilon, 0, True,
out=data if self.inplace else None
)
def updateGrad(self, grad):
tup = batchNormNdBackward(self.inData, grad, self.scale, self.savemean, self.saveinvvar, self.epsilon)
if self.affine:
self.grad, self.scalegrad, self.biasgrad = tup
else:
self.grad, _, _ = tup
def accGradParams(self, grad, scale=1.0, momentum=0.0):
if self.affine:
Blas.addVectorToVector(
self.scalegrad.ravel(), self.vars["scale"].grad.ravel(), out=self.vars["scale"].grad.ravel(),
alpha=scale, beta=momentum
)
Blas.addVectorToVector(
self.biasgrad.ravel(), self.vars["bias"].grad.ravel(), out=self.vars["bias"].grad.ravel(),
alpha=scale, beta=momentum
)
def dataShapeFrom(self, shape):
return shape
def gradShapeFrom(self, shape):
return shape
def reset(self):
super().reset()
self.savemean, self.saveinvvar = None, None
if self.affine:
self.scalegrad, self.biasgrad = None, None
def calcMode(self, T):
if Config.backend == Config.Backend.cuda:
if T not in {np.float16, np.float32}:
raise ModuleError("Unsupported dtype %s" % T)
elif T != np.float32:
raise ModuleError("Unsupported dtype %s" % T)
self.calctype = T
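# Self-contained NumPy sketch (not in the original module) of the running-mean
# schedule used in updateData above: the blend factor starts at initFactor and
# decays as initFactor / numOfProps until capped at minFactor, so the running
# statistics begin as a simple average of batch means and settle into an
# exponential moving average. The blend formula assumes the usual cuDNN-style
# convention running = (1 - factor) * running + factor * batch_mean.
import numpy as np

init_factor, min_factor = 1.0, 0.1
running_mean, num_of_props = np.zeros(4), 0
for batch in np.random.RandomState(0).normal(size=(20, 8, 4)):
    num_of_props += 1
    factor = max(init_factor / num_of_props, min_factor)
    running_mean = (1 - factor) * running_mean + factor * batch.mean(axis=0)
print(running_mean)  # close to 0, the true mean of the synthetic batches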
| 27.351351
| 111
| 0.706522
| 421
| 3,036
| 5.066508
| 0.251781
| 0.030005
| 0.024379
| 0.048758
| 0.307548
| 0.212846
| 0.152836
| 0.152836
| 0.054384
| 0.054384
| 0
| 0.010998
| 0.161397
| 3,036
| 110
| 112
| 27.6
| 0.826787
| 0
| 0
| 0.171053
| 0
| 0
| 0.051713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.078947
| 0.026316
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0731748ca4b74185c74c8c4352a8260f73831cf9
| 6,038
|
py
|
Python
|
model/server/server.py
|
waltzofpearls/reckon
|
533e47fd05f685024083ce7a823e9c26c35dd824
|
[
"MIT"
] | 8
|
2019-09-01T12:57:38.000Z
|
2022-03-25T21:54:19.000Z
|
model/server/server.py
|
waltzofpearls/reckon
|
533e47fd05f685024083ce7a823e9c26c35dd824
|
[
"MIT"
] | 3
|
2021-08-12T13:18:42.000Z
|
2022-03-12T00:59:15.000Z
|
model/server/server.py
|
waltzofpearls/reckon
|
533e47fd05f685024083ce7a823e9c26c35dd824
|
[
"MIT"
] | 2
|
2021-12-22T06:56:56.000Z
|
2022-03-25T21:58:19.000Z
|
from concurrent import futures
from forecaster.prophet import Forecaster as ProphetForecaster
from multiprocessing import Event, Process, cpu_count
from pythonjsonlogger import jsonlogger
import contextlib
import grpc
import logging
import model.api.forecast_pb2_grpc as grpc_lib
import os
import signal
import socket
import sys
import time
class ForecastServicer(ProphetForecaster):
def __init__(self, logger):
self.logger = logger
def pretty_timedelta(self, seconds):
seconds = int(seconds)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return '{:d}d{:d}h{:d}m{:d}s'.format(days, hours, minutes, seconds)
elif hours > 0:
return '{:d}h{:d}m{:d}s'.format(hours, minutes, seconds)
elif minutes > 0:
return '{:d}m{:d}s'.format(minutes, seconds)
else:
return '{:d}s'.format(seconds)
class GracefulShutdown:
def __init__(self, logger):
self.logger = logger
self.event = Event()
signal.signal(signal.SIGINT, self.handler('SIGINT'))
signal.signal(signal.SIGTERM, self.handler('SIGTERM'))
signal.signal(signal.SIGHUP, self.handler('SIGHUP'))
def handler(self, signal_name):
def fn(signal_received, frame):
self.logger.info('signal received', extra={'signal': signal_name})
self.event.set()
return fn
class Config(object):
def __init__(self):
self.grpc_server_address = os.getenv('GRPC_SERVER_ADDRESS', '')
self.grpc_server_key = str.encode(os.getenv('GRPC_SERVER_KEY', ''))
self.grpc_server_cert = str.encode(os.getenv('GRPC_SERVER_CERT', ''))
self.grpc_root_ca = str.encode(os.getenv('GRPC_ROOT_CA', ''))
        # the env var name keeps its original 'GPRC' spelling for compatibility
        self.grpc_server_process_num = int(os.getenv('GPRC_SERVER_PROCESS_NUM', cpu_count()))
self.grpc_server_thread_num = int(os.getenv('GRPC_SERVER_THREAD_NUM', 1))
self.grpc_server_grace_period_in_secs = int(os.getenv('GRPC_SERVER_GRACE_PERIOD_IN_SECS', 2))
self.grpc_server_kill_period_in_secs = int(os.getenv('GRPC_SERVER_KILL_PERIOD_IN_SECS', 5))
class Server(object):
def __init__(self, config, logger):
self.config = config
self.logger = logger
@contextlib.contextmanager
def _reserve_port(self):
"""Find and reserve a port for all subprocesses to use"""
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 0:
raise RuntimeError('failed to set SO_REUSEPORT.')
_, port = self.config.grpc_server_address.split(':')
sock.bind(('', int(port)))
try:
yield sock.getsockname()[1]
finally:
sock.close()
def _run_server(self, shutdown_event):
server_credentials = grpc.ssl_server_credentials(
[(self.config.grpc_server_key, self.config.grpc_server_cert)],
root_certificates=self.config.grpc_root_ca,
require_client_auth=True
)
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.config.grpc_server_thread_num),
options=[
("grpc.so_reuseport", 1),
("grpc.use_local_subchannel_pool", 1),
],
)
grpc_lib.add_ForecastServicer_to_server(ForecastServicer(self.logger), server)
server.add_secure_port(self.config.grpc_server_address, server_credentials)
self.logger.info('starting python gRPC server...')
server.start()
while not shutdown_event.is_set():
time.sleep(1)
server.stop(5).wait()
self.logger.info('python gRPC server stopped')
def serve(self):
with self._reserve_port():
procs = []
shutdown = GracefulShutdown(self.logger)
            for _ in range(self.config.grpc_server_process_num):
proc = Process(target=self._run_server, args=(shutdown.event,))
procs.append(proc)
proc.start()
while not shutdown.event.is_set():
time.sleep(1)
t = time.time()
grace_period = self.config.grpc_server_grace_period_in_secs
kill_period = self.config.grpc_server_kill_period_in_secs
while True:
                # Send SIGTERM if a process doesn't exit quickly enough, and SIGKILL as a last resort
                # .is_alive() also implicitly joins the process (good practice in linux)
alive_procs = [proc for proc in procs if proc.is_alive()]
if len(alive_procs) == 0:
break
elapsed = time.time() - t
if elapsed >= grace_period and elapsed < kill_period:
for proc in alive_procs:
proc.terminate()
self.logger.info("sending SIGTERM to subprocess", extra={'proc': proc})
elif elapsed >= kill_period:
for proc in alive_procs:
self.logger.warning("sending SIGKILL to subprocess", extra={'proc': proc})
# Queues and other inter-process communication primitives can break when
# process is killed, but we don't care here
proc.kill()
time.sleep(1)
time.sleep(1)
for proc in procs:
self.logger.info("subprocess terminated", extra={'proc': proc})
def json_logger():
logger = logging.getLogger()
log_handler = logging.StreamHandler(sys.stdout)
formatter = jsonlogger.JsonFormatter(fmt='%(asctime)s %(name)s %(levelname)s %(message)s')
log_handler.setFormatter(formatter)
log_handler.flush = sys.stdout.flush
logger.setLevel(logging.INFO)
logger.addHandler(log_handler)
return logger
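# Minimal entrypoint sketch (not part of the original file), wiring together
# the pieces defined above: Config reads its settings from the documented env
# vars (GRPC_SERVER_ADDRESS must be a host:port string for serve() to bind),
# json_logger() builds the JSON logger, and Server.serve() runs the pool.
if __name__ == '__main__':
    Server(Config(), json_logger()).serve()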
| 41.07483
| 101
| 0.625704
| 728
| 6,038
| 4.978022
| 0.287088
| 0.060706
| 0.030905
| 0.038631
| 0.203918
| 0.166943
| 0.085541
| 0.060706
| 0.022627
| 0.022627
| 0
| 0.006806
| 0.269957
| 6,038
| 146
| 102
| 41.356164
| 0.815336
| 0.05217
| 0
| 0.086614
| 0
| 0
| 0.092405
| 0.024151
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086614
| false
| 0
| 0.102362
| 0
| 0.267717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0733497e7a5accdfb3af9d8db6169c656322604e
| 14,221
|
py
|
Python
|
launchpad/launch/worker_manager.py
|
LaudateCorpus1/launchpad
|
6068bbaff9da6d9d520c01314ef920d0d4978afc
|
[
"Apache-2.0"
] | null | null | null |
launchpad/launch/worker_manager.py
|
LaudateCorpus1/launchpad
|
6068bbaff9da6d9d520c01314ef920d0d4978afc
|
[
"Apache-2.0"
] | 1
|
2021-10-05T16:06:38.000Z
|
2021-10-05T16:06:38.000Z
|
launchpad/launch/worker_manager.py
|
LaudateCorpus1/launchpad
|
6068bbaff9da6d9d520c01314ef920d0d4978afc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WorkerManager handles thread and process-based runtimes."""
import atexit
import collections
from concurrent import futures
import ctypes
import os
import signal
import subprocess
import threading
import time
from typing import Optional, Sequence, Text
from absl import flags
from absl import logging
from absl.testing import absltest
from launchpad import flags as lp_flags
import psutil
import termcolor
FLAGS = flags.FLAGS
ThreadWorker = collections.namedtuple('ThreadWorker', ['thread', 'future'])
_WORKER_MANAGERS = threading.local()
_HAS_MAIN_MANAGER = False
def get_worker_manager():
manager = getattr(_WORKER_MANAGERS, 'manager', None)
assert manager, 'Worker manager is not available in the current thread'
return manager
def register_signal_handler(sig, handler):
"""Registers a signal handler."""
return signal.signal(sig, handler)
def remove_signal_handler(sig, handler):
return signal.signal(sig, handler)
def wait_for_stop():
"""Blocks until termination of the node's program is requested.
Can be used to perform cleanup at the end of the run, for example:
start_server()
lp.wait_for_stop()
stop_server()
checkpoint()
"""
get_worker_manager().wait_for_stop()
class WorkerManager:
"""Encapsulates running threads and processes of a Launchpad Program."""
def __init__(
self,
stop_main_thread=False,
kill_main_thread=True,
register_in_thread=False,
register_signals=True):
"""Initializes a WorkerManager.
Args:
stop_main_thread: Should main thread be notified about termination.
      kill_main_thread: When set to False, try not to kill the launcher while
        killing workers. This is not possible when thread workers run in the
        same process.
register_in_thread: TODO
register_signals: Whether or not to register signal handlers.
"""
self._mutex = threading.Lock()
self._termination_notice_secs = -1
handle_user_stop = False
global _HAS_MAIN_MANAGER
# Make the first created worker manager the main manager, which handles
# signals.
if not _HAS_MAIN_MANAGER:
self._termination_notice_secs = FLAGS.lp_termination_notice_secs
handle_user_stop = True
_HAS_MAIN_MANAGER = True
self._active_workers = collections.defaultdict(list)
self._workers_count = collections.defaultdict(lambda: 0)
self._first_failure = None
self._stop_counter = 0
self._alarm_enabled = False
self._kill_main_thread = kill_main_thread
self._stop_event = threading.Event()
self._main_thread = threading.current_thread().ident
self._sigterm_handler = None
self._sigquit_handler = None
self._sigalrm_handler = None
if register_signals:
self._sigterm_handler = register_signal_handler(signal.SIGTERM,
self._sigterm)
self._sigquit_handler = register_signal_handler(signal.SIGQUIT,
self._sigquit)
if handle_user_stop:
register_signal_handler(
signal.SIGINT, lambda sig=None, frame=None: self._stop_by_user())
self._stop_main_thread = stop_main_thread
if register_in_thread:
_WORKER_MANAGERS.manager = self
def _disable_signals(self):
self._disable_alarm()
if self._sigterm_handler is not None:
remove_signal_handler(signal.SIGTERM, self._sigterm_handler)
self._sigterm_handler = None
if self._sigquit_handler is not None:
remove_signal_handler(signal.SIGQUIT, self._sigquit_handler)
self._sigquit_handler = None
def _sigterm(self, sig=None, frame=None):
"""Handles SIGTERM by stopping the workers."""
if callable(self._sigterm_handler):
self._sigterm_handler(sig, frame)
self._stop()
def _sigquit(self, sig=None, frame=None):
if callable(self._sigquit_handler):
self._sigquit_handler(sig, frame)
self._kill()
def wait_for_stop(self):
"""Blocks until managed runtime is being terminated."""
self._stop_event.wait()
def thread_worker(self, name, function):
"""Registers and start a new thread worker.
Args:
name: Name of the worker group.
function: Entrypoint function to execute in a worker.
"""
with self._mutex:
future = futures.Future()
def run_inner(f=function, future=future, manager=self):
_WORKER_MANAGERS.manager = manager
try:
future.set_result(f())
except BaseException as e:
future.set_exception(e)
builder = lambda t, n: threading.Thread(target=t, name=n)
thread = builder(run_inner, name)
      thread.daemon = True  # setDaemon() is deprecated in favor of the daemon attribute
thread.start()
self._workers_count[name] += 1
worker = ThreadWorker(thread=thread, future=future)
self._active_workers[name].append(worker)
if self._stop_event.is_set():
# Runtime is terminating, so notify the worker.
self._send_exception(worker)
def process_worker(self, name, command, env=None, **kwargs):
"""Adds process worker to the runtime.
Args:
name: Name of the worker's group.
command: Command to execute in the worker.
env: Environment variables to set for the worker.
**kwargs: Other parameters to be passed to `subprocess.Popen`.
"""
with self._mutex:
process = subprocess.Popen(command, env=env or {}, **kwargs)
self._workers_count[name] += 1
self._active_workers[name].append(process)
def register_existing_process(self, name: str, pid: int):
"""Registers already started worker process.
Args:
name: Name of the workers' group.
pid: Pid of the process to monitor.
"""
with self._mutex:
self._workers_count[name] += 1
self._active_workers[name].append(psutil.Process(pid))
def _stop_by_user(self):
"""Handles stopping of the runtime by a user."""
if self._termination_notice_secs != 0:
print(
termcolor.colored(
'User-requested termination. Asking workers to stop.', 'blue'))
print(termcolor.colored('Press CTRL+C to terminate immediately.', 'blue'))
signal.signal(signal.SIGINT, lambda sig, frame: self._kill())
self._stop()
def _kill_process_tree(self, pid):
"""Kills all child processes of the current process."""
parent = psutil.Process(pid)
for process in parent.children(recursive=True):
try:
process.send_signal(signal.SIGKILL)
except psutil.NoSuchProcess:
pass
parent.send_signal(signal.SIGKILL)
def _kill(self):
"""Kills all workers (and main thread/process if needed)."""
print(termcolor.colored('\nKilling entire runtime.', 'blue'))
kill_self = self._kill_main_thread
for workers in self._active_workers.values():
for worker in workers:
if isinstance(worker, ThreadWorker):
# Not possible to kill a thread without killing the process.
kill_self = True
else:
self._kill_process_tree(worker.pid)
if kill_self:
self._kill_process_tree(os.getpid())
def _send_exception(self, worker):
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(worker.thread.ident),
ctypes.py_object(SystemExit))
assert res < 2, 'Exception raise failure'
def _stop_or_kill(self):
"""Stops all workers; kills them if they don't stop on time."""
pending_secs = self._termination_notice_secs - self._stop_counter
if pending_secs == 0:
if self._termination_notice_secs > 0:
still_running = [
label for label in self._active_workers
if self._active_workers[label]
]
print(
termcolor.colored(
f'Worker groups that did not terminate in time: {still_running}',
'red'))
self._kill()
return
if pending_secs >= 0:
print(
termcolor.colored(f'Waiting for workers to stop for {pending_secs}s.',
'blue'),
end='\r')
self._stop_counter += 1
for workers in self._active_workers.values():
for worker in workers:
if isinstance(worker, ThreadWorker):
if self._stop_counter == 1:
self._send_exception(worker)
elif isinstance(worker, subprocess.Popen):
worker.send_signal(signal.SIGTERM)
else:
# Notify all workers running under a proxy process.
children = worker.children(recursive=True)
worker_found = False
for process in children:
if process.name() != 'bash' and 'envelope_' not in process.name():
try:
worker_found = True
process.send_signal(signal.SIGTERM)
except psutil.NoSuchProcess:
pass
if not worker_found:
# No more workers running, so we can kill the proxy itself.
try:
worker.send_signal(signal.SIGKILL)
except psutil.NoSuchProcess:
pass
if self._stop_main_thread:
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(threading.main_thread().ident),
ctypes.py_object(SystemExit))
assert res < 2, 'Exception raise failure'
if pending_secs >= 0:
signal.alarm(1)
def _stop(self):
"""Requests all workers to stop and schedule delayed termination."""
if not self._stop_event.is_set():
self._stop_event.set()
try:
if self._termination_notice_secs > 0:
self._alarm_enabled = True
self._sigalrm_handler = register_signal_handler(
signal.SIGALRM, lambda sig=None, frame=None: self._stop_or_kill())
except ValueError:
# This happens when we attempt to register a signal handler but not in the
# main thread. Send a SIGTERM to redirect to the main thread.
psutil.Process(os.getpid()).send_signal(signal.SIGTERM)
return
self._stop_or_kill()
def _disable_alarm(self):
if self._alarm_enabled:
self._alarm_enabled = False
signal.alarm(0)
remove_signal_handler(signal.SIGALRM, self._sigalrm_handler)
def stop_and_wait(self):
"""Requests stopping all workers and wait for termination."""
with self._mutex:
self._stop()
self.wait(raise_error=False)
def join(self):
self.wait()
def wait(self,
labels_to_wait_for: Optional[Sequence[Text]] = None,
raise_error=True,
return_on_first_completed=False):
"""Waits for workers to finish.
Args:
labels_to_wait_for: If supplied, only wait for these groups' workers to
finish. Wait for all workers otherwise.
raise_error: Raise an exception upon any worker failure.
return_on_first_completed: Whether to return upon the first completed (or
failed) worker.
Raises:
RuntimeError: if any worker raises an exception.
"""
while True:
try:
active_workers = True
while active_workers:
with self._mutex:
self._check_workers()
active_workers = False
if self._first_failure and raise_error:
failure = self._first_failure
self._first_failure = None
raise failure
for label in labels_to_wait_for or self._active_workers.keys():
if self._active_workers[label]:
active_workers = True
if (return_on_first_completed and len(self._active_workers[label])
< self._workers_count[label]):
return
time.sleep(0.1)
return
except SystemExit:
self._stop()
def cleanup_after_test(self, test_case: absltest.TestCase):
"""Cleanups runtime after a test."""
with self._mutex:
self._check_workers()
self._stop()
self._disable_signals()
self.wait(raise_error=False)
with self._mutex:
if self._first_failure:
raise self._first_failure
def _check_workers(self):
"""Checks status of running workers, terminate runtime in case of errors."""
has_workers = False
for label in self._active_workers:
still_active = []
for worker in self._active_workers[label]:
active = True
if isinstance(worker, ThreadWorker):
if not worker.thread.is_alive():
worker.thread.join()
if not self._stop_counter:
try:
worker.future.result()
except BaseException as e:
if not self._first_failure and not self._stop_counter:
self._first_failure = e
active = False
elif isinstance(worker, subprocess.Popen):
try:
res = worker.wait(0)
active = False
if res and not self._first_failure and not self._stop_counter:
self._first_failure = RuntimeError('One of the workers failed.')
except subprocess.TimeoutExpired:
pass
else:
try:
# We can't obtain return code of external process, so clean
# termination is assumed.
res = worker.wait(0)
active = False
except psutil.TimeoutExpired:
pass
if active:
has_workers = True
still_active.append(worker)
self._active_workers[label] = still_active
if has_workers and self._first_failure and not self._stop_counter:
self._stop()
elif not has_workers:
self._disable_alarm()
def __del__(self):
self._disable_signals()
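# Minimal usage sketch (not part of the original module): run two thread
# workers under a manager and wait for both to finish. The first manager
# created reads FLAGS.lp_termination_notice_secs, so under absl the flags
# must already be parsed -- hence the app.run wrapper here.
from absl import app

def _demo(argv):
  manager = WorkerManager(register_in_thread=True)
  manager.thread_worker('squares', lambda: [i * i for i in range(5)])
  manager.thread_worker('cubes', lambda: [i ** 3 for i in range(5)])
  manager.wait()

if __name__ == '__main__':
  app.run(_demo)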
| 33.779097
| 81
| 0.660713
| 1,774
| 14,221
| 5.071026
| 0.192221
| 0.022232
| 0.026456
| 0.016674
| 0.2241
| 0.16663
| 0.108382
| 0.093264
| 0.060805
| 0.056247
| 0
| 0.002841
| 0.257507
| 14,221
| 420
| 82
| 33.859524
| 0.849133
| 0.224738
| 0
| 0.327586
| 0
| 0
| 0.038401
| 0
| 0
| 0
| 0
| 0.002381
| 0.010345
| 1
| 0.089655
| false
| 0.017241
| 0.055172
| 0.003448
| 0.172414
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07340b73d70dfdc6b284b1403d39e1bbdf13bf8f
| 1,054
|
py
|
Python
|
mmdeploy/backend/tensorrt/init_plugins.py
|
hanrui1sensetime/mmdeploy
|
f2594c624b67910e55e24418832bd96685425b2f
|
[
"Apache-2.0"
] | 1
|
2021-12-30T06:29:46.000Z
|
2021-12-30T06:29:46.000Z
|
mmdeploy/backend/tensorrt/init_plugins.py
|
wwjwy/mmdeploy
|
c6fccd0121618c8c4dc07f49823c377003475040
|
[
"Apache-2.0"
] | null | null | null |
mmdeploy/backend/tensorrt/init_plugins.py
|
wwjwy/mmdeploy
|
c6fccd0121618c8c4dc07f49823c377003475040
|
[
"Apache-2.0"
] | 1
|
2022-02-10T04:31:10.000Z
|
2022-02-10T04:31:10.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import ctypes
import glob
import logging
import os
def get_ops_path() -> str:
"""Get path of the TensorRT plugin library.
Returns:
str: A path of the TensorRT plugin library.
"""
wildcard = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'../../../build/lib/libmmdeploy_tensorrt_ops.so'))
paths = glob.glob(wildcard)
lib_path = paths[0] if len(paths) > 0 else ''
return lib_path
def load_tensorrt_plugin() -> bool:
"""Load TensorRT plugins library.
Returns:
bool: True if TensorRT plugin library is successfully loaded.
"""
lib_path = get_ops_path()
success = False
if os.path.exists(lib_path):
ctypes.CDLL(lib_path)
logging.info(f'Successfully loaded tensorrt plugins from {lib_path}')
success = True
else:
        logging.warning('Could not load the library of tensorrt plugins '
                        f'because the file does not exist: {lib_path}')
return success
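# Minimal usage sketch (not part of the original module): attempt to load the
# plugin library and branch on the boolean returned by load_tensorrt_plugin.
if __name__ == '__main__':
    if load_tensorrt_plugin():
        print('TensorRT custom ops available at', get_ops_path())
    else:
        print('TensorRT plugin library not found; custom ops unavailable')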
| 26.35
| 77
| 0.642315
| 138
| 1,054
| 4.768116
| 0.42029
| 0.074468
| 0.095745
| 0.051672
| 0.091185
| 0.091185
| 0
| 0
| 0
| 0
| 0
| 0.002554
| 0.257116
| 1,054
| 39
| 78
| 27.025641
| 0.837803
| 0.239089
| 0
| 0
| 0
| 0
| 0.128105
| 0.060131
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.173913
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0734297119899a9bd812848f57a6fbe4c63a3822
| 16,800
|
py
|
Python
|
reagent/test/world_model/test_seq2reward.py
|
dmitryvinn/ReAgent
|
f98825b9d021ec353a1f9087840a05fea259bf42
|
[
"BSD-3-Clause"
] | null | null | null |
reagent/test/world_model/test_seq2reward.py
|
dmitryvinn/ReAgent
|
f98825b9d021ec353a1f9087840a05fea259bf42
|
[
"BSD-3-Clause"
] | null | null | null |
reagent/test/world_model/test_seq2reward.py
|
dmitryvinn/ReAgent
|
f98825b9d021ec353a1f9087840a05fea259bf42
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import random
import unittest
from typing import Optional
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from parameterized import parameterized
from reagent.core import types as rlt
from reagent.core.parameters import (
NormalizationData,
NormalizationParameters,
ProblemDomain,
Seq2RewardTrainerParameters,
)
from reagent.gym.envs import Gym
from reagent.gym.utils import create_df_from_replay_buffer
from reagent.models.seq2reward_model import Seq2RewardNetwork
from reagent.net_builder.value.fully_connected import FullyConnected
from reagent.prediction.predictor_wrapper import (
Seq2RewardWithPreprocessor,
Seq2RewardPlanShortSeqWithPreprocessor,
FAKE_STATE_ID_LIST_FEATURES,
FAKE_STATE_ID_SCORE_LIST_FEATURES,
)
from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.training.utils import gen_permutations
from reagent.training.world_model.compress_model_trainer import CompressModelTrainer
from reagent.training.world_model.seq2reward_trainer import get_Q, Seq2RewardTrainer
from torch.utils.data import DataLoader
logger = logging.getLogger(__name__)
SEED = 0
STRING_GAME_TESTS = [(False,), (True,)]
class FakeStepPredictionNetwork(nn.Module):
def __init__(self, look_ahead_steps):
super().__init__()
self.look_ahead_steps = look_ahead_steps
def forward(self, state: torch.Tensor):
"""
        Given the current state, predict the probability of
        experiencing the next n steps (1 <= n <= look_ahead_steps).
        For test purposes, it outputs fixed fake numbers.
"""
batch_size, _ = state.shape
return torch.ones(batch_size, self.look_ahead_steps).float()
class FakeSeq2RewardNetwork(nn.Module):
def forward(
self,
state: rlt.FeatureData,
action: rlt.FeatureData,
valid_reward_len: Optional[torch.Tensor] = None,
):
"""
Mimic I/O of Seq2RewardNetwork but return fake reward
Reward is the concatenation of action indices, independent
of state.
For example, when seq_len = 3, batch_size = 1, action_num = 2,
acc_reward = tensor(
[[ 0.],
[ 1.],
[ 10.],
[ 11.],
[100.],
[101.],
[110.],
[111.]]
)
Input action shape: seq_len, batch_size, num_action
Output acc_reward shape: batch_size, 1
"""
# pyre-fixme[9]: action has type `FeatureData`; used as `Tensor`.
action = action.float_features.transpose(0, 1)
action_indices = torch.argmax(action, dim=2).tolist()
acc_reward = torch.tensor(
list(map(lambda x: float("".join(map(str, x))), action_indices))
).reshape(-1, 1)
logger.info(f"acc_reward: {acc_reward}")
return rlt.Seq2RewardOutput(acc_reward=acc_reward)
def create_string_game_data(
dataset_size=10000, training_data_ratio=0.9, filter_short_sequence=False
):
SEQ_LEN = 6
NUM_ACTION = 2
NUM_MDP_PER_BATCH = 5
env = Gym(env_name="StringGame-v0", set_max_steps=SEQ_LEN)
df = create_df_from_replay_buffer(
env=env,
problem_domain=ProblemDomain.DISCRETE_ACTION,
desired_size=dataset_size,
multi_steps=None,
ds="2020-10-10",
)
if filter_short_sequence:
batch_size = NUM_MDP_PER_BATCH
time_diff = torch.ones(SEQ_LEN, batch_size)
valid_step = SEQ_LEN * torch.ones(batch_size, dtype=torch.int64)[:, None]
not_terminal = torch.Tensor(
[0 if i == SEQ_LEN - 1 else 1 for i in range(SEQ_LEN)]
)
not_terminal = torch.transpose(not_terminal.tile(NUM_MDP_PER_BATCH, 1), 0, 1)
else:
batch_size = NUM_MDP_PER_BATCH * SEQ_LEN
time_diff = torch.ones(SEQ_LEN, batch_size)
valid_step = torch.arange(SEQ_LEN, 0, -1).tile(NUM_MDP_PER_BATCH)[:, None]
not_terminal = torch.transpose(
torch.tril(torch.ones(SEQ_LEN, SEQ_LEN), diagonal=-1).tile(
NUM_MDP_PER_BATCH, 1
),
0,
1,
)
num_batches = int(dataset_size / SEQ_LEN / NUM_MDP_PER_BATCH)
batches = [None for _ in range(num_batches)]
batch_count, batch_seq_count = 0, 0
batch_reward = torch.zeros(SEQ_LEN, batch_size)
batch_action = torch.zeros(SEQ_LEN, batch_size, NUM_ACTION)
batch_state = torch.zeros(SEQ_LEN, batch_size, NUM_ACTION)
for mdp_id in sorted(set(df.mdp_id)):
mdp = df[df["mdp_id"] == mdp_id].sort_values("sequence_number", ascending=True)
if len(mdp) != SEQ_LEN:
continue
all_step_reward = torch.Tensor(list(mdp["reward"]))
all_step_state = torch.Tensor([list(s.values()) for s in mdp["state_features"]])
all_step_action = torch.zeros_like(all_step_state)
all_step_action[torch.arange(SEQ_LEN), [int(a) for a in mdp["action"]]] = 1.0
for j in range(SEQ_LEN):
if filter_short_sequence and j > 0:
break
reward = torch.zeros_like(all_step_reward)
reward[: SEQ_LEN - j] = all_step_reward[-(SEQ_LEN - j) :]
batch_reward[:, batch_seq_count] = reward
state = torch.zeros_like(all_step_state)
state[: SEQ_LEN - j] = all_step_state[-(SEQ_LEN - j) :]
batch_state[:, batch_seq_count] = state
action = torch.zeros_like(all_step_action)
action[: SEQ_LEN - j] = all_step_action[-(SEQ_LEN - j) :]
batch_action[:, batch_seq_count] = action
batch_seq_count += 1
if batch_seq_count == batch_size:
batches[batch_count] = rlt.MemoryNetworkInput(
reward=batch_reward,
action=rlt.FeatureData(float_features=batch_action),
state=rlt.FeatureData(float_features=batch_state),
next_state=rlt.FeatureData(
float_features=torch.zeros_like(batch_state)
), # fake, not used anyway
not_terminal=not_terminal,
time_diff=time_diff,
valid_step=valid_step,
step=None,
)
batch_count += 1
batch_seq_count = 0
batch_reward = torch.zeros_like(batch_reward)
batch_action = torch.zeros_like(batch_action)
batch_state = torch.zeros_like(batch_state)
assert batch_count == num_batches
num_training_batches = int(training_data_ratio * num_batches)
training_data = DataLoader(
batches[:num_training_batches], collate_fn=lambda x: x[0]
)
eval_data = DataLoader(batches[num_training_batches:], collate_fn=lambda x: x[0])
return training_data, eval_data
def train_seq2reward_model(training_data, learning_rate=0.01, num_epochs=5):
SEQ_LEN, batch_size, NUM_ACTION = next(
iter(training_data)
).action.float_features.shape
assert SEQ_LEN == 6 and NUM_ACTION == 2
seq2reward_network = Seq2RewardNetwork(
state_dim=NUM_ACTION,
action_dim=NUM_ACTION,
num_hiddens=64,
num_hidden_layers=2,
)
trainer_param = Seq2RewardTrainerParameters(
learning_rate=learning_rate,
multi_steps=SEQ_LEN,
action_names=["0", "1"],
gamma=1.0,
view_q_value=True,
)
trainer = Seq2RewardTrainer(
seq2reward_network=seq2reward_network, params=trainer_param
)
pl.seed_everything(SEED)
pl_trainer = pl.Trainer(max_epochs=num_epochs, deterministic=True)
pl_trainer.fit(trainer, training_data)
return trainer
def eval_seq2reward_model(eval_data, seq2reward_trainer):
SEQ_LEN, batch_size, NUM_ACTION = next(iter(eval_data)).action.float_features.shape
initial_state = torch.Tensor([[0, 0]])
initial_state_q_values = torch.squeeze(
get_Q(
seq2reward_trainer.seq2reward_network,
initial_state,
seq2reward_trainer.all_permut,
)
)
total_mse_loss = 0
total_q_values = torch.zeros(NUM_ACTION)
total_action_distribution = torch.zeros(NUM_ACTION)
for idx, batch in enumerate(eval_data):
(
mse_loss,
_,
q_values,
action_distribution,
) = seq2reward_trainer.validation_step(batch, idx)
total_mse_loss += mse_loss
total_q_values += torch.tensor(q_values)
total_action_distribution += torch.tensor(action_distribution)
N_eval = len(eval_data)
eval_mse_loss = total_mse_loss / N_eval
eval_q_values = total_q_values / N_eval
eval_action_distribution = total_action_distribution / N_eval
return (
initial_state_q_values,
eval_mse_loss,
eval_q_values,
eval_action_distribution,
)
def train_seq2reward_compress_model(
training_data, seq2reward_network, learning_rate=0.1, num_epochs=5
):
SEQ_LEN, batch_size, NUM_ACTION = next(
iter(training_data)
).action.float_features.shape
assert SEQ_LEN == 6 and NUM_ACTION == 2
compress_net_builder = FullyConnected(sizes=[8, 8])
state_normalization_data = NormalizationData(
dense_normalization_parameters={
0: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
1: NormalizationParameters(feature_type=DO_NOT_PREPROCESS),
}
)
compress_model_network = compress_net_builder.build_value_network(
state_normalization_data,
output_dim=NUM_ACTION,
)
trainer_param = Seq2RewardTrainerParameters(
learning_rate=0.0,
multi_steps=SEQ_LEN,
action_names=["0", "1"],
compress_model_learning_rate=learning_rate,
gamma=1.0,
view_q_value=True,
)
trainer = CompressModelTrainer(
compress_model_network=compress_model_network,
seq2reward_network=seq2reward_network,
params=trainer_param,
)
pl.seed_everything(SEED)
pl_trainer = pl.Trainer(max_epochs=num_epochs, deterministic=True)
pl_trainer.fit(trainer, training_data)
return trainer
def eval_seq2reward_compress_model(eval_data, compress_model_trainer):
SEQ_LEN, batch_size, NUM_ACTION = next(iter(eval_data)).action.float_features.shape
total_mse_loss = 0
total_q_values = torch.zeros(NUM_ACTION)
total_action_distribution = torch.zeros(NUM_ACTION)
for idx, batch in enumerate(eval_data):
(
mse_loss,
q_values,
action_distribution,
_,
) = compress_model_trainer.validation_step(batch, idx)
total_mse_loss += mse_loss
total_q_values += torch.tensor(q_values)
total_action_distribution += torch.tensor(action_distribution)
N_eval = len(eval_data)
eval_mse_loss = total_mse_loss / N_eval
eval_q_values = total_q_values / N_eval
eval_action_distribution = total_action_distribution / N_eval
return eval_mse_loss, eval_q_values, eval_action_distribution
class TestSeq2Reward(unittest.TestCase):
def test_seq2reward_with_preprocessor_plan_short_sequence(self):
self._test_seq2reward_with_preprocessor(plan_short_sequence=True)
def test_seq2reward_with_preprocessor_plan_full_sequence(self):
self._test_seq2reward_with_preprocessor(plan_short_sequence=False)
def _test_seq2reward_with_preprocessor(self, plan_short_sequence):
state_dim = 4
action_dim = 2
seq_len = 3
model = FakeSeq2RewardNetwork()
state_normalization_parameters = {
i: NormalizationParameters(
feature_type=DO_NOT_PREPROCESS, mean=0.0, stddev=1.0
)
for i in range(1, state_dim)
}
state_preprocessor = Preprocessor(state_normalization_parameters, False)
if plan_short_sequence:
step_prediction_model = FakeStepPredictionNetwork(seq_len)
model_with_preprocessor = Seq2RewardPlanShortSeqWithPreprocessor(
model,
step_prediction_model,
state_preprocessor,
seq_len,
action_dim,
)
else:
model_with_preprocessor = Seq2RewardWithPreprocessor(
model,
state_preprocessor,
seq_len,
action_dim,
)
input_prototype = rlt.ServingFeatureData(
float_features_with_presence=state_preprocessor.input_prototype(),
id_list_features=FAKE_STATE_ID_LIST_FEATURES,
id_score_list_features=FAKE_STATE_ID_SCORE_LIST_FEATURES,
)
q_values = model_with_preprocessor(input_prototype)
if plan_short_sequence:
# When planning for 1, 2, and 3 steps ahead,
# the expected q values are respectively:
# [0, 1], [1, 11], [11, 111]
# Weighting the expected q values by predicted step
# probabilities [0.33, 0.33, 0.33], we have [4, 41]
expected_q_values = torch.tensor([[4.0, 41.0]])
else:
expected_q_values = torch.tensor([[11.0, 111.0]])
assert torch.all(expected_q_values == q_values)
def test_get_Q(self):
NUM_ACTION = 2
MULTI_STEPS = 3
BATCH_SIZE = 2
STATE_DIM = 4
all_permut = gen_permutations(MULTI_STEPS, NUM_ACTION)
seq2reward_network = FakeSeq2RewardNetwork()
state = torch.zeros(BATCH_SIZE, STATE_DIM)
q_values = get_Q(seq2reward_network, state, all_permut)
expected_q_values = torch.tensor([[11.0, 111.0], [11.0, 111.0]])
logger.info(f"q_values: {q_values}")
assert torch.all(expected_q_values == q_values)
def test_gen_permutations_seq_len_1_action_6(self):
SEQ_LEN = 1
NUM_ACTION = 6
expected_outcome = torch.tensor([[0], [1], [2], [3], [4], [5]])
self._test_gen_permutations(SEQ_LEN, NUM_ACTION, expected_outcome)
def test_gen_permutations_seq_len_3_num_action_2(self):
SEQ_LEN = 3
NUM_ACTION = 2
expected_outcome = torch.tensor(
[
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
]
)
self._test_gen_permutations(SEQ_LEN, NUM_ACTION, expected_outcome)
def _test_gen_permutations(self, SEQ_LEN, NUM_ACTION, expected_outcome):
# expected shape: SEQ_LEN, PERM_NUM, ACTION_DIM
result = gen_permutations(SEQ_LEN, NUM_ACTION)
assert result.shape == (SEQ_LEN, NUM_ACTION ** SEQ_LEN, NUM_ACTION)
outcome = torch.argmax(result.transpose(0, 1), dim=-1)
assert torch.all(outcome == expected_outcome)
@parameterized.expand(STRING_GAME_TESTS)
@unittest.skipIf("SANDCASTLE" in os.environ, "Skipping long test on sandcastle.")
def test_seq2reward_on_string_game_v0(self, filter_short_sequence):
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
training_data, eval_data = create_string_game_data(
filter_short_sequence=filter_short_sequence
)
seq2reward_trainer = train_seq2reward_model(training_data)
(
initial_state_q_values,
eval_mse_loss,
eval_q_values,
eval_action_distribution,
) = eval_seq2reward_model(eval_data, seq2reward_trainer)
assert abs(initial_state_q_values[0].item() - 10) < 1.0
assert abs(initial_state_q_values[1].item() - 5) < 1.0
if filter_short_sequence:
assert eval_mse_loss < 0.1
else:
# Same short sequences may have different total rewards due to the missing
# states and actions in previous steps, so the trained network is not able
# to reduce the mse loss to values close to zero.
assert eval_mse_loss < 10
compress_model_trainer = train_seq2reward_compress_model(
training_data, seq2reward_trainer.seq2reward_network
)
(
compress_eval_mse_loss,
compress_eval_q_values,
compress_eval_action_distribution,
) = eval_seq2reward_compress_model(eval_data, compress_model_trainer)
assert compress_eval_mse_loss < 1e-5
assert torch.all(eval_q_values - compress_eval_q_values < 1e-5)
assert torch.all(
eval_action_distribution - compress_eval_action_distribution < 1e-5
)
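# Self-contained sketch (not in the original tests) of the action-permutation
# enumeration that _test_gen_permutations verifies above: all
# NUM_ACTION ** SEQ_LEN action sequences as one-hot tensors of shape
# (SEQ_LEN, PERM_NUM, NUM_ACTION). This mirrors the documented output
# contract of gen_permutations, not its internal implementation.
from itertools import product

import torch

def enumerate_permutations(seq_len: int, num_action: int) -> torch.Tensor:
    perms = torch.tensor(list(product(range(num_action), repeat=seq_len)))
    one_hot = torch.nn.functional.one_hot(perms, num_classes=num_action).float()
    return one_hot.transpose(0, 1)  # (seq_len, num_action ** seq_len, num_action)

result = enumerate_permutations(3, 2)
assert result.shape == (3, 2 ** 3, 2)
assert torch.all(
    torch.argmax(result.transpose(0, 1), dim=-1)[1] == torch.tensor([0, 0, 1])
)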
| 35.66879
| 88
| 0.65619
| 2,080
| 16,800
| 4.944712
| 0.15625
| 0.028585
| 0.010695
| 0.014584
| 0.430724
| 0.352747
| 0.291784
| 0.26456
| 0.235002
| 0.215654
| 0
| 0.022562
| 0.26131
| 16,800
| 470
| 89
| 35.744681
| 0.806205
| 0.070536
| 0
| 0.258065
| 0
| 0
| 0.010441
| 0
| 0
| 0
| 0
| 0.002128
| 0.037634
| 1
| 0.043011
| false
| 0
| 0.061828
| 0
| 0.13172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0735c43eb0b2d2d58ca5e330e9b0ab738257e5f2
| 18,432
|
py
|
Python
|
kolibri/core/auth/management/commands/sync.py
|
reubenjacob/kolibri
|
028bb2ad63e438c832ff657d37f7b05c3400f2da
|
[
"MIT"
] | null | null | null |
kolibri/core/auth/management/commands/sync.py
|
reubenjacob/kolibri
|
028bb2ad63e438c832ff657d37f7b05c3400f2da
|
[
"MIT"
] | 8
|
2021-05-21T15:31:24.000Z
|
2022-02-24T15:02:14.000Z
|
kolibri/core/auth/management/commands/sync.py
|
kuboginichimaru/kolibri
|
18b398f62baa1c60f8456f7f9c6d6c9447068f69
|
[
"MIT"
] | 1
|
2019-10-05T11:14:40.000Z
|
2019-10-05T11:14:40.000Z
|
import json
import logging
import math
import re
from contextlib import contextmanager
from django.core.management import call_command
from django.core.management.base import CommandError
from morango.models import Filter
from morango.models import InstanceIDModel
from morango.models import ScopeDefinition
from morango.sync.controller import MorangoProfileController
from ..utils import create_superuser_and_provision_device
from ..utils import get_baseurl
from ..utils import get_client_and_server_certs
from ..utils import get_dataset_id
from ..utils import get_single_user_sync_filter
from ..utils import provision_single_user_device
from kolibri.core.auth.constants.morango_sync import PROFILE_FACILITY_DATA
from kolibri.core.auth.constants.morango_sync import ScopeDefinitions
from kolibri.core.auth.constants.morango_sync import State
from kolibri.core.auth.management.utils import get_facility
from kolibri.core.auth.management.utils import run_once
from kolibri.core.auth.models import dataset_cache
from kolibri.core.logger.utils.data import bytes_for_humans
from kolibri.core.tasks.exceptions import UserCancelledError
from kolibri.core.tasks.management.commands.base import AsyncCommand
from kolibri.core.utils.lock import db_lock
from kolibri.utils import conf
DATA_PORTAL_SYNCING_BASE_URL = conf.OPTIONS["Urls"]["DATA_PORTAL_SYNCING_BASE_URL"]
TRANSFER_MESSAGE = "{records_transferred}/{records_total}, {transfer_total}"
logger = logging.getLogger(__name__)
class Command(AsyncCommand):
help = "Allow the syncing of facility data with Kolibri Data Portal or another Kolibri device."
def add_arguments(self, parser):
parser.add_argument(
"--facility", action="store", type=str, help="ID of facility to sync"
)
parser.add_argument(
"--baseurl", type=str, default=DATA_PORTAL_SYNCING_BASE_URL, dest="baseurl"
)
parser.add_argument("--noninteractive", action="store_true")
parser.add_argument(
"--chunk-size",
type=int,
default=500,
help="Chunk size of records to send/retrieve per request",
)
parser.add_argument(
"--no-push", action="store_true", help="Do not push data to the server"
)
parser.add_argument(
"--no-pull", action="store_true", help="Do not pull data from the server"
)
parser.add_argument(
"--username",
type=str,
help="username of superuser or facility admin on server we are syncing with",
)
parser.add_argument(
"--password",
type=str,
help="password of superuser or facility admin on server we are syncing with",
)
parser.add_argument(
"--user",
type=str,
help="for single-user syncing, the user ID of the account to be synced",
)
parser.add_argument(
"--no-provision",
action="store_true",
help="do not create a facility and temporary superuser",
)
# parser.add_argument("--scope-id", type=str, default=FULL_FACILITY)
def handle_async(self, *args, **options): # noqa C901
(
baseurl,
facility_id,
chunk_size,
username,
password,
user_id,
no_push,
no_pull,
noninteractive,
no_provision,
) = (
options["baseurl"],
options["facility"],
options["chunk_size"],
options["username"],
options["password"],
options["user"],
options["no_push"],
options["no_pull"],
options["noninteractive"],
options["no_provision"],
)
PORTAL_SYNC = baseurl == DATA_PORTAL_SYNCING_BASE_URL
# validate url that is passed in
if not PORTAL_SYNC:
baseurl = get_baseurl(baseurl)
        # load scope definitions in case the user syncs directly without migrating the database
if not ScopeDefinition.objects.filter():
call_command("loaddata", "scopedefinitions")
dataset_cache.clear()
dataset_cache.activate()
# try to connect to server
controller = MorangoProfileController(PROFILE_FACILITY_DATA)
network_connection = controller.create_network_connection(baseurl)
# if instance_ids are equal, this means device is trying to sync with itself, which we don't allow
if (
InstanceIDModel.get_or_create_current_instance()[0].id
== network_connection.server_info["instance_id"]
):
raise CommandError(
"Device can not sync with itself. Please recheck base URL and try again."
)
if user_id: # it's a single-user sync
if not facility_id:
raise CommandError(
"Facility ID must be specified in order to do single-user syncing"
)
if not re.match("[a-f0-9]{32}", user_id):
raise CommandError("User ID must be a 32-character UUID (no dashes)")
dataset_id = get_dataset_id(
baseurl, identifier=facility_id, noninteractive=True
)
client_cert, server_cert, username = get_client_and_server_certs(
username,
password,
dataset_id,
network_connection,
user_id=user_id,
noninteractive=noninteractive,
)
scopes = [client_cert.scope_definition_id, server_cert.scope_definition_id]
if len(set(scopes)) != 2:
raise CommandError(
"To do a single-user sync, one device must have a single-user certificate, and the other a full-facility certificate."
)
elif PORTAL_SYNC: # do portal sync setup
facility = get_facility(
facility_id=facility_id, noninteractive=noninteractive
)
# check for the certs we own for the specific facility
client_cert = (
facility.dataset.get_owned_certificates()
.filter(scope_definition_id=ScopeDefinitions.FULL_FACILITY)
.first()
)
if not client_cert:
raise CommandError(
"This device does not own a certificate for Facility: {}".format(
facility.name
)
)
# get primary partition
scope_params = json.loads(client_cert.scope_params)
dataset_id = scope_params["dataset_id"]
# check if the server already has a cert for this facility
server_certs = network_connection.get_remote_certificates(
dataset_id, scope_def_id=ScopeDefinitions.FULL_FACILITY
)
# if necessary, push a cert up to the server
server_cert = (
server_certs[0]
if server_certs
else network_connection.push_signed_client_certificate_chain(
local_parent_cert=client_cert,
scope_definition_id=ScopeDefinitions.FULL_FACILITY,
scope_params=scope_params,
)
)
else: # do P2P setup
dataset_id = get_dataset_id(
baseurl, identifier=facility_id, noninteractive=noninteractive
)
client_cert, server_cert, username = get_client_and_server_certs(
username,
password,
dataset_id,
network_connection,
noninteractive=noninteractive,
)
logger.info("Syncing has been initiated (this may take a while)...")
sync_session_client = network_connection.create_sync_session(
client_cert, server_cert, chunk_size=chunk_size
)
try:
# pull from server
if not no_pull:
self._handle_pull(
sync_session_client,
noninteractive,
dataset_id,
client_cert,
server_cert,
user_id=user_id,
)
# and push our own data to server
if not no_push:
self._handle_push(
sync_session_client,
noninteractive,
dataset_id,
client_cert,
server_cert,
user_id=user_id,
)
if not no_provision:
with self._lock():
if user_id:
provision_single_user_device(user_id)
else:
create_superuser_and_provision_device(
username, dataset_id, noninteractive=noninteractive
)
except UserCancelledError:
if self.job:
self.job.extra_metadata.update(sync_state=State.CANCELLED)
self.job.save_meta()
logger.info("Syncing has been cancelled.")
return
network_connection.close()
if self.job:
self.job.extra_metadata.update(sync_state=State.COMPLETED)
self.job.save_meta()
dataset_cache.deactivate()
logger.info("Syncing has been completed.")
@contextmanager
def _lock(self):
cancellable = False
# job can't be cancelled while locked
if self.job:
cancellable = self.job.cancellable
self.job.save_as_cancellable(cancellable=False)
with db_lock():
yield
if self.job:
self.job.save_as_cancellable(cancellable=cancellable)
def _raise_cancel(self, *args, **kwargs):
if self.is_cancelled() and (not self.job or self.job.cancellable):
raise UserCancelledError()
def _handle_pull(
self,
sync_session_client,
noninteractive,
dataset_id,
client_cert,
server_cert,
user_id,
):
"""
:type sync_session_client: morango.sync.syncsession.SyncSessionClient
:type noninteractive: bool
:type dataset_id: str
"""
sync_client = sync_session_client.get_pull_client()
sync_client.signals.queuing.connect(self._raise_cancel)
sync_client.signals.transferring.connect(self._raise_cancel)
self._queueing_tracker_adapter(
sync_client.signals.queuing,
"Remotely preparing data",
State.REMOTE_QUEUING,
noninteractive,
)
self._transfer_tracker_adapter(
sync_client.signals.transferring,
"Receiving data ({})".format(TRANSFER_MESSAGE),
State.PULLING,
noninteractive,
)
self._queueing_tracker_adapter(
sync_client.signals.dequeuing,
"Locally integrating received data",
State.LOCAL_DEQUEUING,
noninteractive,
)
self._session_tracker_adapter(
sync_client.signals.session,
"Creating pull transfer session",
"Completed pull transfer session",
)
if not user_id:
# full-facility sync
sync_client.initialize(Filter(dataset_id))
else:
# single-user sync
client_is_single_user = (
client_cert.scope_definition_id == ScopeDefinitions.SINGLE_USER
)
filt = get_single_user_sync_filter(
dataset_id, user_id, is_read=client_is_single_user
)
sync_client.initialize(Filter(filt))
sync_client.run()
with self._lock():
sync_client.finalize()
def _handle_push(
self,
sync_session_client,
noninteractive,
dataset_id,
client_cert,
server_cert,
user_id,
):
"""
:type sync_session_client: morango.sync.syncsession.SyncSessionClient
:type noninteractive: bool
:type dataset_id: str
"""
sync_client = sync_session_client.get_push_client()
sync_client.signals.transferring.connect(self._raise_cancel)
self._queueing_tracker_adapter(
sync_client.signals.queuing,
"Locally preparing data to send",
State.LOCAL_QUEUING,
noninteractive,
)
self._transfer_tracker_adapter(
sync_client.signals.transferring,
"Sending data ({})".format(TRANSFER_MESSAGE),
State.PUSHING,
noninteractive,
)
self._queueing_tracker_adapter(
sync_client.signals.dequeuing,
"Remotely integrating data",
State.REMOTE_DEQUEUING,
noninteractive,
)
self._session_tracker_adapter(
sync_client.signals.session,
"Creating push transfer session",
"Completed push transfer session",
)
with self._lock():
if not user_id:
# full-facility sync
sync_client.initialize(Filter(dataset_id))
else:
# single-user sync
client_is_single_user = (
client_cert.scope_definition_id == ScopeDefinitions.SINGLE_USER
)
filt = get_single_user_sync_filter(
dataset_id, user_id, is_read=not client_is_single_user
)
sync_client.initialize(Filter(filt))
sync_client.run()
# we can't cancel remotely integrating data
if self.job:
self.job.save_as_cancellable(cancellable=False)
# allow server timeout since remotely integrating data can take a while and the request
# could timeout. In that case, we'll assume everything is good.
sync_client.finalize(allow_server_timeout=True)
def _update_all_progress(self, progress_fraction, progress):
"""
        Override the parent's progress-update callback to report from the progress tracker we are passed
"""
if self.job:
self.job.update_progress(progress_fraction, 1.0)
self.job.extra_metadata.update(progress.extra_data)
self.job.save_meta()
def _session_tracker_adapter(self, signal_group, started_msg, completed_msg):
"""
Attaches a signal handler to session creation signals
:type signal_group: morango.sync.syncsession.SyncSignalGroup
:type started_msg: str
:type completed_msg: str
"""
@run_once
def session_creation(transfer_session):
"""
A session is created individually for pushing and pulling
"""
logger.info(started_msg)
if self.job:
self.job.extra_metadata.update(sync_state=State.SESSION_CREATION)
@run_once
def session_destruction(transfer_session):
if transfer_session.records_total == 0:
logger.info("There are no records to transfer")
logger.info(completed_msg)
signal_group.started.connect(session_creation)
signal_group.completed.connect(session_destruction)
def _transfer_tracker_adapter(
self, signal_group, message, sync_state, noninteractive
):
"""
Attaches a signal handler to pushing/pulling signals
:type signal_group: morango.sync.syncsession.SyncSignalGroup
:type message: str
:type sync_state: str
:type noninteractive: bool
"""
tracker = self.start_progress(total=100)
def stats_msg(transfer_session):
transfer_total = (
transfer_session.bytes_sent + transfer_session.bytes_received
)
return message.format(
records_transferred=transfer_session.records_transferred,
records_total=transfer_session.records_total,
transfer_total=bytes_for_humans(transfer_total),
)
def stats(transfer_session):
logger.info(stats_msg(transfer_session))
def handler(transfer_session):
"""
:type transfer_session: morango.models.core.TransferSession
"""
progress = (
100
* transfer_session.records_transferred
/ float(transfer_session.records_total)
)
tracker.update_progress(
increment=math.ceil(progress - tracker.progress),
message=stats_msg(transfer_session),
extra_data=dict(
bytes_sent=transfer_session.bytes_sent,
bytes_received=transfer_session.bytes_received,
sync_state=sync_state,
),
)
if noninteractive or tracker.progressbar is None:
signal_group.started.connect(stats)
signal_group.in_progress.connect(stats)
signal_group.connect(handler)
# log one more time at end to capture in logging output
signal_group.completed.connect(stats)
def _queueing_tracker_adapter(
self, signal_group, message, sync_state, noninteractive
):
"""
Attaches a signal handler to queuing/dequeuing signals
:type signal_group: morango.sync.syncsession.SyncSignalGroup
:type message: str
:type sync_state: str
:type noninteractive: bool
"""
tracker = self.start_progress(total=2)
def started(transfer_session):
dataset_cache.clear()
if noninteractive or tracker.progressbar is None:
logger.info(message)
def handler(transfer_session):
tracker.update_progress(
message=message, extra_data=dict(sync_state=sync_state)
)
        signal_group.started.connect(started)
signal_group.started.connect(handler)
signal_group.completed.connect(handler)
| 34.711864
| 138
| 0.598307
| 1,907
| 18,432
| 5.53592
| 0.160986
| 0.021786
| 0.017713
| 0.018187
| 0.405987
| 0.341195
| 0.324903
| 0.309084
| 0.296296
| 0.263427
| 0
| 0.002111
| 0.331923
| 18,432
| 530
| 139
| 34.777358
| 0.855205
| 0.103353
| 0
| 0.350877
| 0
| 0.002506
| 0.09992
| 0.004078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042607
| false
| 0.015038
| 0.070175
| 0
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0735ef32022db6fd8f2cae3cf86a392fe7526086
| 5,787
|
py
|
Python
|
warp.py
|
RezaFirouzii/fum-delta-vision
|
0a8ad1d434006a9aee0a12c1f021c0bca0bc87e2
|
[
"MIT"
] | null | null | null |
warp.py
|
RezaFirouzii/fum-delta-vision
|
0a8ad1d434006a9aee0a12c1f021c0bca0bc87e2
|
[
"MIT"
] | null | null | null |
warp.py
|
RezaFirouzii/fum-delta-vision
|
0a8ad1d434006a9aee0a12c1f021c0bca0bc87e2
|
[
"MIT"
] | null | null | null |
import math
import imageio
import cv2 as cv
import numpy as np
import transformer
def fix_rotation(img):
img_copy = img.copy()
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
rows, cols = img.shape
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 15, 9)
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
img = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
img = cv.medianBlur(img, 3)
contours, hierarchy = cv.findContours(img, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
roi = max(contours, key=cv.contourArea)
x, y, w, h = cv.boundingRect(roi)
corners = [[x, y], [x + w, y], [x, y + h], [x + w, y + h]]
src = np.float32(corners)
# src = np.reshape(src, (len(src), 1, 2))
# perimeter = cv.arcLength(src, True)
# corners = cv.approxPolyDP(src, perimeter // 10, True)
# corners = np.vstack(corners)
dst = np.float32([[0, 0], [cols, 0], [0, rows], [cols, rows]])
matrix = cv.getPerspectiveTransform(src, dst)
rotated_img = cv.warpPerspective(img_copy, matrix, (cols, rows))
    cv.imshow('', rotated_img)
    return rotated_img
D1 = 105
D2 = 175
D3 = 275
if __name__ == "__main__":
cap = cv.VideoCapture('samples/delta.mp4')
if not cap.isOpened():
raise IOError("Video was not opened!")
mse = 0
count = 0
reader = imageio.get_reader('samples/delta.mp4')
fps = reader.get_meta_data()['fps']
writer = imageio.get_writer('samples/result.mp4', fps=fps)
while True:
res, frame = cap.read()
if not res:
break
mean_error = 0
holes_count = 0
img = frame.copy()
cv.imshow('dfa', img)
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
frame_copy = frame.copy()
# frame = cv.adaptiveThreshold(frame, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 15, 9)
# kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
# frame = cv.morphologyEx(frame, cv.MORPH_OPEN, kernel)
# frame = cv.medianBlur(frame, 3)
# contours, hierarchy = cv.findContours(frame, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
# roi = max(contours, key=cv.contourArea)
# x, y, w, h = cv.boundingRect(roi)
x, y, w, h = 115, 0, 445, 360
img = img[y: y+h, x: x+w]
img = transformer.rotate_along_axis(img, theta=40)
frame_copy = frame_copy[y: y+h, x: x+w]
frame_copy = transformer.rotate_along_axis(frame_copy, theta=40)
# cv.imshow('', frame_copy)
# cv.rectangle(frame_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
# cv.drawContours(frame_copy, roi, -1, (0, 0, 255), 2)
# res, mask = cv.threshold(frame_copy, 0, 255, cv.THRESH_BINARY)
# frame_copy = cv.bitwise_and(frame_copy, frame_copy, mask=mask)
# corners = cv.goodFeaturesToTrack(frame_copy, 1000, 0.0001, 1)
# corners = list(sorted(corners, key=lambda x: x[0][1]))
# print(corners[-1], corners[-2])
# print()
# corners = np.array([[38, 293], [407, 293]])
# for item in corners:
# # x, y = map(int, item.ravel())
# x, y = item
# cv.circle(img, (x, y), 5, (0, 0, 255), -1)
src = np.float32([[0, 0], [w, 0], [38, 293], [407, 293]])
dst = np.float32([[0, 0], [w, 0], [30, h], [w - 30, h]])
matrix = cv.getPerspectiveTransform(src, dst)
img = cv.warpPerspective(img, matrix, (w, h))
cv.imshow('', img)
img_copy = img.copy()
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 15, 9)
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
img = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
img = cv.medianBlur(img, 3)
origin = (w // 2 + 4, h // 2 + 2)
o1, o2 = origin
r = w // 2 + 1
ORIGIN = (0, 0)
R = 300 # mm
contours, hierarchy = cv.findContours(img, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
contours = list(filter(lambda x: 50 < cv.contourArea(x) < 175, contours))
factor = 0.1
smooth_contours = []
for i in range(len(contours)):
epsilon = factor * cv.arcLength(contours[i], True)
approx = cv.approxPolyDP(contours[i], epsilon, True)
x, y, width, height = cv.boundingRect(approx)
area = width*height
if len(approx) == 4 and 75 < area < 200:
smooth_contours.append(contours[i])
center, radius = cv.minEnclosingCircle(approx)
radius = int(radius)
center = tuple(map(int, center))
x, y = center
X = ((x - o1) * R) / r
Y = ((y - o2) * R) / r
X, Y = round(X, 2), round(Y, 2)
cv.circle(img_copy, center, radius, (0, 255, 0), 2)
cv.putText(img_copy, str((X, Y)), center, cv.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 255, 255), 1, cv.LINE_AA)
e1, e2, e3 = map(lambda d: abs(math.hypot(X, Y) - d), [D1, D2, D3])
error = min(e1, e2, e3)
if error < 10:
mean_error += error ** 2
holes_count += 1
cv.circle(img_copy, origin, 4, (0, 0, 255), -1)
# cv.line(img_copy, origin, (origin[0], origin[1]), (255, 0, 255), 2)
        if holes_count:  # avoid ZeroDivisionError when no holes are detected
            mean_error /= holes_count
            mse += mean_error
            count += 1
cv.imshow("Final", img_copy)
writer.append_data(img_copy)
# cv.imshow("Chg", img)
if cv.waitKey(30) == 27:
break
print("E:", mse / count, "N:", count)
writer.close()
cap.release()
cv.destroyAllWindows()
| 35.286585
| 122
| 0.556247
| 798
| 5,787
| 3.922306
| 0.243108
| 0.025559
| 0.01278
| 0.018211
| 0.307668
| 0.260064
| 0.247923
| 0.247923
| 0.247923
| 0.247923
| 0
| 0.057283
| 0.294107
| 5,787
| 164
| 123
| 35.286585
| 0.708935
| 0.214273
| 0
| 0.176471
| 0
| 0
| 0.021248
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009804
| false
| 0
| 0.04902
| 0
| 0.058824
| 0.009804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0736759453528b8e50d3977ed9f783e1f7d2c291
| 2,318
|
py
|
Python
|
sdssobstools/boss_data.py
|
sdss/ObserverTools
|
7f9949341edc91a79dac69d79e24af09e8558ffa
|
[
"BSD-3-Clause"
] | null | null | null |
sdssobstools/boss_data.py
|
sdss/ObserverTools
|
7f9949341edc91a79dac69d79e24af09e8558ffa
|
[
"BSD-3-Clause"
] | null | null | null |
sdssobstools/boss_data.py
|
sdss/ObserverTools
|
7f9949341edc91a79dac69d79e24af09e8558ffa
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
A tool to grab a single BOSS image and pull a few items from its header. It is
used in bin/sloan_log.py, but it could be used directly as well.
"""
import argparse
from pathlib import Path
from astropy.time import Time
import fitsio
class BOSSRaw:
"""A class to parse raw data from APOGEE. The purpose of collecting this
raw data is to future-proof things that need these ouptuts in case
things like autoschedulers change, which many libraries depend on. This
will hopefully help SDSS-V logging"""
def __init__(self, fil):
self.fil = fil
header = fitsio.read_header(fil)
self.dither = header['MGDPOS']
if not self.dither: # This key started working instead during SDSS-V
self.dither = header['POINTING'][0]
self.exp_time = int(header['EXPTIME'])
self.isot = Time(header['DATE-OBS']) # UTC
self.plate_id = header['PLATEID']
self.cart_id = header['CARTID']
self.exp_id = int(str(fil).split('-')[-1].split('.')[0])
self.lead = header['PLATETYP']
if 'Closed' in header['HARTMANN']:
self.hartmann = 'Closed'
self.flavor = header['FLAVOR'].capitalize()
elif 'Out' in header['HARTMANN']:
self.hartmann = 'Open'
self.flavor = header['FLAVOR'].capitalize()
self.hart_resids = []
else:
self.hartmann = header['HARTMANN']
self.flavor = 'Hart'
# self.seeing = header['SEEING']
# self.img_type = header['IMAGETYP']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--today', action='store_true')
    parser.add_argument('-m', '--mjd',
                        help='If not today (-t), the mjd to search')
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        help='Show details, can be stacked')
    args = parser.parse_args()  # parse only after all arguments are registered
if args.today:
mjd_today = int(Time.now().sjd)
data_dir = '/data/spectro/{}/'.format(mjd_today)
elif args.mjd:
data_dir = '/data/spectro/{}/'.format(args.mjd)
else:
raise Exception('No date specified')
for path in Path(data_dir).rglob('sdR*.fit.gz'):
print(path)
if __name__ == '__main__':
main()
| 34.088235
| 78
| 0.603538
| 303
| 2,318
| 4.518152
| 0.511551
| 0.021914
| 0.039445
| 0.029218
| 0.122717
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002921
| 0.261432
| 2,318
| 67
| 79
| 34.597015
| 0.796729
| 0.227351
| 0
| 0.086957
| 0
| 0
| 0.162883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.152174
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0736f9344c10dda2615d756c67d64d15dd48a036
| 955
|
py
|
Python
|
capitulo-08/ex13b.py
|
bryan-lima/exercicios-livro-introd-prog-python-3ed
|
b6bc26dced9728510865704a80cb0d97f81f756b
|
[
"MIT"
] | 3
|
2021-11-09T17:54:10.000Z
|
2022-01-30T22:32:25.000Z
|
capitulo-08/ex13b.py
|
bryan-lima/exercicios-livro-introd-prog-python-3ed
|
b6bc26dced9728510865704a80cb0d97f81f756b
|
[
"MIT"
] | null | null | null |
capitulo-08/ex13b.py
|
bryan-lima/exercicios-livro-introd-prog-python-3ed
|
b6bc26dced9728510865704a80cb0d97f81f756b
|
[
"MIT"
] | null | null | null |
# Modify Program 8.20 so that the user has three chances to guess the number
# The program ends when the user guesses right or misses three times
# Program 8.20 from the book, page 184
# Program 8.20 - Guessing the number
#
# import random
#
# n = random.randint(1, 10)
# x = int(input('Escolha um número entre 1 e 10: '))
# if x == n:
# print('Você acertou!')
# else:
# print('Você errou.')
import random
numberRandom = random.randint(1, 10)
counter = 0
while True:
chosenNumber = int(input('\nEscolha um número entre 1 e 10: '))
counter += 1
if chosenNumber == numberRandom:
print(f'Parabéns! Você acertou na {counter}ª de 3 tentativas!')
break
else:
print(f'Você errou!')
if counter < 3:
print(f'Resta(m) {3 - counter} tentativa(s).')
else:
print('Suas tentativas acabaram! Mais sorte na próxima vez.')
print(f'O número sorteado foi {numberRandom}.')
break
| 27.285714
| 86
| 0.642932
| 140
| 955
| 4.385714
| 0.492857
| 0.039088
| 0.053746
| 0.052117
| 0.055375
| 0.055375
| 0
| 0
| 0
| 0
| 0
| 0.040166
| 0.243979
| 955
| 34
| 87
| 28.088235
| 0.810249
| 0.393717
| 0
| 0.235294
| 0
| 0
| 0.39469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.294118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0737de527d56f865ee1256abea29660c8dca454e
| 894
|
py
|
Python
|
setup.py
|
shb84/ATM76
|
433179bde8935abeaf2ace52fe17dedb7a313487
|
[
"MIT"
] | null | null | null |
setup.py
|
shb84/ATM76
|
433179bde8935abeaf2ace52fe17dedb7a313487
|
[
"MIT"
] | null | null | null |
setup.py
|
shb84/ATM76
|
433179bde8935abeaf2ace52fe17dedb7a313487
|
[
"MIT"
] | null | null | null |
import setuptools
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name="atm76",
version="0.1.0",
author="Steven H. Berguin",
author_email="stevenberguin@gmail.com",
description="Differentiable 1976 Atmosphere",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/shb84/ATM76.git",
packages=setuptools.find_packages(),
package_data={},
install_requires=["numpy>=1.16", "genn"],
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
| 27.9375
| 63
| 0.680089
| 109
| 894
| 5.422018
| 0.706422
| 0.101523
| 0.064298
| 0.101523
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027248
| 0.178971
| 894
| 31
| 64
| 28.83871
| 0.777929
| 0.03132
| 0
| 0
| 0
| 0
| 0.310185
| 0.02662
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.115385
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
073976d41a2a2bee70b7facb5e914072923e6d0f
| 4,065
|
py
|
Python
|
agent/check_plugins/download_speed.py
|
indigos33k3r/god-eye
|
b2af5ca6dbbd1b302dd5cda1fd0f0c0eee009e76
|
[
"BSD-3-Clause"
] | 1
|
2019-04-01T01:59:22.000Z
|
2019-04-01T01:59:22.000Z
|
agent/check_plugins/download_speed.py
|
indigos33k3r/god-eye
|
b2af5ca6dbbd1b302dd5cda1fd0f0c0eee009e76
|
[
"BSD-3-Clause"
] | null | null | null |
agent/check_plugins/download_speed.py
|
indigos33k3r/god-eye
|
b2af5ca6dbbd1b302dd5cda1fd0f0c0eee009e76
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import asyncio
from agent.check_plugins import AbstractCheckPlugin
# Not sure how to use the asyncio library for this, so using the
# requests library instead
import requests
import sys
import time
from datetime import datetime
logger = logging.getLogger(__name__)
class Download(AbstractCheckPlugin):
@asyncio.coroutine
def __call__(self, client, dnode):
logger.info('Test download speed : running...')
start = time.clock()
r = requests.get('http://{}'.format(dnode), stream=True)
total_length = int(r.headers.get('content-length'))
if total_length is None:
logger.error("Empty file!")
else:
array_speed = []
start_chunk = time.clock()
            for chunk in r.iter_content(1024):  # 1 kB = 1024 bytes, 1 MB = 1048576 bytes
end_chunk = time.clock()
delta = end_chunk - start_chunk
start_chunk = end_chunk
if delta <= 0:
break
else:
array_speed.append(1//delta) # kB / s
end = time.clock()
yield from self._queue.put(self.get_result(dnode, start, end, total_length, array_speed))
@asyncio.coroutine
def get_result(self, url, start, end, total_length, array_speed):
"""Download and processing data.
Args:
url (str): url file download.
start (float): It's time which started download.
end (float): It's time which finished download.
total_length (int): size of file download (Byte)
array_speed (list): list download speeds for each 1024 Byte (kB/s)
Returns:
list with item 0 : json format for influxdb
"""
download_speed = total_length // (time.clock() - start)
accelerationS = self.acceleration(array_speed)
mean_deviationS = self.mean_deviation(array_speed, download_speed)
logger.info("Test download speed done!")
        # TODO Drop the time field, to check whether the db is writing data or not
return [self.output([self._snode, url, datetime.now(), download_speed, mean_deviationS, accelerationS])]
def acceleration(self, array_speed):
"""Caculate acceleration.
By get the highest speed in the first cycle.
Args:
array_speed (list): list download times for each 1024 Byte
Returns:
acceleration (kB/s) : the deviation between highest speed and first byte speed
"""
if len(array_speed) == 0:
return 0
speed_before = array_speed[0]
for speed in array_speed:
if speed < speed_before:
break
else:
speed_before = speed
return speed_before - array_speed[0]
def mean_deviation(self, array_speed, download_speed):
"""The mean deviation each downloads with download_speed.
Args:
array_speed (list): list download speeds for each kB.
download_speed (kB/s): mean download speed.
Returns:
mean_deviation (kB/s)
"""
if len(array_speed) == 0:
return 0
sum = 0
for speed in array_speed:
sum += abs(speed - download_speed)
return sum//len(array_speed)
def output(self, my_array):
"""Reformat my_array for inserting into influxdb.
Args:
my_array (list): [self._snode, url, str(datetime.now()), download_speed, mean_deviationS, accelerationS]
Returns:
json format for influxdb
"""
return {
"measurement": "download_speed",
"tags": {
"snode": "{}".format(my_array[0]),
"dnode": "{}".format(my_array[1])
},
# "time": "{}".format(my_array[2]),
"fields": {
"speed": my_array[3],
"mean_deviation": my_array[4],
"acceleration": my_array[5]
}
}
| 32.007874
| 116
| 0.571218
| 467
| 4,065
| 4.813705
| 0.321199
| 0.080071
| 0.019573
| 0.024021
| 0.218416
| 0.160142
| 0.100534
| 0.034698
| 0
| 0
| 0
| 0.01518
| 0.335547
| 4,065
| 127
| 117
| 32.007874
| 0.817105
| 0.294219
| 0
| 0.19403
| 0
| 0
| 0.065749
| 0
| 0
| 0
| 0
| 0.007874
| 0
| 1
| 0.074627
| false
| 0
| 0.104478
| 0
| 0.283582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
073aa245c28c69910b8c705ef18f357b5c9e4c5f
| 5,846
|
py
|
Python
|
GA/train.py
|
jcordell/keras-optimization
|
cbda84bcf3b31928d829af4afc82af1886877341
|
[
"MIT"
] | 1
|
2017-05-29T13:48:22.000Z
|
2017-05-29T13:48:22.000Z
|
GA/train.py
|
jcordell/keras-optimization
|
cbda84bcf3b31928d829af4afc82af1886877341
|
[
"MIT"
] | null | null | null |
GA/train.py
|
jcordell/keras-optimization
|
cbda84bcf3b31928d829af4afc82af1886877341
|
[
"MIT"
] | null | null | null |
"""
Utility used by the Network class to actually train.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
"""
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
import data_parser
import numpy as np
from keras.optimizers import Adadelta, Adam, rmsprop
from sklearn.metrics import mean_squared_error
# Helper: Early stopping.
early_stopper = EarlyStopping(patience=5)
def get_cifar10():
"""Retrieve the CIFAR dataset and process the data."""
# Set defaults.
nb_classes = 10
batch_size = 64
input_shape = (3072,)
# Get the data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(50000, 3072)
x_test = x_test.reshape(10000, 3072)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_mnist():
"""Retrieve the MNIST dataset and process the data."""
# Set defaults.
nb_classes = 10
batch_size = 128
input_shape = (784,)
# Get the data.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_dbtt():
data = data_parser.parse("DBTT_Data22.csv")
data_lwr = data_parser.parse("CD_LWR_clean8.csv")
X = ["N_log(eff fl p =.05)", "N_log(eff fl p =.4)", "N_log(eff fl p =.5)", "N(Cu)", "N(Ni)", "N(Mn)", "N(P)",
"N(Si)", "N( C )", "N_log(eff fl p =.1)", "N_log(eff fl p =.2)", "N_log(eff fl p =.3)", "N(Temp)"]
Y = "CD delta sigma"
data.set_x_features(X)
data.set_y_feature(Y)
data_lwr.set_y_feature(Y)
data_lwr.set_x_features(X)
data.add_exclusive_filter("Alloy", '=', 29)
data.add_exclusive_filter("Alloy", '=', 8)
data.add_exclusive_filter("Alloy", '=', 1)
data.add_exclusive_filter("Alloy", '=', 2)
data.add_exclusive_filter("Alloy", '=', 14)
data_lwr.add_exclusive_filter("Alloy", '=', 29)
data_lwr.add_exclusive_filter("Alloy", '=', 14)
x_test = np.array(data_lwr.get_x_data())
y_test = np.array(data_lwr.get_y_data())
x_train = np.array(data.get_x_data())
y_train = np.array(data.get_y_data())
#print("Training with", np.shape(y_train)[0], "data points")
nb_classes = -1
batch_size = np.shape(y_train)[0]
input_shape = (13,)
# normalize y columns
y_train = y_train/758.92
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def compile_model(network, nb_classes, input_shape):
"""Compile a sequential model.
Args:
network (dict): the parameters of the network
Returns:
a compiled network.
"""
# Get our network parameters.
nb_layers = network['nb_layers']
nb_neurons = network['nb_neurons']
activation = network['activation']
optimizer = network['optimizer']
learning_rate = network['learning_rate']
model = Sequential()
# Add each layer.
for i in range(nb_layers):
# Need input shape for first layer.
if i == 0:
print(nb_neurons)
model.add(Dense(units=nb_neurons, activation=activation, input_shape=input_shape))
else:
print(nb_neurons)
model.add(Dense(nb_neurons, activation=activation))
model.add(Dropout(0.2)) # hard-coded dropout
# Output layer.
if(nb_classes == -1):
model.add(Dense(1, activation='linear'))
ADAM = Adam(lr=learning_rate)
model.compile(loss='mean_squared_error', metrics=['accuracy'], optimizer=ADAM)
else:
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
return model
def train_and_score(network, dataset):
"""Train the model, return test loss.
Args:
network (dict): the parameters of the network
dataset (str): Dataset to use for training/evaluating
"""
if dataset == 'cifar10':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_cifar10()
elif dataset == 'mnist':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_mnist()
elif dataset == 'dbtt':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_dbtt()
model = compile_model(network, nb_classes, input_shape)
if dataset == 'dbtt':
model.fit(x_train, y_train, epochs=10, batch_size=1406, verbose=0)
y_predict = model.predict(x_test) * 758.92 # todo way to not hardcode this?
rms = np.sqrt(mean_squared_error(y_test, y_predict))
print(rms)
return rms
else:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=10000, # using early stopping, so no real limit
verbose=0,
validation_data=(x_test, y_test),
callbacks=[early_stopper])
score = model.evaluate(x_test, y_test, verbose=0)
return score[1] # 1 is accuracy. 0 is loss.
| 33.028249
| 113
| 0.653609
| 849
| 5,846
| 4.250883
| 0.23086
| 0.030479
| 0.016625
| 0.024383
| 0.455805
| 0.382377
| 0.307287
| 0.259906
| 0.237739
| 0.237739
| 0
| 0.030637
| 0.223914
| 5,846
| 176
| 114
| 33.215909
| 0.764823
| 0.164728
| 0
| 0.219298
| 0
| 0
| 0.085452
| 0.005002
| 0
| 0
| 0
| 0.005682
| 0
| 1
| 0.04386
| false
| 0
| 0.078947
| 0
| 0.175439
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
073cf557d5c1841920fb8cd559522daa79d5440d
| 3,272
|
py
|
Python
|
ssl_context_builder/http_impl/requests_wrapper/secure_session.py
|
mbjahnoon/ssl_context_builder
|
e73530f900b56710c705675e8e657f0bd17f7c07
|
[
"Apache-2.0"
] | 1
|
2022-03-01T16:27:33.000Z
|
2022-03-01T16:27:33.000Z
|
ssl_context_builder/http_impl/requests_wrapper/secure_session.py
|
mbjahnoon/ssl_context_builder
|
e73530f900b56710c705675e8e657f0bd17f7c07
|
[
"Apache-2.0"
] | null | null | null |
ssl_context_builder/http_impl/requests_wrapper/secure_session.py
|
mbjahnoon/ssl_context_builder
|
e73530f900b56710c705675e8e657f0bd17f7c07
|
[
"Apache-2.0"
] | null | null | null |
import weakref
import os
import requests
import ssl
from ssl import SSLContext
import logging
from ssl_context_builder.builder.builder import SslContextBuilder
from ssl_context_builder.http_impl.requests_wrapper.ssl_adapter import SslAdapter
class RequestsSecureSession:
def __init__(self, ssl_context: SSLContext):
"""
        This class creates a wrapper for the requests.Session object.
        It does the following:
        1. Disables consumption of the session's environment variables
        2. Loads the certificates provided with the ssl_context
        3. Expects an ssl_context to control the TLS communication
@param ssl_context: SSLContext
"""
self.cert_file_path = self._create_cert_file(ssl_context) # see note inside the function why not using tempfile
self._ssl_context = ssl_context
self.session = requests.Session()
self.session.trust_env = False
self.session.verify = self.cert_file_path
self.session.mount('https://', SslAdapter(ssl_context))
self._finalizer = weakref.finalize(
self, self._cleanup, self.cert_file_path, self.session,
warn_message="Implicitly cleaning up {!r}".format(self))
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self): # Non throw function
"""
Delete the cert file and close the session
@return:
"""
if self._finalizer.detach():
try:
os.remove(self.cert_file_path)
except:
logging.warning(f"Couldn't delete certs file {self.cert_file_path}")
try:
self.session.close()
except:
logging.warning("Couldn't close session")
@staticmethod
def _cleanup(name, session, warn_message):
try:
os.remove(name)
except:
logging.warning(f"Couldn't delete certs file {name}")
try:
session.close()
except:
logging.warning("Couldn't close session")
logging.warning(warn_message)
@classmethod
def _create_cert_file(cls, ssl_context: SSLContext):
"""
This create a CA bundle file extracted from the ssl_context
The reason we are creating a real file and deleting it is that this file is being opened later on
in the requests flow. This means we have to close the file before it is being used
tempfile is being destroyed when closed.
@param ssl_context: ssl_context
@return: path to the created ca_bundle file
"""
path = "certs.pem"
if os.path.exists(path):
path = cls._generate_cert_file_path("certs")
with open(path, mode="a+") as certs_file:
certs = ""
for der in ssl_context.get_ca_certs(True):
certs += f"{ssl.DER_cert_to_PEM_cert(der)}\n"
certs_file.write(certs)
return path
@classmethod
def _generate_cert_file_path(cls, file_name: str, num=1):
file_name_candidate = f"{file_name}({num}).pem"
if os.path.exists(file_name_candidate):
return cls._generate_cert_file_path(file_name, num + 1)
return file_name_candidate
| 34.808511
| 120
| 0.637531
| 415
| 3,272
| 4.814458
| 0.33012
| 0.075075
| 0.048048
| 0.04004
| 0.171171
| 0.121121
| 0.094094
| 0.094094
| 0.094094
| 0
| 0
| 0.002136
| 0.284535
| 3,272
| 93
| 121
| 35.182796
| 0.851346
| 0.225856
| 0
| 0.196721
| 0
| 0
| 0.097304
| 0.032013
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114754
| false
| 0
| 0.131148
| 0.016393
| 0.327869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
073ddb35cfd257b4fe7bee31f410bb17b18b0611
| 621
|
py
|
Python
|
tiny_scripts/select_cifar_10.py
|
jiaqiangwjq/python_workhouse
|
c0e739d8bc8ea3d318a0f916e9d79b1f4d4acad9
|
[
"Unlicense"
] | null | null | null |
tiny_scripts/select_cifar_10.py
|
jiaqiangwjq/python_workhouse
|
c0e739d8bc8ea3d318a0f916e9d79b1f4d4acad9
|
[
"Unlicense"
] | null | null | null |
tiny_scripts/select_cifar_10.py
|
jiaqiangwjq/python_workhouse
|
c0e739d8bc8ea3d318a0f916e9d79b1f4d4acad9
|
[
"Unlicense"
] | null | null | null |
'''
Selected cifar-10. The .csv file format:
class_index,data_index
3,0
8,1
8,2
...
'''
import pickle
import pandas as pd
file = 'E:\pycharm\LEARN\data\cifar-10\cifar-10-batches-py\\test_batch'
with open(file, 'rb') as f:
dict = pickle.load(f, encoding='bytes')
dict.keys()
batch_label = dict[b'batch_label']
labels = dict[b'labels']
data = dict[b'data']
filenames = dict[b'filenames']
length = len(labels)
data_index = [i for i in range(length)]
class_index = labels
csv_dict = {'class_index': class_index, 'data_index': data_index}
df = pd.DataFrame(csv_dict)
df.to_csv('selected_cifar10.csv', index=False)
| 18.818182
| 71
| 0.710145
| 105
| 621
| 4.057143
| 0.485714
| 0.093897
| 0.098592
| 0.089202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025878
| 0.128824
| 621
| 33
| 72
| 18.818182
| 0.761553
| 0.128824
| 0
| 0
| 0
| 0.0625
| 0.262664
| 0.116323
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
074082800249cdc23669711e86fb83230db924ee
| 940
|
py
|
Python
|
codebox/scripts/fixture.py
|
disqus/codebox
|
9f8e1a9c08c6a79bf3519782be483ff9763c4b4e
|
[
"Apache-2.0"
] | 5
|
2015-09-24T19:53:02.000Z
|
2019-05-14T11:56:07.000Z
|
codebox/scripts/fixture.py
|
disqus/codebox
|
9f8e1a9c08c6a79bf3519782be483ff9763c4b4e
|
[
"Apache-2.0"
] | null | null | null |
codebox/scripts/fixture.py
|
disqus/codebox
|
9f8e1a9c08c6a79bf3519782be483ff9763c4b4e
|
[
"Apache-2.0"
] | null | null | null |
# Ghetto Fixtures
from codebox import app
from codebox.apps.auth.models import User
from codebox.apps.snippets.models import Snippet
from codebox.apps.organizations.models import Organization, OrganizationMember
from flask import g
client = app.test_client()
_ctx = app.test_request_context()
_ctx.push()
app.preprocess_request()
g.redis.flushdb()
User.objects.create(pk=1, name='zeeg')
Organization.objects.create(pk='disqus', name='DISQUS')
OrganizationMember.objects.create(org='disqus', user=1)
# Create sample snippets
# plaintext
Snippet.objects.create(org='disqus', user=1, lang='text', text = "Hello World!")
# python
Snippet.objects.create(org='disqus', user=1, lang='python', text = "print 'Disqus was here'")
# html
Snippet.objects.create(org='disqus', user=1, lang='html', text = '<h1>Look its HTML!</h1>')
# javascript
Snippet.objects.create(org='disqus', user=1, lang='javascript', text = "document.write('Di-squs')")
| 29.375
| 99
| 0.75
| 132
| 940
| 5.295455
| 0.401515
| 0.130186
| 0.114449
| 0.157368
| 0.25608
| 0.25608
| 0.217454
| 0.217454
| 0
| 0
| 0
| 0.009401
| 0.094681
| 940
| 31
| 100
| 30.322581
| 0.811986
| 0.075532
| 0
| 0
| 0
| 0
| 0.1777
| 0.029036
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.294118
| 0
| 0.294118
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0740a865caa54dd6749985e9ca6d8ad7824f4098
| 3,062
|
py
|
Python
|
corehq/apps/linked_domain/tests/test_views.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/linked_domain/tests/test_views.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/linked_domain/tests/test_views.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest.mock import Mock, patch
from django.test import SimpleTestCase
from corehq.apps.domain.exceptions import DomainDoesNotExist
from corehq.apps.linked_domain.exceptions import (
DomainLinkAlreadyExists,
DomainLinkError,
DomainLinkNotAllowed,
)
from corehq.apps.linked_domain.views import link_domains
class LinkDomainsTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
super(LinkDomainsTests, cls).setUpClass()
cls.upstream_domain = 'upstream'
cls.downstream_domain = 'downstream'
def test_exception_raised_if_domain_does_not_exist(self):
def mock_handler(domain):
return domain != self.downstream_domain
with patch('corehq.apps.linked_domain.views.domain_exists') as mock_domainexists,\
self.assertRaises(DomainDoesNotExist):
mock_domainexists.side_effect = mock_handler
link_domains(Mock(), self.upstream_domain, self.downstream_domain)
def test_exception_raised_if_domain_link_already_exists(self):
with patch('corehq.apps.linked_domain.views.domain_exists', return_value=True),\
patch('corehq.apps.linked_domain.views.get_active_domain_link', return_value=Mock()),\
self.assertRaises(DomainLinkAlreadyExists):
link_domains(Mock(), self.upstream_domain, self.downstream_domain)
def test_exception_raised_if_domain_link_error_raised(self):
def mock_handler(downstream, upstream):
raise DomainLinkError
with patch('corehq.apps.linked_domain.views.domain_exists', return_value=True),\
patch('corehq.apps.linked_domain.views.get_active_domain_link', return_value=None),\
patch('corehq.apps.linked_domain.views.DomainLink.link_domains') as mock_linkdomains,\
self.assertRaises(DomainLinkError):
mock_linkdomains.side_effect = mock_handler
link_domains(Mock(), self.upstream_domain, self.downstream_domain)
def test_exception_raised_if_user_is_not_admin_in_both_domains(self):
with patch('corehq.apps.linked_domain.views.domain_exists', return_value=True),\
patch('corehq.apps.linked_domain.views.get_active_domain_link', return_value=None),\
patch('corehq.apps.linked_domain.views.user_has_admin_access_in_all_domains', return_value=False),\
self.assertRaises(DomainLinkNotAllowed):
link_domains(Mock(), self.upstream_domain, self.downstream_domain)
def test_successful(self):
with patch('corehq.apps.linked_domain.views.domain_exists', return_value=True),\
patch('corehq.apps.linked_domain.views.get_active_domain_link', return_value=None),\
patch('corehq.apps.linked_domain.views.DomainLink.link_domains', return_value=True),\
patch('corehq.apps.linked_domain.views.user_has_admin_access_in_all_domains', return_value=True):
domain_link = link_domains(Mock(), self.upstream_domain, self.downstream_domain)
self.assertIsNotNone(domain_link)
| 48.603175
| 112
| 0.736447
| 362
| 3,062
| 5.89779
| 0.18232
| 0.074941
| 0.112412
| 0.154567
| 0.623888
| 0.59719
| 0.583138
| 0.583138
| 0.583138
| 0.528806
| 0
| 0
| 0.171457
| 3,062
| 62
| 113
| 49.387097
| 0.841545
| 0
| 0
| 0.22449
| 0
| 0
| 0.230242
| 0.224363
| 0
| 0
| 0
| 0
| 0.102041
| 1
| 0.163265
| false
| 0
| 0.102041
| 0.020408
| 0.306122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0740e77524f70aef71e87bb08ca6fba979752644
| 2,207
|
py
|
Python
|
pyingest/parsers/zenodo.py
|
golnazads/adsabs-pyingest
|
37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e
|
[
"MIT"
] | 1
|
2020-06-04T20:09:03.000Z
|
2020-06-04T20:09:03.000Z
|
pyingest/parsers/zenodo.py
|
golnazads/adsabs-pyingest
|
37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e
|
[
"MIT"
] | 81
|
2017-11-16T16:07:21.000Z
|
2022-03-08T14:05:37.000Z
|
pyingest/parsers/zenodo.py
|
golnazads/adsabs-pyingest
|
37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e
|
[
"MIT"
] | 17
|
2016-04-13T17:03:25.000Z
|
2021-12-22T15:26:54.000Z
|
#!/usr/bin/python
#
#
from __future__ import absolute_import
import json
import re
import logging
from .datacite import DataCiteParser
class WrongPublisherException(Exception):
pass
class ZenodoParser(DataCiteParser):
def get_references(self, r):
# as of version 3.1 of datacite schema, "References" is not an
# allowed description type so Lars is shoving the references
# in a section labeled as "Other" as a json structure
references = []
for s in self._array(r.get('descriptions', {}).get('description', [])):
t = s.get('@descriptionType')
c = self._text(s)
if t == 'References':
# XXX not supported yet, but one can only hope...
references = c.split('\n')
elif t == 'Other':
try:
j = json.loads(c)
references = j.get('references', [])
except ValueError:
logging.warning(u'Ignoring unparsable "Other" description element: %s\n' % c)
return references
def get_abstract(self, r):
abs = super(ZenodoParser, self).get_abstract(r)
abs = re.sub(r'\s*<p>', '', abs)
abs = re.sub(r'</p>\s*$', '', abs)
return abs
def parse(self, fp, **kwargs):
"""Parses Zenodo's flavor of DataCite 3.1 schema, returns ADS tagged format"""
        doc = super(self.__class__, self).parse(fp, **kwargs)
        # r = self._resource
# publisher
pub = doc.get('source')
if pub != 'Zenodo' and pub != 'ZENODO':
raise WrongPublisherException("Found publisher field of \"%s\" rather than Zenodo" % pub)
else:
doc['source'] = 'ZENODO'
return doc
#
# if __name__ == "__main__":
#
# # allows program to print utf-8 encoded output sensibly
# import codecs
# sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# sys.stderr = codecs.getwriter('utf-8')(sys.stderr)
#
# parser = ZenodoParser()
# for file in sys.argv[1:]:
# d = None
# with open(file, 'r') as fp:
# d = parser.parse(fp)
# print json.dumps(d, indent=2)
| 30.232877
| 101
| 0.566833
| 266
| 2,207
| 4.616541
| 0.481203
| 0.009772
| 0.013029
| 0.014658
| 0.035831
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005875
| 0.305845
| 2,207
| 72
| 102
| 30.652778
| 0.795692
| 0.32986
| 0
| 0.055556
| 0
| 0
| 0.143448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.027778
| 0.138889
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07421cfb41d4ae2f25674d5123c3192c8a85313e
| 25,223
|
py
|
Python
|
src/fullnode.py
|
AmeyaDaddikar/vjtichain
|
2a9b68d475fe5cc2babdf3f5b463a685e8423f05
|
[
"MIT"
] | 1
|
2019-05-26T12:36:37.000Z
|
2019-05-26T12:36:37.000Z
|
src/fullnode.py
|
AmeyaDaddikar/vjtichain
|
2a9b68d475fe5cc2babdf3f5b463a685e8423f05
|
[
"MIT"
] | null | null | null |
src/fullnode.py
|
AmeyaDaddikar/vjtichain
|
2a9b68d475fe5cc2babdf3f5b463a685e8423f05
|
[
"MIT"
] | null | null | null |
import json
import time
from functools import lru_cache
from multiprocessing import Pool, Process
from threading import Thread, Timer
from typing import Any, Dict, List
from datetime import datetime
import hashlib
import inspect
import requests
import waitress
from bottle import BaseTemplate, Bottle, request, response, static_file, template, error
import utils.constants as consts
from core import Block, BlockChain, SingleOutput, Transaction, TxIn, TxOut, genesis_block
from authority import Authority
from utils.logger import logger, iplogger
from utils.storage import get_block_from_db, get_wallet_from_db, read_header_list_from_db
from utils.utils import compress, decompress, dhash
from wallet import Wallet
app = Bottle()
BaseTemplate.defaults["get_url"] = app.get_url
LINE_PROFILING = False
BLOCKCHAIN = BlockChain()
PEER_LIST: List[Dict[str, Any]] = []
MY_WALLET = Wallet()
miner = Authority()
def mining_thread_task():
while True:
if not miner.is_mining() and not consts.NO_MINING:
miner.start_mining(BLOCKCHAIN.mempool, BLOCKCHAIN.active_chain, MY_WALLET)
time.sleep(consts.MINING_INTERVAL_THRESHOLD // 2)
def send_to_all_peers(url, data):
def request_task(peers, url, data):
for peer in peers:
try:
requests.post(get_peer_url(peer) + url, data=data, timeout=(5, 1))
except Exception as e:
logger.debug("Server: Requests: Error while sending data in process" + str(peer))
Process(target=request_task, args=(PEER_LIST, url, data), daemon=True).start()
def start_mining_thread():
time.sleep(5)
Thread(target=mining_thread_task, name="Miner", daemon=True).start()
def fetch_peer_list() -> List[Dict[str, Any]]:
try:
r = requests.post(consts.SEED_SERVER_URL, data={"port": consts.MINER_SERVER_PORT})
peer_list = json.loads(r.text)
return peer_list
except Exception as e:
logger.error("Could not connect to DNS Seed")
return []
def get_peer_url(peer: Dict[str, Any]) -> str:
return "http://" + str(peer["ip"]) + ":" + str(peer["port"])
def greet_peer(peer: Dict[str, Any]) -> bool:
try:
url = get_peer_url(peer)
data = {"port": consts.MINER_SERVER_PORT, "version": consts.MINER_VERSION, "blockheight": BLOCKCHAIN.active_chain.length}
# Send a POST request to the peer
r = requests.post(url + "/greetpeer", data=data)
data = json.loads(r.text)
# Update the peer data in the peer list with the new data received from the peer.
if data.get("blockheight", None):
peer.update(data)
else:
logger.debug("Main: Peer data does not have Block Height")
return False
return True
except Exception as e:
logger.debug("Main: Could not greet peer" + str(e))
return False
def receive_block_from_peer(peer: Dict[str, Any], header_hash) -> Block:
r = requests.post(get_peer_url(peer) + "/getblock", data={"headerhash": header_hash})
return Block.from_json(decompress(r.text)).object()
def check_block_with_peer(peer, hhash):
r = requests.post(get_peer_url(peer) + "/checkblock", data={"headerhash": hhash})
result = json.loads(r.text)
if result:
return True
return False
def get_block_header_hash(height):
return dhash(BLOCKCHAIN.active_chain.header_list[height])
def sync(max_peer):
fork_height = BLOCKCHAIN.active_chain.length
r = requests.post(get_peer_url(max_peer) + "/getblockhashes", data={"myheight": fork_height})
hash_list = json.loads(decompress(r.text.encode()))
for hhash in hash_list:
block = receive_block_from_peer(max_peer, hhash)
if not BLOCKCHAIN.add_block(block):
logger.error("Sync: Block received is invalid, Cannot Sync")
break
return
# Periodically sync with all the peers
def sync_with_peers():
    global PEER_LIST  # the assignments below update the module-level peer list
    try:
        PEER_LIST = fetch_peer_list()
new_peer_list = []
for peer in PEER_LIST:
if greet_peer(peer):
new_peer_list.append(peer)
PEER_LIST = new_peer_list
if PEER_LIST:
max_peer = max(PEER_LIST, key=lambda k: k["blockheight"])
logger.debug(f"Sync: Syncing with {get_peer_url(max_peer)}, he seems to have height {max_peer['blockheight']}")
sync(max_peer)
except Exception as e:
logger.error("Sync: Error: " + str(e))
Timer(consts.MINING_INTERVAL_THRESHOLD * 2, sync_with_peers).start()
def check_balance(pub_key: str) -> int:
current_balance = 0
for x, utxo_list in BLOCKCHAIN.active_chain.utxo.utxo.items():
tx_out = utxo_list[0]
if tx_out.address == pub_key:
current_balance += int(tx_out.amount)
return int(current_balance)
def send_bounty(receiver_public_keys: List[str], amounts: List[int]):
current_balance = check_balance(MY_WALLET.public_key)
for key in receiver_public_keys:
if len(key) < consts.PUBLIC_KEY_LENGTH:
logger.debug("Invalid Public Key Length")
return False
total_amount = sum(amounts)
if current_balance < total_amount:
logger.debug("Insuficient balance")
elif MY_WALLET.public_key in receiver_public_keys:
logger.debug("Cannot send to myself")
else:
transaction = create_transaction(receiver_public_keys, amounts, MY_WALLET.public_key, message="Authority: Faucet Money")
transaction.sign(MY_WALLET)
logger.info("Wallet: Attempting to Send Transaction")
try:
r = requests.post(
"http://0.0.0.0:" + str(consts.MINER_SERVER_PORT) + "/newtransaction",
data=compress(transaction.to_json()),
timeout=(5, 1),
)
if r.status_code == 400:
logger.info("Wallet: Could not Send Transaction. Invalid Transaction")
else:
logger.info("Wallet: Transaction Sent, Wait for it to be Mined")
return True
except Exception as e:
logger.error("Wallet: Could not Send Transaction. Try Again." + str(e))
return False
def create_transaction(receiver_public_keys: List[str], amounts: List[int], sender_public_key, message="") -> Transaction:
vout = {}
vin = {}
current_amount = 0
total_amount = sum(amounts)
i = 0
for so, utxo_list in BLOCKCHAIN.active_chain.utxo.utxo.items():
tx_out = utxo_list[0]
if current_amount >= total_amount:
break
if tx_out.address == sender_public_key:
current_amount += tx_out.amount
vin[i] = TxIn(payout=SingleOutput.from_json(so), pub_key=sender_public_key, sig="")
i += 1
for i, address in enumerate(receiver_public_keys):
vout[i] = TxOut(amount=amounts[i], address=address)
change = (current_amount - total_amount)
if change > 0:
vout[i + 1] = TxOut(amount=change, address=sender_public_key)
tx = Transaction(version=consts.MINER_VERSION, locktime=0, timestamp=int(time.time()), vin=vin, vout=vout, message=message)
return tx
def get_ip(request):
return request.environ.get("HTTP_X_FORWARDED_FOR") or request.environ.get("REMOTE_ADDR")
def log_ip(request, fname):
client_ip = get_ip(request)
iplogger.info(f"{client_ip} : Called function {fname}")
@app.post("/checkBalance")
def checkingbalance():
log_ip(request, inspect.stack()[0][3])
data = request.json
public_key = data["public_key"]
logger.debug(public_key)
current_balance = check_balance(public_key)
return str(current_balance)
@app.post("/makeTransaction")
def make_transaction():
log_ip(request, inspect.stack()[0][3])
data = request.json
bounty = int(data["bounty"])
receiver_public_key = data["receiver_public_key"]
sender_public_key = data["sender_public_key"]
message = "No Message"
if "message" in data:
message = data["message"]
if len(receiver_public_key) < consts.PUBLIC_KEY_LENGTH:
logger.debug("Invalid Receiver Public Key")
response.status = 400
return "Invalid Receiver Public Key"
current_balance = check_balance(sender_public_key)
if current_balance < bounty:
logger.debug("Insufficient Balance to make Transaction")
response.status = 400
return "Insufficient Balance to make Transaction, need more " + str(bounty - current_balance)
elif sender_public_key == receiver_public_key:
logger.debug("Someone trying to send money to himself")
response.status = 400
return "Cannot send money to youself"
else:
transaction = create_transaction([receiver_public_key], [bounty], sender_public_key, message=message)
data = {}
data["send_this"] = transaction.to_json()
transaction.vin = {}
data["sign_this"] = transaction.to_json()
return json.dumps(data)
@app.post("/sendTransaction")
def send_transaction():
log_ip(request, inspect.stack()[0][3])
data = request.json
transaction = Transaction.from_json(data["transaction"]).object()
sig = data["signature"]
transaction.add_sign(sig)
logger.debug(transaction)
logger.info("Wallet: Attempting to Send Transaction")
try:
r = requests.post(
"http://0.0.0.0:" + str(consts.MINER_SERVER_PORT) + "/newtransaction",
data=compress(transaction.to_json()),
timeout=(5, 1),
)
if r.status_code == 400:
response.status = 400
logger.error("Wallet: Could not Send Transaction. Invalid transaction")
return "Try Again"
except Exception as e:
response.status = 400
logger.error("Wallet: Could not Send Transaction. Try Again." + str(e))
return "Try Again"
else:
logger.info("Wallet: Transaction Sent, Wait for it to be Mined")
return "Done"
@app.post("/transactionHistory")
def transaction_history():
log_ip(request, inspect.stack()[0][3])
data = request.json
public_key = data["public_key"]
tx_hist = BLOCKCHAIN.active_chain.transaction_history.get(public_key)
return json.dumps(tx_hist)
@app.post("/greetpeer")
def greet_peer_f():
log_ip(request, inspect.stack()[0][3])
try:
peer = {}
peer["port"] = request.forms.get("port")
peer["ip"] = request.remote_addr
peer["time"] = time.time()
peer["version"] = request.forms.get("version")
peer["blockheight"] = request.forms.get("blockheight")
ADD_ENTRY = True
for entry in PEER_LIST:
ip = entry["ip"]
port = entry["port"]
if ip == peer["ip"] and port == peer["port"]:
ADD_ENTRY = False
if ADD_ENTRY:
PEER_LIST.append(peer)
logger.debug("Server: Greet, A new peer joined, Adding to List")
except Exception as e:
logger.debug("Server: Greet Error: " + str(e))
pass
data = {"version": consts.MINER_VERSION, "blockheight": BLOCKCHAIN.active_chain.length}
response.content_type = "application/json"
return json.dumps(data)
@lru_cache(maxsize=128)
def cached_get_block(headerhash: str) -> str:
if headerhash:
db_block = get_block_from_db(headerhash)
if db_block:
return compress(db_block)
else:
logger.error("ERROR CALLED GETBLOCK FOR NON EXISTENT BLOCK")
return "Invalid Hash"
@app.post("/getblock")
def getblock():
log_ip(request, inspect.stack()[0][3])
hhash = request.forms.get("headerhash")
return cached_get_block(hhash)
@app.post("/checkblock")
def checkblock():
log_ip(request, inspect.stack()[0][3])
headerhash = request.forms.get("headerhash")
if get_block_from_db(headerhash):
return json.dumps(True)
return json.dumps(False)
@app.post("/getblockhashes")
def send_block_hashes():
log_ip(request, inspect.stack()[0][3])
peer_height = int(request.forms.get("myheight"))
hash_list = []
for i in range(peer_height, BLOCKCHAIN.active_chain.length):
hash_list.append(dhash(BLOCKCHAIN.active_chain.header_list[i]))
return compress(json.dumps(hash_list)).decode()
@lru_cache(maxsize=16)
def process_new_block(request_data: bytes) -> str:
global BLOCKCHAIN
block_json = decompress(request_data)
if block_json:
try:
block = Block.from_json(block_json).object()
# Check if block already exists
if get_block_from_db(dhash(block.header)):
logger.info("Server: Received block exists, doing nothing")
return "Block already Received Before"
if BLOCKCHAIN.add_block(block):
logger.info("Server: Received a New Valid Block, Adding to Chain")
logger.debug("Server: Sending new block to peers")
# Broadcast block to other peers
send_to_all_peers("/newblock", request_data)
# TODO Make new chain/ orphan set for Block that is not added
except Exception as e:
logger.error("Server: New Block: invalid block received " + str(e))
return "Invalid Block Received"
# Kill Miner
t = Timer(1, miner.stop_mining)
t.start()
return "Block Received"
logger.error("Server: Invalid Block Received")
return "Invalid Block"
@app.post("/newblock")
def received_new_block():
log_ip(request, inspect.stack()[0][3])
return process_new_block(request.body.read())
@lru_cache(maxsize=16)
def process_new_transaction(request_data: bytes) -> str:
global BLOCKCHAIN
transaction_json = decompress(request_data)
if transaction_json:
try:
tx = Transaction.from_json(transaction_json).object()
# Add transaction to Mempool
if tx not in BLOCKCHAIN.mempool:
if BLOCKCHAIN.active_chain.is_transaction_valid(tx):
logger.debug("Valid Transaction received, Adding to Mempool")
BLOCKCHAIN.mempool.add(tx)
# Broadcast block to other peers
send_to_all_peers("/newtransaction", request_data)
else:
logger.debug("The transation is not valid, not added to Mempool")
return False, "Not Valid Transaction"
else:
return True, "Transaction Already received"
except Exception as e:
logger.error("Server: New Transaction: Invalid tx received: " + str(e))
return False, "Not Valid Transaction"
return True, "Done"
# Transactions for all active chains
@app.post("/newtransaction")
def received_new_transaction():
log_ip(request, inspect.stack()[0][3])
result, message = process_new_transaction(request.body.read())
if result:
response.status = 200
else:
response.status = 400
return message
question = '''What is greater than God,
more evil than the devil,
the poor have it,
the rich need it,
and if you eat it, you'll die?'''
actual_answer = "nothing"
@app.get("/")
def home():
log_ip(request, inspect.stack()[0][3])
message = ""
message_type = "info"
return template("index.html", message=message, message_type=message_type, question=question)
with open('uuids.json', 'r') as file:
uuid_json = file.read()
valid_ids = set(json.loads(uuid_json))
@app.post("/")
def puzzle():
log_ip(request, inspect.stack()[0][3])
message = ""
message_type = "info"
uuid = request.forms.get("uuid")
pubkey = request.forms.get("pubkey")
amounts = [300]
if uuid in valid_ids:
logger.debug("Valid Answer, Rewarding " + pubkey)
message = "Well Done!"
if check_balance(MY_WALLET.public_key) >= sum(amounts):
result = send_bounty([pubkey], amounts)
if result:
message = "Your reward is being sent, please wait for it to be mined!"
valid_ids.remove(uuid)
else:
message = "Some Error Occured, Contact Admin."
message_type = "warning"
else:
message = "Invalid Unique ID!"
message_type = "danger"
return template("index.html", message=message, message_type=message_type, question=question)
@app.get('/about')
def about():
return template("about.html")
# @app.get("/wallet")
# def wallet():
# log_ip(request, inspect.stack()[0][3])
# return template("wallet.html", message="", message_type="", pubkey=MY_WALLET.public_key)
# @app.post("/wallet")
# def wallet_post():
# log_ip(request, inspect.stack()[0][3])
# number = int(request.forms.get("number"))
# message = ""
# message_type = "info"
# try:
# receivers = []
# amounts = []
# total_amount = 0
# for i in range(0, number):
# receiver = str(request.forms.get("port" + str(i)))
# bounty = int(request.forms.get("amount" + str(i)))
# publickey = ""
# if len(receiver) < 10:
# wallet = get_wallet_from_db(receiver)
# if wallet is not None:
# publickey = wallet[1]
# else:
# message = "Error with the Receiver Port ID, try again."
# message_type = "danger"
# return template("wallet.html", message=message, message_type=message_type, pubkey=MY_WALLET.public_key)
# else:
# publickey = receiver
# total_amount += bounty
# receivers.append(publickey)
# amounts.append(bounty)
# if check_balance(MY_WALLET.public_key) >= total_amount:
# result = send_bounty(receivers, amounts)
# if result:
# message = "Your transaction is sent, please wait for it to be mined!"
# else:
# message = "Some Error Occured, Contact Admin."
# message_type = "warning"
# else:
# message = "You have Insufficient Balance!"
# message_type = "warning"
# return template("wallet.html", message=message, message_type=message_type, pubkey=MY_WALLET.public_key)
# except Exception as e:
# logger.error(e)
# message = "Some Error Occured. Please try again later."
# message_type = "danger"
# return template("wallet.html", message=message, message_type=message_type, pubkey=MY_WALLET.public_key)
@app.get("/checkmybalance")
def checkbalance():
log_ip(request, inspect.stack()[0][3])
return str(check_balance(MY_WALLET.public_key))
@app.route("/static/<filename:path>", name="static")
def serve_static(filename):
log_ip(request, inspect.stack()[0][3])
return static_file(filename, root="static")
@app.get("/favicon.ico")
def get_favicon():
log_ip(request, inspect.stack()[0][3])
return static_file("favicon.ico", root="static")
@app.get("/info")
def sendinfo():
log_ip(request, inspect.stack()[0][3])
s = (
"No. of Blocks: "
+ str(BLOCKCHAIN.active_chain.length)
+ "<br>"
+ dhash(BLOCKCHAIN.active_chain.header_list[-1])
+ "<br>"
+ "Balance "
+ str(check_balance(MY_WALLET.public_key))
+ "<br>Public Key: <br>"
+ str(get_wallet_from_db(consts.MINER_SERVER_PORT)[1])
)
return s
def render_block_header(hdr):
html = "<table>"
html += "<tr><th>" + "Height" + "</th>"
html += "<td>" + str(hdr.height) + "</td></tr>"
html += "<tr><th>" + "Block Hash" + "</th>"
html += "<td>" + dhash(hdr) + "</td></tr>"
html += "<tr><th>" + "Prev Block Hash" + "</th>"
html += "<td>" + str(hdr.prev_block_hash) + "</td></tr>"
html += "<tr><th>" + "Merkle Root" + "</th>"
html += "<td>" + str(hdr.merkle_root) + "</td></tr>"
html += "<tr><th>" + "Timestamp" + "</th>"
html += (
"<td>"
+ str(datetime.fromtimestamp(hdr.timestamp).strftime("%d-%m-%Y %H:%M:%S"))
+ " ("
+ str(hdr.timestamp)
+ ")</td></tr>"
)
# get block
block = Block.from_json(get_block_from_db(dhash(hdr))).object()
html += "<tr><th>" + "Transactions" + "</th>"
html += "<td>" + str(len(block.transactions)) + "</td></tr>"
# for i, transaction in enumerate(block.transactions):
# s = "coinbase: " + str(transaction.is_coinbase) + ", fees: " + str(transaction.fees)
# html += "<tr><th>Transaction " + str(i) + "</th><td>" + str(s) + "</td></tr>"
html += "</table>"
return str(html)
@app.get("/chains")
def visualize_chain():
log_ip(request, inspect.stack()[0][3])
data = []
start = BLOCKCHAIN.active_chain.length - 10 if BLOCKCHAIN.active_chain.length > 10 else 0
headers = []
hdr_list = BLOCKCHAIN.active_chain.header_list
if len(hdr_list) > 200:
hdr_list = BLOCKCHAIN.active_chain.header_list[:100] + BLOCKCHAIN.active_chain.header_list[-100:]
for hdr in hdr_list:
d = {}
d["hash"] = dhash(hdr)[-5:]
d["time"] = hdr.timestamp
d["data"] = render_block_header(hdr)
headers.append(d)
data.append(headers)
return template("chains.html", data=data, start=start)
@app.get("/explorer")
def explorer():
log_ip(request, inspect.stack()[0][3])
prev = int(request.query.prev or 0)
if prev < 0:
prev = 0
hdr_list = list(reversed(BLOCKCHAIN.active_chain.header_list))
indexes = [i for i in range(prev * 8, (prev + 1) * 8) if i < len(hdr_list)]
blocks = [Block.from_json(get_block_from_db(dhash(hdr_list[i]))).object() for i in indexes]
transactions = list(BLOCKCHAIN.mempool)
return template("explorer.html", blocks=blocks, transactions=transactions, prev=prev)
@app.route("/block/<blockhash>", name="transaction")
def block(blockhash):
log_ip(request, inspect.stack()[0][3])
try:
block = Block.from_json(get_block_from_db(blockhash)).object()
except Exception as e:
logger.debug("BLOCK/blockhash: " + str(e))
return template("error.html")
return template("block.html", block=block)
@app.route("/transaction/<blockhash>/<txhash>", name="transaction")
def transaction(blockhash, txhash):
log_ip(request, inspect.stack()[0][3])
try:
block = Block.from_json(get_block_from_db(blockhash)).object()
tx = None
for t in block.transactions:
if t.hash() == txhash:
tx = t
break
except Exception as e:
logger.debug("Transaction/bhash/tx: " + str(e))
return template("error.html")
return template("transaction.html", tx=tx, block=block)
@app.route("/address/<pubkey:re:.+>", name="account")
def account(pubkey):
log_ip(request, inspect.stack()[0][3])
balance = check_balance(pubkey)
tx_hist = BLOCKCHAIN.active_chain.transaction_history.get(pubkey)
return template("account.html", tx_hist=tx_hist, balance=balance, pubkey=pubkey)
@app.post("/mining")
def mining():
log_ip(request, inspect.stack()[0][3])
password = request.body.read().decode("utf-8")
hashed = b"\x11`\x1e\xdd\xd1\xb6\x80\x0f\xd4\xb0t\x90\x9b\xd3]\xa0\xcc\x1d\x04$\x8b\xb1\x19J\xaa!T5-\x9eJ\xfcI5\xc0\xbb\xf5\xb1\x9d\xba\xbef@\xa1)\xcf\x9b]c(R\x91\x0e\x9dMM\xb6\x94\xa9\xe2\x94il\x15"
dk = hashlib.pbkdf2_hmac("sha512", password.encode("utf-8"), b"forgeteverythingthatyouthinkyouknow", 200000)
if hashed == dk:
consts.NO_MINING = not consts.NO_MINING
logger.info("Mining: " + str(not consts.NO_MINING))
return "Mining Toggled, " + "NOT MINING" if consts.NO_MINING else "MINING"
else:
return "Password Mismatch," + "NOT MINING" if consts.NO_MINING else "MINING"
@app.route("/<url:re:.+>")
@error(403)
@error(404)
@error(505)
def error_handle(url="url", error="404"):
log_ip(request, inspect.stack()[0][3])
return template("error.html")
if __name__ == "__main__":
try:
if consts.NEW_BLOCKCHAIN:
logger.info("FullNode: Starting New Chain from Genesis")
BLOCKCHAIN.add_block(genesis_block)
else:
# Restore Blockchain
logger.info("FullNode: Restoring Existing Chain")
header_list = read_header_list_from_db()
BLOCKCHAIN.build_from_header_list(header_list)
# Sync with all my peers
sync_with_peers()
# Start mining Thread
Thread(target=start_mining_thread, daemon=True).start()
if consts.NO_MINING:
logger.info("FullNode: Not Mining")
# Start server
if LINE_PROFILING:
from wsgi_lineprof.middleware import LineProfilerMiddleware
with open("lineprof" + str(consts.MINER_SERVER_PORT) + ".log", "w") as f:
app = LineProfilerMiddleware(app, stream=f, async_stream=True)
waitress.serve(app, host="0.0.0.0", threads=16, port=consts.MINER_SERVER_PORT)
else:
waitress.serve(app, host="0.0.0.0", threads=16, port=consts.MINER_SERVER_PORT)
except KeyboardInterrupt:
miner.stop_mining()
| 34.223881
| 203
| 0.632082
| 3,214
| 25,223
| 4.80336
| 0.138768
| 0.022153
| 0.02021
| 0.030768
| 0.364749
| 0.294274
| 0.246858
| 0.200544
| 0.152157
| 0.129097
| 0
| 0.011567
| 0.239107
| 25,223
| 736
| 204
| 34.27038
| 0.79283
| 0.10768
| 0
| 0.243446
| 0
| 0.001873
| 0.168754
| 0.016577
| 0.011236
| 0
| 0
| 0.001359
| 0
| 1
| 0.08427
| false
| 0.007491
| 0.037453
| 0.007491
| 0.235955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
074273af8a268ef926e75f5dce65175c9bfb7048
| 5,914
|
py
|
Python
|
deepexplain/tf/v1_x/main.py
|
alexus37/MasterThesisCode
|
a7eada603686de75968acc8586fd307a91b0491b
|
[
"MIT"
] | 1
|
2020-04-23T15:39:27.000Z
|
2020-04-23T15:39:27.000Z
|
deepexplain/tf/v1_x/main.py
|
alexus37/DeepExplain
|
a7eada603686de75968acc8586fd307a91b0491b
|
[
"MIT"
] | null | null | null |
deepexplain/tf/v1_x/main.py
|
alexus37/DeepExplain
|
a7eada603686de75968acc8586fd307a91b0491b
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import ops
from collections import OrderedDict
import warnings, logging
from deepexplain.tf.v1_x import constants
from deepexplain.tf.v1_x.baseClasses import GradientBasedMethod
from deepexplain.tf.v1_x.utils import original_grad
from deepexplain.tf.v1_x.methods import (DummyZero, Saliency, GradientXInput, IntegratedGradients,
EpsilonLRP, DeepLIFTRescale, Occlusion, ShapleySampling)
attribution_methods = OrderedDict({
'zero': (DummyZero, 0),
'saliency': (Saliency, 1),
'grad*input': (GradientXInput, 2),
'intgrad': (IntegratedGradients, 3),
'elrp': (EpsilonLRP, 4),
'deeplift': (DeepLIFTRescale, 5),
'occlusion': (Occlusion, 6),
'shapley_sampling': (ShapleySampling, 7)
})
print(f'Using tf version = {tf.__version__}')
@ops.RegisterGradient("DeepExplainGrad")
def deepexplain_grad(op, grad):
# constants._ENABLED_METHOD_CLASS, _GRAD_OVERRIDE_CHECKFLAG
constants._GRAD_OVERRIDE_CHECKFLAG = 1
if constants._ENABLED_METHOD_CLASS is not None \
and issubclass(constants._ENABLED_METHOD_CLASS, GradientBasedMethod):
return constants._ENABLED_METHOD_CLASS.nonlinearity_grad_override(op, grad)
else:
return original_grad(op, grad)
class DeepExplain(object):
def __init__(self, graph=None, session=None):
# Resolve the session at call time; a default argument of
# tf.compat.v1.get_default_session() would be evaluated once at import time.
if session is None:
session = tf.compat.v1.get_default_session()
self.method = None
self.batch_size = None
self.session = session
self.graph = session.graph if graph is None else graph
self.graph_context = self.graph.as_default()
self.override_context = self.graph.gradient_override_map(self.get_override_map())
self.keras_phase_placeholder = None
self.context_on = False
if self.session is None:
raise RuntimeError('DeepExplain: could not retrieve a session. Use DeepExplain(session=your_session).')
def __enter__(self):
# Override gradient of all ops created in context
self.graph_context.__enter__()
self.override_context.__enter__()
self.context_on = True
return self
def __exit__(self, type, value, traceback):
self.graph_context.__exit__(type, value, traceback)
self.override_context.__exit__(type, value, traceback)
self.context_on = False
def get_explainer(self, method, T, X, **kwargs):
if not self.context_on:
raise RuntimeError('Explain can be called only within a DeepExplain context.')
# global constants._ENABLED_METHOD_CLASS, _GRAD_OVERRIDE_CHECKFLAG
self.method = method
if self.method in attribution_methods:
method_class, method_flag = attribution_methods[self.method]
else:
raise RuntimeError('Method must be in %s' % list(attribution_methods.keys()))
if isinstance(X, list):
for x in X:
if 'tensor' not in str(type(x)).lower():
raise RuntimeError('If a list, X must contain only Tensorflow Tensor objects')
else:
if 'tensor' not in str(type(X)).lower():
raise RuntimeError('X must be a Tensorflow Tensor object or a list of them')
if 'tensor' not in str(type(T)).lower():
raise RuntimeError('T must be a Tensorflow Tensor object')
# logging.info('DeepExplain: running "%s" explanation method (%d)' % (self.method, method_flag))
self._check_ops()
constants._GRAD_OVERRIDE_CHECKFLAG = 0
constants._ENABLED_METHOD_CLASS = method_class
method = constants._ENABLED_METHOD_CLASS(T, X,
self.session,
keras_learning_phase=self.keras_phase_placeholder,
**kwargs)
if (issubclass(constants._ENABLED_METHOD_CLASS, DeepLIFTRescale) or issubclass(constants._ENABLED_METHOD_CLASS, EpsilonLRP)) \
and constants._GRAD_OVERRIDE_CHECKFLAG == 0:
warnings.warn('DeepExplain detected you are trying to use an attribution method that requires '
'gradient override but the original gradient was used instead. You might have forgotten '
'to (re)create your graph within the DeepExplain context. Results are not reliable!')
constants._ENABLED_METHOD_CLASS = None
constants._GRAD_OVERRIDE_CHECKFLAG = 0
self.keras_phase_placeholder = None
return method
def explain(self, method, T, X, xs, ys=None, batch_size=None, **kwargs):
explainer = self.get_explainer(method, T, X, **kwargs)
return explainer.run(xs, ys, batch_size)
@staticmethod
def get_override_map():
return dict((a, 'DeepExplainGrad') for a in constants.SUPPORTED_ACTIVATIONS)
def _check_ops(self):
"""
Heuristically check if any op is in the list of unsupported activation functions.
This does not cover all cases where explanation methods would fail, and must be improved in the future.
Also, check if the placeholder named 'keras_learning_phase' exists in the graph. This is used by Keras
and needs to be passed in feed_dict.
:return:
"""
g = tf.compat.v1.get_default_graph()
for op in g.get_operations():
if len(op.inputs) > 0 and not op.name.startswith('gradients'):
if op.type in constants.UNSUPPORTED_ACTIVATIONS:
warnings.warn('Detected unsupported activation (%s). '
'This might lead to unexpected or wrong results.' % op.type)
elif 'keras_learning_phase' in op.name:
self.keras_phase_placeholder = op.outputs[0]
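# Hedged usage sketch (assumptions: an existing tf.compat.v1 Session `sess`, an
# input tensor `X`, a pre-softmax output tensor `T`, and an input batch `xs`):
#
#   with DeepExplain(session=sess) as de:
#       # the model must be (re)built inside this context so that the
#       # gradient override map registered above takes effect
#       attributions = de.explain('grad*input', T, X, xs)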
| 46.566929
| 153
| 0.672979
| 711
| 5,914
| 5.369902
| 0.278481
| 0.034573
| 0.057622
| 0.070718
| 0.197486
| 0.102672
| 0.064955
| 0.022525
| 0.022525
| 0.022525
| 0
| 0.004706
| 0.245519
| 5,914
| 127
| 154
| 46.566929
| 0.850964
| 0.101623
| 0
| 0.09
| 0
| 0
| 0.154139
| 0.00647
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.12
| 0.01
| 0.27
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
074414b6699fea23b4050feee569e12a24d49670
| 1,457
|
py
|
Python
|
util/mem_usage.py
|
robinupham/cnn_lensing
|
f5d4defc7e2c5b7a23744051da904526d04c27c8
|
[
"MIT"
] | null | null | null |
util/mem_usage.py
|
robinupham/cnn_lensing
|
f5d4defc7e2c5b7a23744051da904526d04c27c8
|
[
"MIT"
] | null | null | null |
util/mem_usage.py
|
robinupham/cnn_lensing
|
f5d4defc7e2c5b7a23744051da904526d04c27c8
|
[
"MIT"
] | null | null | null |
"""
Get the memory usage of a Keras model.
From https://stackoverflow.com/a/46216013.
"""
def get_model_memory_usage(batch_size, model):
"""
Get the memory usage of a Keras model in GB.
From https://stackoverflow.com/a/46216013.
"""
import numpy as np
try:
from keras import backend as K
except ImportError:
from tensorflow.keras import backend as K
shapes_mem_count = 0
internal_model_mem_count = 0
for l in model.layers:
layer_type = l.__class__.__name__
if layer_type == 'Model':
internal_model_mem_count += get_model_memory_usage(batch_size, l)
single_layer_mem = 1
out_shape = l.output_shape
if isinstance(out_shape, list):
out_shape = out_shape[0]
for s in out_shape:
if s is None:
continue
single_layer_mem *= s
shapes_mem_count += single_layer_mem
trainable_count = np.sum([K.count_params(p) for p in model.trainable_weights])
non_trainable_count = np.sum([K.count_params(p) for p in model.non_trainable_weights])
number_size = 4.0
if K.floatx() == 'float16':
number_size = 2.0
if K.floatx() == 'float64':
number_size = 8.0
total_memory = number_size * (batch_size * shapes_mem_count + trainable_count + non_trainable_count)
gbytes = np.round(total_memory / (1024.0 ** 3), 3) + internal_model_mem_count
return gbytes
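# Hedged usage sketch (the tiny model below is illustrative only, assuming a
# tensorflow.keras installation):
#
#   from tensorflow.keras import Sequential
#   from tensorflow.keras.layers import Dense
#
#   model = Sequential([Dense(64, activation='relu', input_shape=(32,)), Dense(1)])
#   print(get_model_memory_usage(batch_size=128, model=model), "GB")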
| 30.354167
| 104
| 0.649279
| 210
| 1,457
| 4.204762
| 0.338095
| 0.05436
| 0.047565
| 0.071348
| 0.353341
| 0.305776
| 0.165345
| 0.165345
| 0.097395
| 0.097395
| 0
| 0.034419
| 0.262183
| 1,457
| 47
| 105
| 31
| 0.786977
| 0.117364
| 0
| 0
| 0
| 0
| 0.015091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.129032
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0745c582ad840fd55885e6625d498a1f4e1e1d0a
| 799
|
py
|
Python
|
setup.py
|
statisticianinstilettos/recommender_metrics
|
82091ec53eb8b3527f95755006237658deb03c18
|
[
"MIT"
] | null | null | null |
setup.py
|
statisticianinstilettos/recommender_metrics
|
82091ec53eb8b3527f95755006237658deb03c18
|
[
"MIT"
] | null | null | null |
setup.py
|
statisticianinstilettos/recommender_metrics
|
82091ec53eb8b3527f95755006237658deb03c18
|
[
"MIT"
] | null | null | null |
import io
import os
from setuptools import setup
def read(file_name):
"""Read a text file and return the content as a string."""
with io.open(os.path.join(os.path.dirname(__file__), file_name),
encoding='utf-8') as f:
return f.read()
setup(
name='recmetrics',
url='https://github.com/statisticianinstilettos/recommender_metrics',
author='Claire Longo',
author_email='longoclaire@gmail.com',
packages=['recmetrics'],
install_requires=['funcsigs',
'numpy',
'pandas',
'plotly',
'scikit-learn',
'seaborn'],
license='MIT',
version='0.1.4',
description='Evaluation metrics for recommender systems',
long_description=read("README.md"),
long_description_content_type="text/markdown",
)
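# Hedged usage note: with this setup.py a local install is typically
#   python -m pip install .
# and a source distribution is built with
#   python -m build --sdist   (or the legacy `python setup.py sdist`)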
| 25.774194
| 73
| 0.644556
| 95
| 799
| 5.284211
| 0.694737
| 0.031873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00639
| 0.216521
| 799
| 30
| 74
| 26.633333
| 0.795527
| 0.065081
| 0
| 0
| 0
| 0
| 0.318489
| 0.02834
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07461f1a486f88f500aad5210c29f31d3c93dac1
| 1,174
|
py
|
Python
|
module2-sql-for-analysis/rpg_db.py
|
TobyChen320/DS-Unit-3-Sprint-2-SQL-and-Databases
|
306d2252b3756a501e2412fcb5eddbdebc16a362
|
[
"MIT"
] | null | null | null |
module2-sql-for-analysis/rpg_db.py
|
TobyChen320/DS-Unit-3-Sprint-2-SQL-and-Databases
|
306d2252b3756a501e2412fcb5eddbdebc16a362
|
[
"MIT"
] | null | null | null |
module2-sql-for-analysis/rpg_db.py
|
TobyChen320/DS-Unit-3-Sprint-2-SQL-and-Databases
|
306d2252b3756a501e2412fcb5eddbdebc16a362
|
[
"MIT"
] | null | null | null |
import sqlite3
import os
import psycopg2
from dotenv import load_dotenv
load_dotenv()
DB_NAME2 = os.getenv("DB_NAME3")
DB_USER2 = os.getenv("DB_USER3")
DB_PASS2 = os.getenv("DB_PASS3")
DB_HOST2 = os.getenv("DB_HOST3")
conn = psycopg2.connect(dbname=DB_NAME2,
user=DB_USER2,
password=DB_PASS2,
host=DB_HOST2)
cursor = conn.cursor()
sl_conn = sqlite3.connect("rpg_db.sqlite3")
sl_cursor = sl_conn.cursor()
characters = sl_cursor.execute('SELECT * FROM charactercreator_character LIMIT 10').fetchall()
print(characters)
create_character_table_query = '''
CREATE TABLE IF NOT EXISTS rpg_characters (
character_id SERIAL PRIMARY KEY,
name VARCHAR(30),
level INT,
exp INT,
hp INT,
strength INT,
intelligence INT,
dexterity INT,
wisdom INT
)
'''
cursor.execute(create_character_table_query)
conn.commit()
insert_query = '''INSERT INTO rpg_characters
(character_id, name, level, exp, hp, strength, intelligence, dexterity, wisdom)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)'''
for character in characters:
# Parameterized execution avoids quoting problems (e.g. names containing
# apostrophes) that interpolating the tuple into the SQL string would cause.
cursor.execute(insert_query, character)
conn.commit()
cursor.close()
conn.close()
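# Hedged alternative: psycopg2 cursors also support batching the same
# parameterized query in a single call, e.g.
#
#   cursor.executemany(insert_query, characters)
#   conn.commit()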
| 22.150943
| 94
| 0.698467
| 154
| 1,174
| 5.116883
| 0.415584
| 0.040609
| 0.050761
| 0.063452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022317
| 0.198467
| 1,174
| 52
| 95
| 22.576923
| 0.81509
| 0
| 0
| 0.095238
| 0
| 0
| 0.374787
| 0.022147
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.047619
| 0.095238
| 0
| 0.095238
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0748c347781432c41ed5dc21b2a78b229eb50e78
| 24,232
|
py
|
Python
|
sws_comp_wiki_gen.py
|
moff-wildfire/sws-battlefy
|
04b12b54f91e450980c2c57eed57f0504abec1bb
|
[
"Unlicense"
] | 1
|
2021-12-10T01:36:36.000Z
|
2021-12-10T01:36:36.000Z
|
sws_comp_wiki_gen.py
|
moff-wildfire/sws-battlefy
|
04b12b54f91e450980c2c57eed57f0504abec1bb
|
[
"Unlicense"
] | null | null | null |
sws_comp_wiki_gen.py
|
moff-wildfire/sws-battlefy
|
04b12b54f91e450980c2c57eed57f0504abec1bb
|
[
"Unlicense"
] | null | null | null |
import battlefy_data
import battlefy_wiki_linkings
from datetime import datetime
from operator import itemgetter
from pathlib import Path
import calcup_roster_tracking
def create_sidebar(data, wiki_name):
sidebar = '{{Infobox league' + '\n'
sidebar += '|liquipediatier=' + '\n'
sidebar += '|name=' + data['name'] + '\n'
sidebar += '|shortname=' + data['name'] + '\n'
sidebar += '|tickername=' + data['name'] + '\n'
sidebar += '|image=' + '\n'
sidebar += '|icon=' + '\n'
sidebar += '|series=' + '\n'
sidebar += '|organizer=' + data['organization']['name'] + '\n'
sidebar += '|organizer-link=' + '\n'
sidebar += '|sponsor=' + '\n'
sidebar += '|localcurrency=' + '\n'
sidebar += '|prizepool=' + data['prizes'] + '\n'
sidebar += '|type=Online' + '\n'
sidebar += '|platform=' + data['platform'] + '\n'
sidebar += '|country=' + '\n'
sidebar += '|format=' + '\n'
sidebar += '|patch=' + '\n'
sidebar += '|sdate=' + datetime.strptime(data['checkInStartTime'], '%Y-%m-%dT%H:%M:%S.%fZ').strftime(
'%Y-%m-%d') + '\n'
try:
sidebar += '|edate=' + datetime.strptime(data['lastCompletedMatchAt'], '%Y-%m-%dT%H:%M:%S.%fZ').strftime(
'%Y-%m-%d') + '\n'
except KeyError:
sidebar += '|edate=\n'
sidebar += '|web=' + '\n'
sidebar += '|bracket=https://battlefy.com/' + data['organization']['slug'] + '/' + data['slug'] + '/' \
+ data['_id'] + '/bracket-list' + '\n'
sidebar += '|rulebook=' + '\n'
sidebar += '|twitter=' + '\n'
sidebar += '|twitch=' + '\n'
sidebar += '|instagram=' + '\n'
sidebar += '|discord=' + '\n'
sidebar += '|map1=' + '\n'
sidebar += '|map2=' + '\n'
sidebar += '|map3=' + '\n'
sidebar += '|map4=' + '\n'
sidebar += '|map5=' + '\n'
sidebar += '|team_number=' + str(len(data['teams'])) + '\n'
sidebar += '|previous=' + '\n'
sidebar += '|next=' + '\n'
sidebar += '}}\n'
sidebar += '{{Upcoming matches tournament|' + wiki_name + '}}\n'
return sidebar
def create_event_format(data):
event_format = ''
for stage in data['stages']:
event_format += '* ' + stage['name'] + '\n'
if stage['bracket']['type'] == "swiss":
event_format += '** ' + str(stage['bracket']['roundsCount']) + '-round ' + stage['bracket']['type'] + '\n'
elif stage['bracket']['type'] == "elimination":
numGames = 0
rounds = 0
for match in stage['bracket']['series']:
if match['numGames'] != numGames:
if rounds:
event_format += '** ' + str(rounds) + '-round ' \
+ stage['bracket']['seriesStyle'] + str(numGames) + '\n'
rounds = 1
numGames = match['numGames']
else:
rounds += 1
if rounds:
event_format += '** ' + str(rounds) + '-round ' \
+ stage['bracket']['seriesStyle'] + str(numGames) + '\n'
return event_format
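# For illustration (assumed stage names and values): a 5-round swiss stage
# followed by a Bo3 single-elimination stage would render as
#   * Swiss Stage
#   ** 5-round swiss
#   * Playoffs
#   ** 4-round Bo3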
def rank_teams(data, bw_teams, sort_place=True, break_ties=False):
for stage in data['stages']:
for place, standing in enumerate(stage['standings']):
if 'place' in standing:
if 'place' not in data['teams'][standing['team']['_id']]:
data['teams'][standing['team']['_id']]['place'] = len(stage['standings']) + place
else:
if break_ties:
data['teams'][standing['team']['_id']]['place'] = \
standing['place'] + (1 - 1 / data['teams'][standing['team']['_id']]['place'])
else:
data['teams'][standing['team']['_id']]['place'] = standing['place']
else:
data['teams'][standing['team']['_id']]['place'] = len(stage['standings']) + place
teams = list()
for team_id in data['teams']:
if 'place' in data['teams'][team_id]:
place = data['teams'][team_id]['place']
else:
place = 0
team_info = bw_teams.get_team_info(data['teams'][team_id]['persistentTeamID'], data['teams'][team_id]['name'])
teams.append((team_id,
data['teams'][team_id]['name'],
place,
data['teams'][team_id]['persistentTeamID'],
team_info['name']
))
if sort_place:
teams = sorted(teams, key=itemgetter(2, 4, 0))
else:
teams = sorted(teams, key=itemgetter(4, 0))
return teams
def create_participants(data, bw_players, bw_teams, dynamic=None, sort_place=True):
# `dynamic` defaults to None rather than a mutable [] to avoid the shared
# default-argument pitfall; the falsy checks below handle both cases.
header = '{{TeamCardToggleButton}}\n'
teams_ordered = ''
# Use prior rounds as a tiebreaker for when multiple teams have the same place at the end
teams = rank_teams(data, bw_teams, sort_place)
dynamic_idx = 0
if dynamic:
header += '{{tabs dynamic\n'
header += '|name' + str(dynamic_idx+1) + '=' + dynamic[dynamic_idx]['tab_name'] + '\n'
header += '|This=1\n'
header += '|content' + str(dynamic_idx+1) + '=' + '\n'
header += '{{TeamCard columns start|cols=5|height=250}}\n'
for team_num, team in enumerate(teams):
if dynamic:
if team_num == dynamic[dynamic_idx]['count']:
teams_ordered += '{{TeamCard columns end}}\n'
dynamic_idx += 1
teams_ordered += '|name' + str(dynamic_idx + 1) + '=' + dynamic[dynamic_idx]['tab_name'] + '\n'
teams_ordered += '|content' + str(dynamic_idx+1) + '=' + '\n'
teams_ordered += '{{TeamCard columns start|cols=5|height=250}}\n'
else:
if team_num == 0:
teams_ordered += '{{TeamCard columns start|cols=5|height=250}}\n'
teams_table = '{{TeamCard\n'
team_info = bw_teams.get_team_info(team[3], team[1])
teams_table += '|team=' + team_info['name'] + '\n'
teams_table += '|image=' + team_info['image'] + '\n'
for idx, player in enumerate(data['teams'][team[0]]['players']):
player_tag = 'p' + str(idx + 1)
if player['_id'] in calcup_roster_tracking.eventid_to_missing_userid:
player['userID'] = calcup_roster_tracking.eventid_to_missing_userid[player['_id']]
player_info = bw_players.get_player_info(player['userID'], player['inGameName'])
teams_table += '|' + player_tag + '=' + player_info['name'] \
+ ' |' + player_tag + 'flag=' + player_info['flag']
if player_info['link']:
teams_table += ' |' + player_tag + 'link=' + player_info['link']
teams_table += '\n'
# teams_table += '|c= |cflag=\n'
# teams_table += '|qualifier=\n'
teams_table += '}}\n'
teams_ordered += teams_table
footer = '{{TeamCard columns end}}\n'
if dynamic:
footer += '}}\n'
return header + teams_ordered + footer
def create_swiss_table(stage, bw_teams):
dropped_style = 'drop'
swiss_table = '{{SwissTableLeague|rounds=' + str(stage['bracket']['roundsCount']) + '|diff=false\n'
for i in range(stage['bracket']['teamsCount']):
swiss_table += '|pbg' + str(i + 1) + '=down'
if (i + 1) % 8 == 0:
swiss_table += '\n'
if '\n' not in swiss_table[-1]:
swiss_table += '\n'
for rank, record in enumerate(stage['standings']):
if record['disqualified']:
swiss_table += '|bg' + str(rank + 1) + '=' + dropped_style + ''
else:
swiss_table += '|bg' + str(rank + 1) + '=down'
team_info = bw_teams.get_team_info(record['team']['persistentTeamID'], record['team']['name'])
swiss_table += '|team' + str(rank + 1) + '=' + team_info['teamteamplate']
swiss_table += '|temp_tie' + str(rank+1) + '=' + "{:7.3f}".format(record['opponentsMatchWinPercentage']) + '\n'
swiss_table += '}}\n'
return swiss_table
def create_swiss_matches(matches, teams, bw_teams):
swiss_match_table = ''
rounds = dict()
for match in matches:
match_line = create_match_maps(match, teams, bw_teams)
if not match_line:
continue
try:
rounds[str(match['roundNumber'])].append(match_line)
except KeyError:
rounds[str(match['roundNumber'])] = list()
rounds[str(match['roundNumber'])].append(match_line)
for i in range(1, len(rounds) + 1):
if i == 1:
swiss_match_table += '{{box|start|padding=2em}}\n'
else:
swiss_match_table += '{{box|break|padding=2em}}\n'
swiss_match_table += '====={{HiddenSort|Round ' + str(i) + '}}=====\n'
swiss_match_table += '{{MatchListStart|width=450px|title=Round ' + str(i) + ' Matches|matchsection=Round ' \
+ str(i) + '|hide=false}}\n'
for match in rounds[str(i)]:
swiss_match_table += match
swiss_match_table += '{{MatchListEnd}}\n'
swiss_match_table += '{{box|end}}\n'
return swiss_match_table
def create_elim_bracket(stage, teams, bw_teams):
if stage['bracket']['style'] == 'single':
bracket = '{{' + str(stage['bracket']['teamsCount']) + 'SETeamBracket\n'
elif stage['bracket']['style'] == 'double':
bracket = '{{' + str(stage['bracket']['teamsCount']) + 'DETeamBracket\n'
else:
print('Unknown stage style: ' + stage['bracket']['style'])
return
# todo handle double elimination brackets
# set up team number trackers
team_previous_round = dict()
# set up round-match count trackers
round_max_win_match_count = [1] * (len(stage['bracket']['series']) + 1)
round_max_win_match_count[0] = 0
round_max_loss_match_count = [1] * (len(stage['bracket']['series']) + 1)
round_max_loss_match_count[0] = 0
# matches = sorted(stage['matches'], key=itemgetter('matchNumber'))
matches = stage['matches']
for match in matches:
# TODO: this will need to get updated for non SE16 templates
# In DE brackets D means the team dropped down from the previous round
# In DE brackets W means the team won the previous round
# So there are rounds where D vs L happen such as R2D1 vs R2W5 and R2D2 vs R2W6
# Might want to key off match['inConsolationBracket']
# May also just need to keep track of match['next'] and build up the D and W that way instead
# Default first round to D and then future bracket type is defined by match['next']
# Not exactly sure how to address round_team_number, in a 8 team DE the third winners bracket round is
# called the 4th round and in a 16 team DE the 4th winners bracket round is called the 6th round
# https://liquipedia.net/rainbowsix/Template:4DETeamBracket/doc
# https://liquipedia.net/rainbowsix/Template:8DETeamBracket/doc
# https://liquipedia.net/rainbowsix/Template:16DETeamBracket/doc
# if match['matchType'] == 'winner':
# round_max_win_match_count[match['roundNumber']] = max(match['matchNumber'],
# round_max_win_match_count[match['roundNumber']])
# elif match['matchType'] == 'loser':
# round_max_loss_match_count[match['roundNumber']] = max(match['matchNumber'],
# round_max_loss_match_count[match['roundNumber']])
if 'teamID' not in match['top']:
continue
if match['top']['teamID'] in team_previous_round:
if team_previous_round[match['top']['teamID']]:
bracket_type = 'W'
else:
bracket_type = 'D'
else:
bracket_type = 'D'
if match['matchType'] == 'winner':
round_match_offset = -2 * round_max_win_match_count[match['roundNumber'] - 1]
else:
round_match_offset = -2 * round_max_loss_match_count[match['roundNumber'] - 1] \
+ (round_max_win_match_count[match['roundNumber']]
- round_max_win_match_count[match['roundNumber'] - 1]) * 2
# Increment for next time
if match['matchType'] == 'winner':
round_max_win_match_count[match['roundNumber']] = max(match['matchNumber'],
round_max_win_match_count[match['roundNumber']])
elif match['matchType'] == 'loser':
round_max_loss_match_count[match['roundNumber']] = max(match['matchNumber'],
round_max_loss_match_count[match['roundNumber']])
bracket_indicator = '|R' + str(match['roundNumber']) + bracket_type \
+ str(match['matchNumber'] * 2 - 1 + round_match_offset)
if 'teamID' in match['top']:
team_name = bw_teams.get_team_info(teams[match['top']['teamID']]['persistentTeamID'],
teams[match['top']['teamID']]['name'])['teamteamplate']
bracket += bracket_indicator + 'team=' + team_name + ' '
else:
bracket += bracket_indicator + 'literal=BYE '
if 'score' in match['top']:
bracket += bracket_indicator + 'score=' + str(match['top']['score']) + ' '
if 'winner' in match['top'] and match['top']['winner']:
bracket += bracket_indicator + 'win=1 '
team_previous_round[match['top']['teamID']] = True
else:
team_previous_round[match['top']['teamID']] = False
bracket += '\n'
if 'teamID' in match['bottom']:
if match['bottom']['teamID'] in team_previous_round:
if team_previous_round[match['bottom']['teamID']]:
bracket_type = 'W'
else:
bracket_type = 'D'
else:
bracket_type = 'D'
else:
bracket_type = 'D'
bracket_indicator = '|R' + str(match['roundNumber']) + bracket_type \
+ str(match['matchNumber'] * 2 + round_match_offset)
if 'teamID' in match['bottom']:
team_name = bw_teams.get_team_info(teams[match['bottom']['teamID']]['persistentTeamID'],
teams[match['bottom']['teamID']]['name'])['teamteamplate']
bracket += bracket_indicator + 'team=' + team_name + ' '
else:
bracket += bracket_indicator + 'literal=BYE '
if 'score' in match['bottom']:
bracket += bracket_indicator + 'score=' + str(match['bottom']['score']) + ' '
if 'winner' in match['bottom'] and match['bottom']['winner']:
bracket += bracket_indicator + 'win=2 '
team_previous_round[match['bottom']['teamID']] = True
elif 'teamID' in match['bottom']:
team_previous_round[match['bottom']['teamID']] = False
bracket += '\n'
bracket += '}}\n'
return bracket
def create_match_maps(match, teams, bw_teams):
match_line = ''
if not match['isComplete']:
return match_line
match_line = '{{MatchMaps\n'
match_line += '|date=\n'
if 'teamID' in match['top']:
team_top = bw_teams.get_team_info(teams[match['top']['teamID']]['persistentTeamID'],
teams[match['top']['teamID']]['name'])
elif match['isBye']:
team_top = bw_teams.get_team_info('0', 'BYE')
if 'teamID' in match['bottom']:
team_bot = bw_teams.get_team_info(teams[match['bottom']['teamID']]['persistentTeamID'],
teams[match['bottom']['teamID']]['name'])
elif match['isBye']:
team_bot = bw_teams.get_team_info('0', 'BYE')
match_line += '|team1=' + team_top['teamteamplate']
match_line += '|team2=' + team_bot['teamteamplate']
if 'isTie' in match and match['isTie']:
match_line += '|winner=0\n'
elif 'winner' in match['top'] and match['top']['winner']:
match_line += '|winner=1\n'
elif 'winner' in match['bottom'] and match['bottom']['winner']:
match_line += '|winner=2\n'
else:
match_line += '|winner=0\n'
if match['isBye']:
match_line += '|walkover=1'
match_line += '|games1='
if match['top']['winner']:
match_line += 'W'
else:
match_line += 'FF'
match_line += '|games2='
if 'winner' in match['bottom'] and match['bottom']['winner']:
match_line += 'W'
else:
match_line += 'FF'
else:
match_line += '|games1=' + str(match['top']['score'])
match_line += '|games2=' + str(match['bottom']['score']) + '\n'
match_line += '|details={{BracketMatchSummary\n'
match_line += '|date=|finished=true\n'
match_line += '|twitch= |youtube=\n'
match_line += '|vod=\n'
match_line += '}}\n'
match_line += '}}\n'
return match_line
def create_round_robin_tables(stage, teams, bw_teams, wiki_name, include_matches=True):
tables = ''
for idx, group in enumerate(stage['groups']):
if idx == 0:
tables += '{{box|start|padding=2em}}\n'
else:
tables += '{{box|break|padding=2em}}\n'
tables += '===={{HiddenSort|Group ' + group['name'] + '}}====\n'
tables += '{{GroupTableLeague|title=Group ' + group['name'] + '|width=450px|show_p=false|date=|ties=true\n'
tables += '|tournament=' + wiki_name + '\n'
group_header = ''
group_table = ''
for pos, standing_id in enumerate(group['standingIDs']):
group_header += '|pbg' + str(pos + 1) + '=down'
for standing in stage['standings']:
if standing_id == standing['_id']:
# if standing['disqualified']:
# has_drop = True
team_info = bw_teams.get_team_info(teams[standing['team']['_id']]['persistentTeamID'],
teams[standing['team']['_id']]['name'])
group_table += '|bg' + str(pos + 1) + '=down|team' + str(pos + 1) + "=" \
+ team_info['teamteamplate'] + '\n'
group_header += '|tiebreaker1=series\n'
tables += group_header
tables += group_table
tables += "}}\n"
if include_matches:
match_table = '{{MatchListStart|title=Group ' + group['name'] + ' Matches|width=450px|hide=true}}\n'
for match in group['matches']:
match_line = create_match_maps(match, teams, bw_teams)
match_table += match_line
tables += match_table
tables += '{{MatchListEnd}}\n'
tables += '{{box|end}}\n'
return tables
def create_prize_pool(prize):
prize_pool = prize + '\n'
prize_pool += '{{prize pool start}}\n'
prize_pool += '{{prize pool slot |place=1 |usdprize=0 |tbd |lastvs1= |lastscore1= |lastvsscore1=}}\n'
prize_pool += '{{prize pool slot |place=2 |usdprize=0 |tbd |lastvs1= |lastscore1= |lastvsscore1=}}\n'
prize_pool += '{{prize pool slot |place=3-4 |usdprize=0\n'
prize_pool += '|tbd |lastvs1= |lastscore1= |lastvsscore1=\n'
prize_pool += '|tbd |lastvs2= |lastscore2= |lastvsscore2=\n'
prize_pool += '}}\n'
prize_pool += '{{prize pool slot |place=5-8 |usdprize=0\n'
prize_pool += '|tbd |lastvs1= |lastscore1= |lastvsscore1=\n'
prize_pool += '|tbd |lastvs2= |lastscore2= |lastvsscore2=\n'
prize_pool += '|tbd |lastvs3= |lastscore3= |lastvsscore3=\n'
prize_pool += '|tbd |lastvs4= |lastscore4= |lastvsscore4=\n'
prize_pool += '}}\n'
prize_pool += '{{Prize pool end}}\n'
return prize_pool
def main():
ccs_winter_minor_id = '5ff3354193edb53839d44d55'
ccs_winter_minor_wiki = 'Calrissian_Cup/Winter/Minor'
ccs_winter_major_id = '60019f8ebcc5ed46373408a1'
ccs_winter_major_wiki = 'Calrissian_Cup/Winter/Major'
ccs_spring_minor_id = '603c00fbfe4fb811b3168f5b'
ccs_spring_minor_wiki = 'Calrissian_Cup/Spring/Minor'
ccs_spring_major_id = '6061b764f68d8733c8455fcf'
ccs_spring_major_wiki = 'Calrissian_Cup/Spring/Major'
ccs_summer_minor_id = '60b41961d35b1411a7b31d64'
ccs_summer_minor_wiki = 'Calrissian_Cup/Summer/Minor'
ccs_summer_major_id = '60dd319012cb9c33c2f63868'
ccs_summer_major_wiki = 'Calrissian_Cup/Summer/Major'
ccs_fall_minor_id = '60fa26043ba15d73719669bd'
ccs_fall_minor_wiki = 'Calrissian_Cup/Fall/Minor'
ccs_fall_major_id = '61314505635fe17a14eafe03'
ccs_fall_major_wiki = 'Calrissian_Cup/Fall/Major'
ccs_championship_id = '6150dd2b0dd060282bebb0eb'
ccs_championship_wiki = 'Calrissian_Cup/Championship'
world_cup_id = '611dac6ecb6f6260d5f30b6e'
world_cup_wiki = 'World_Cup'
twin_suns_tourny_id = '60806876938bed74f6edea9e'
twin_suns_wiki = 'Twin_Suns_Tournament'
gsl_s1_id = '5ff4b388fd124e11b18e185d'
gsl_s1_wiki = 'Global_Squadrons_League/2021/Season_1'
tournament_id = world_cup_id
wiki_name = world_cup_wiki
participant_tabs = [
# {'tab_name': 'Top 16',
# 'count': 16},
# {'tab_name': 'Top 32',
# 'count': 32},
# {'tab_name': 'Other Notable Participants',
# 'count': -1},
]
bw_teams = battlefy_wiki_linkings.BattlefyWikiTeamLinkings()
bw_players = battlefy_wiki_linkings.BattlefyWikiPlayerLinkings()
event_data = battlefy_data.BattlefyData(tournament_id)
event_data.load_tournament_data()
# FORCE REDUCE TEAMS
event_data.reduce_teams()
event_path = event_data.get_tournament_data_path()
event_path.mkdir(parents=True, exist_ok=True)
filename = Path.joinpath(event_path, event_data.tournament_data['name'] + '.wiki')
with open(filename, 'w+', newline='\n', encoding='utf-8') as f:
display = '{{DISPLAYTITLE:' + event_data.tournament_data['name'] + '}}\n'
f.write(display)
sidebar = create_sidebar(event_data.tournament_data, wiki_name)
f.write(sidebar)
f.write('==About==\n')
f.write('===Format===\n')
event_format = create_event_format(event_data.tournament_data)
f.write(event_format)
f.write('===Broadcast Talent===\n')
f.write('===Prize Pool===\n')
prize_pool = create_prize_pool(event_data.tournament_data['prizes'])
f.write(prize_pool)
f.write('==Participants==\n')
teams = create_participants(event_data.tournament_data, bw_players, bw_teams,
dynamic=participant_tabs, sort_place=True)
f.write(teams)
f.write('==Results==\n')
for stage in event_data.tournament_data['stages']:
if stage['bracket']['type'] == 'swiss':
f.write('===Swiss Stage===\n')
f.write('====Swiss Standings====\n')
swiss_table = create_swiss_table(stage, bw_teams)
f.write(swiss_table)
f.write('====Swiss Match Results====\n')
swiss_matches = create_swiss_matches(stage['matches'], event_data.tournament_data['teams'], bw_teams)
f.write(swiss_matches)
elif stage['bracket']['type'] == 'elimination':
f.write('===Playoffs===\n')
bracket = create_elim_bracket(stage, event_data.tournament_data['teams'], bw_teams)
f.write(bracket)
elif stage['bracket']['type'] == 'roundrobin':
f.write('===' + stage['name'] + '===\n')
round_robin_tables = create_round_robin_tables(stage, event_data.tournament_data['teams'], bw_teams,
wiki_name, include_matches=True)
f.write(round_robin_tables)
else:
print('Unsupported bracket type of: ' + stage['bracket']['type'])
if __name__ == '__main__':
main()
| 42.812721
| 119
| 0.560333
| 2,701
| 24,232
| 4.818215
| 0.151425
| 0.021515
| 0.011526
| 0.023974
| 0.413708
| 0.337175
| 0.280237
| 0.242739
| 0.194329
| 0.183495
| 0
| 0.020891
| 0.27897
| 24,232
| 565
| 120
| 42.888496
| 0.723958
| 0.077088
| 0
| 0.219731
| 0
| 0.004484
| 0.230966
| 0.053475
| 0.06278
| 0
| 0
| 0.00177
| 0
| 1
| 0.024664
| false
| 0
| 0.013453
| 0
| 0.065022
| 0.004484
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
074906b7cce1eac2c3d5b9dbf7a25ead70cb372d
| 11,662
|
py
|
Python
|
training_xgboost_model.py
|
MighTy-Weaver/Inefficient-AC-detection
|
8229f19accd1569ba7b48f77f71783173393d9ed
|
[
"Apache-2.0"
] | 2
|
2021-02-21T13:28:30.000Z
|
2021-07-10T05:24:05.000Z
|
training_xgboost_model.py
|
MighTy-Weaver/Inefficient-AC-detection
|
8229f19accd1569ba7b48f77f71783173393d9ed
|
[
"Apache-2.0"
] | null | null | null |
training_xgboost_model.py
|
MighTy-Weaver/Inefficient-AC-detection
|
8229f19accd1569ba7b48f77f71783173393d9ed
|
[
"Apache-2.0"
] | null | null | null |
# This is the code to train the xgboost model with cross-validation for each unique room in the dataset.
# Models are dumped into ./models and results are dumped into two csv files in the current work directory.
import argparse
import json
import math
import os
import pickle
import warnings
from typing import Tuple
import numpy as np
import pandas as pd
import xgboost as xgb
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from imblearn.over_sampling import SMOTE
from numpy.random import RandomState
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import compute_sample_weight
from tqdm import tqdm
from xgboost import DMatrix, cv
# Set up an argument parser to decide the metric function
parser = argparse.ArgumentParser()
parser.add_argument("--metric", choices=['R2', 'RMSE'], type=str, required=False, default='R2',
help="The evaluation metric you want to use to train the XGBoost model")
parser.add_argument("--log", choices=[0, 1, 100], type=int, required=False, default=0,
help="Whether to print out the training progress")
parser.add_argument("--SMOTE", choices=[0, 1], type=int, required=False, default=1, help="Whether use the SMOTE or not")
parser.add_argument("--SMOGN", choices=[0, 1], type=int, required=False, default=0, help="Whether use the SMOGN or not")
parser.add_argument("--SampleWeight", choices=[0, 1], type=int, required=False, default=0,
help="Whether use the sample weight")
args = parser.parse_args()
# Ignore all the warnings and set pandas to display every column and row every time we print a dataframe
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
assert args.SMOTE != args.SMOGN, "Can't use SMOTE and SMOGN at the same time!"
# Load the data with a positive AC electricity consumption value, and drop the time data as we don't need them
data = pd.read_csv("summer_data_compiled.csv", index_col=0)
data = data[data.AC > 0].drop(['Time', 'Date', 'Hour'], axis=1).reset_index(drop=True)
# Create some directory to store the models and future analysis figures.
# log_folder_name = "Test_{}_{}".format(args.metric, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
log_folder_name = "Test_R2_HYPEROPT"
log_folder_name = log_folder_name + "_SMOTE" if args.SMOTE else log_folder_name
log_folder_name = log_folder_name + "_SMOGN" if args.SMOGN else log_folder_name
log_folder_name = log_folder_name + "_SW" if args.SampleWeight else log_folder_name
previous_parameter_folder = "Test_R2_HYPEROPT"
assert log_folder_name != previous_parameter_folder, "Previous folder name exists"
if not os.path.exists('./{}/'.format(log_folder_name)):
os.mkdir('./{}'.format(log_folder_name))
os.mkdir('./{}/models/'.format(log_folder_name))
os.mkdir('./{}/trntst_models/'.format(log_folder_name))
# Define our evaluation functions
def RMSE(predt: np.ndarray, dtrain: DMatrix) -> Tuple[str, float]:
truth_value = dtrain.get_label()
root_squared_error = math.sqrt(mean_squared_error(truth_value, predt))
return "RMSE", root_squared_error
def R2(predt: np.ndarray, dtrain: DMatrix) -> Tuple[str, float]:
truth_value = dtrain.get_label()
r2_value = r2_score(truth_value, predt)
return "R2", r2_value
def fobjective(space):
param_dict_tunning = {'max_depth': int(space['max_depth']),
'learning_rate': space['learning_rate'],
'colsample_bytree': space['colsample_bytree'],
'min_child_weight': int(space['min_child_weight']),
'reg_alpha': int(space['reg_alpha']),
'reg_lambda': space['reg_lambda'],
'subsample': space['subsample'],
'min_split_loss': space['min_split_loss'],
'objective': 'reg:squarederror'}
xgb_cv_result = xgb.cv(dtrain=data_matrix, params=param_dict_tunning, nfold=5,
early_stopping_rounds=30, as_pandas=True, num_boost_round=200,
seed=seed, metrics='rmse', maximize=False, shuffle=True)
return {"loss": (xgb_cv_result["test-rmse-mean"]).tail(1).iloc[0], "status": STATUS_OK}
eval_dict = {'RMSE': RMSE, 'R2': R2}
print("Start Training The Models")
# Create two dataframes to store the result during the training and after the training.
error_csv = pd.DataFrame(
columns=['room', 'train-{}-mean'.format(args.metric), 'train-{}-std'.format(args.metric), 'train-rmse-mean',
'train-rmse-std', 'test-{}-mean'.format(args.metric), 'test-{}-std'.format(args.metric), 'test-rmse-mean',
'test-rmse-std'])
prediction_csv = pd.DataFrame(columns=['room', 'observation', 'prediction'])
room_list = data['Location'].unique()
# ranging through all the rooms and do the training and cross-validation for each room.
for room in tqdm(room_list):
seed = 2030 + room
# Five rooms have low-quality data, so we skip them manually
if room in (309, 312, 826, 917, 1001):
continue
# We extract the data of particular room and run the SMOTE algorithm on it.
room_data = data[data.Location == room].drop(['Location'], axis=1).reset_index(drop=True)
if args.SMOTE:
# Label all the AC data by 0.75, all AC above 0.75 will be marked as 1, otherwise 0. Split into X and y
room_data['SMOTE_split'] = (room_data['AC'] > 0.75).astype('int')
X = room_data.drop(['SMOTE_split'], axis=1)
y = room_data['SMOTE_split']
# Run the SMOTE algorithm and retrieve the result.
model_smote = SMOTE(random_state=621, k_neighbors=3)
room_data_smote, smote_split = model_smote.fit_resample(X, y)
# concat the result from SMOTE and split the result into X and y for training.
room_data_smote = pd.concat([room_data_smote, smote_split], axis=1)
y = room_data_smote['AC']
X = room_data_smote.drop(['AC', 'SMOTE_split'], axis=1)
elif args.SMOGN:
if len(room_data) < 500:
room_data['SMOTE_split'] = (room_data['AC'] > 0.75).astype('int')
X = room_data.drop(['SMOTE_split'], axis=1)
y = room_data['SMOTE_split']
# Run the SMOTE algorithm and retrieve the result.
model_smote = SMOTE(random_state=621, k_neighbors=3)
room_data_smote, smote_split = model_smote.fit_resample(X, y)
# concat the result from SMOTE and split the result into X and y for training.
room_data_smote = pd.concat([room_data_smote, smote_split], axis=1)
y = room_data_smote['AC']
X = room_data_smote.drop(['AC', 'SMOTE_split'], axis=1)
else:
room_data = pd.read_csv('./SMOGN_processed/{}.csv'.format(room), index_col=0)
y = room_data['AC']
X = room_data.drop(['AC'], axis=1)
else:
y = pd.DataFrame(room_data['AC'].fillna(method='pad'))
X = room_data.drop(['AC'], axis=1).fillna(method='pad')
if args.SampleWeight:
class_sample = pd.cut(y, bins=15)
weight = compute_sample_weight(class_weight="balanced", y=class_sample)
X = X.to_numpy()
# Build another full data matrix for the built-in cross validation function to work.
data_matrix = DMatrix(data=X, label=y, weight=weight) if args.SampleWeight else DMatrix(data=X, label=y)
# Cross_validation with hyper-parameter tuning
space = {'max_depth': hp.quniform("max_depth", 3, 10, 1),
'learning_rate': hp.uniform("learning_rate", 0.1, 3),
'colsample_bytree': hp.uniform("colsample_bytree", 0.5, 1),
'min_child_weight': hp.quniform("min_child_weight", 1, 20, 1),
'reg_alpha': hp.quniform("reg_alpha", 0, 100, 1),
'reg_lambda': hp.uniform("reg_lambda", 0, 2),
'subsample': hp.uniform("subsample", 0.5, 1),
'min_split_loss': hp.uniform("min_split_loss", 0, 9)}
if os.path.exists('./{}/models/{}_parameter.npy'.format(previous_parameter_folder, room)):
best_param_dict = np.load('./{}/models/{}_parameter.npy'.format(previous_parameter_folder, room),
allow_pickle=True).item()
np.save('./{}/models/{}_parameter.npy'.format(log_folder_name, room), best_param_dict)
else:
trials = Trials()
best_hyperparams = fmin(fn=fobjective, space=space, algo=tpe.suggest, max_evals=400, trials=trials,
rstate=RandomState(seed))
# setup our training parameters and a model variable as model checkpoint
best_param_dict = {'objective': 'reg:squarederror', 'max_depth': int(best_hyperparams['max_depth']),
'reg_alpha': best_hyperparams['reg_alpha'], 'reg_lambda': best_hyperparams['reg_lambda'],
'min_child_weight': best_hyperparams['min_child_weight'],
'colsample_bytree': best_hyperparams['colsample_bytree'],
'learning_rate': best_hyperparams['learning_rate'],
'subsample': best_hyperparams['subsample'],
'min_split_loss': best_hyperparams['min_split_loss']}
np.save('./{}/models/{}_parameter.npy'.format(log_folder_name, room), best_param_dict)
# Use the built-in cv function to do the cross validation, still with ten folds, this will return us the results.
xgb_cv_result = cv(dtrain=data_matrix, params=best_param_dict, nfold=5,
early_stopping_rounds=30, as_pandas=True, num_boost_round=200,
seed=seed, shuffle=True, feval=eval_dict[args.metric], maximize=True)
xgb_cv_result['room'] = room
error_csv.loc[len(error_csv)] = xgb_cv_result.loc[len(xgb_cv_result) - 1]
# Use one train/test split for plotting, and save both ground truth and prediction values into the dataframe
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
d_train = DMatrix(X_train, label=y_train)
d_test = DMatrix(X_test, label=y_test)
watchlist = [(d_test, 'eval'), (d_train, 'train')]
xgb_model_train_test = xgb.train(params=best_param_dict, dtrain=d_train, num_boost_round=200, evals=watchlist,
verbose_eval=args.log, xgb_model=None, feval=eval_dict[args.metric], maximize=True)
prediction = np.array(xgb_model_train_test.predict(d_test)).tolist()
real = np.array(y_test).tolist()
prediction_csv.loc[len(prediction_csv)] = {'room': room, 'observation': json.dumps(real),
'prediction': json.dumps(prediction)}
# Dump the error dataframes into csv files.
error_csv.to_csv('./{}/error.csv'.format(log_folder_name), index=False)
prediction_csv.to_csv('./{}/prediction.csv'.format(log_folder_name), index=False)
# Develop a model using the whole original dataset, and save the model
xgb_model_full = xgb.train(params=best_param_dict, dtrain=data_matrix, num_boost_round=200, evals=watchlist,
verbose_eval=args.log, xgb_model=None, feval=eval_dict[args.metric], maximize=True)
# Save all the models we trained for future use
pickle.dump(xgb_model_train_test, open('./{}/trntst_models/{}.pickle.bat'.format(log_folder_name, room), 'wb'))
pickle.dump(xgb_model_full, open('./{}/models/{}.pickle.bat'.format(log_folder_name, room), 'wb'))
print("Training finished!")
| 51.149123
| 120
| 0.667038
| 1,648
| 11,662
| 4.509709
| 0.215413
| 0.026911
| 0.038482
| 0.025565
| 0.334499
| 0.294672
| 0.264801
| 0.237217
| 0.213402
| 0.202637
| 0
| 0.015043
| 0.207683
| 11,662
| 227
| 121
| 51.374449
| 0.789286
| 0.163608
| 0
| 0.173077
| 0
| 0
| 0.177632
| 0.022307
| 0
| 0
| 0
| 0
| 0.012821
| 1
| 0.019231
| false
| 0
| 0.115385
| 0
| 0.153846
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0749f9a616656fe35e1c0d2532a8c8a5e40dc4ab
| 1,042
|
py
|
Python
|
vaping/config.py
|
josephburnett/vaping
|
16f9092f0b3c1692e6d1a040f746e1277e197353
|
[
"Apache-2.0"
] | null | null | null |
vaping/config.py
|
josephburnett/vaping
|
16f9092f0b3c1692e6d1a040f746e1277e197353
|
[
"Apache-2.0"
] | null | null | null |
vaping/config.py
|
josephburnett/vaping
|
16f9092f0b3c1692e6d1a040f746e1277e197353
|
[
"Apache-2.0"
] | null | null | null |
import re
import munge
def parse_interval(val):
"""
converts a string to float of seconds
.5 = 500ms
90 = 1m30s
**Arguments**
- val (`str`)
"""
re_intv = re.compile(r"([\d\.]+)([a-zA-Z]+)")
val = val.strip()
total = 0.0
for match in re_intv.findall(val):
unit = match[1]
count = float(match[0])
if unit == "s":
total += count
elif unit == "m":
total += count * 60
elif unit == "ms":
total += count / 1000
elif unit == "h":
total += count * 3600
elif unit == "d":
total += count * 86400
else:
raise ValueError("unknown unit from interval string '%s'" % val)
return total
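# Hedged examples of the conversion above:
#
#   assert parse_interval("1m30s") == 90.0
#   assert parse_interval("500ms") == 0.5
#   assert parse_interval("2h") == 7200.0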
class Config(munge.Config):
"""
Vaping config manager
"""
defaults = {
"config": {
"vaping": {"home_dir": None, "pidfile": "vaping.pid", "plugin_path": [],},
},
"config_dir": "~/.vaping",
"codec": "yaml",
}
| 20.84
| 86
| 0.46833
| 115
| 1,042
| 4.191304
| 0.573913
| 0.103734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042879
| 0.373321
| 1,042
| 49
| 87
| 21.265306
| 0.695253
| 0.114203
| 0
| 0
| 0
| 0
| 0.159817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
074c422d6b8b108e68ca3caffc0062b15b80774b
| 1,333
|
py
|
Python
|
examples/scripts/segmentation/nnet3-segmenter.py
|
mxmpl/pykaldi
|
0570307138c5391cc47b019450d08bcb9686dd98
|
[
"Apache-2.0"
] | 916
|
2017-11-22T19:33:36.000Z
|
2022-03-31T11:51:58.000Z
|
examples/scripts/segmentation/nnet3-segmenter.py
|
mxmpl/pykaldi
|
0570307138c5391cc47b019450d08bcb9686dd98
|
[
"Apache-2.0"
] | 268
|
2018-01-16T22:06:45.000Z
|
2022-03-29T03:24:41.000Z
|
examples/scripts/segmentation/nnet3-segmenter.py
|
mxmpl/pykaldi
|
0570307138c5391cc47b019450d08bcb9686dd98
|
[
"Apache-2.0"
] | 260
|
2018-01-23T18:39:40.000Z
|
2022-03-24T08:17:39.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from kaldi.segmentation import NnetSAD, SegmentationProcessor
from kaldi.nnet3 import NnetSimpleComputationOptions
from kaldi.util.table import SequentialMatrixReader
# Construct SAD
model = NnetSAD.read_model("final.raw")
post = NnetSAD.read_average_posteriors("post_output.vec")
transform = NnetSAD.make_sad_transform(post)
graph = NnetSAD.make_sad_graph()
decodable_opts = NnetSimpleComputationOptions()
decodable_opts.extra_left_context = 79
decodable_opts.extra_right_context = 21
decodable_opts.extra_left_context_initial = 0
decodable_opts.extra_right_context_final = 0
decodable_opts.frames_per_chunk = 150
decodable_opts.acoustic_scale = 0.3
sad = NnetSAD(model, transform, graph, decodable_opts=decodable_opts)
seg = SegmentationProcessor(target_labels=[2])
# Define feature pipeline as a Kaldi rspecifier
feats_rspec = "ark:compute-mfcc-feats --config=mfcc.conf scp:wav.scp ark:- |"
# Segment
with SequentialMatrixReader(feats_rspec) as f, open("segments", "w") as s:
for key, feats in f:
out = sad.segment(feats)
segments, stats = seg.process(out["alignment"])
seg.write(key, segments, s)
print("segments:", segments, flush=True)
print("stats:", stats, flush=True)
print("global stats:", seg.stats, flush=True)
| 37.027778
| 77
| 0.775694
| 177
| 1,333
| 5.627119
| 0.485876
| 0.11747
| 0.072289
| 0.044177
| 0.118474
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011121
| 0.123031
| 1,333
| 35
| 78
| 38.085714
| 0.84089
| 0.066017
| 0
| 0
| 0
| 0.038462
| 0.10556
| 0.017728
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
074cdaf58b71e5a0a7b4da96e1a1535d7fb91e4b
| 987
|
py
|
Python
|
helper_tools/raspi_OMX-Player_Howto_demo.py
|
stko/Schnipsl
|
824572c657e48f18950f584b9529661ff5bb8069
|
[
"MIT"
] | null | null | null |
helper_tools/raspi_OMX-Player_Howto_demo.py
|
stko/Schnipsl
|
824572c657e48f18950f584b9529661ff5bb8069
|
[
"MIT"
] | 29
|
2020-08-30T15:07:50.000Z
|
2022-02-19T03:41:26.000Z
|
helper_tools/raspi_OMX-Player_Howto_demo.py
|
wifitvbox/Schnipsl
|
553ce8de3dda26fb92297ad76e92f4a363070e4e
|
[
"MIT"
] | 1
|
2020-12-28T05:46:17.000Z
|
2020-12-28T05:46:17.000Z
|
#!/usr/bin/python
# mp4museum.org by julius schmiedel 2019
import os
import sys
import glob
from subprocess import Popen, PIPE
import RPi.GPIO as GPIO
FNULL = open(os.devnull, "w")
# setup GPIO pin
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(13, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
# functions to be called by event listener
def buttonPause(channel):
player.stdin.write("p")
def buttonNext(channel):
player.stdin.write("q")
# add event listener
GPIO.add_event_detect(11, GPIO.FALLING, callback = buttonPause, bouncetime = 234)
GPIO.add_event_detect(13, GPIO.FALLING, callback = buttonNext, bouncetime = 1234)
# please do not remove my logo screen
player = Popen(['omxplayer', '--adev', 'both', '/home/pi/mp4museum.mp4'],stdin=PIPE,stdout=FNULL)
player.wait()
# the loop
while True:
for files in sorted(glob.glob(r'/media/*/*.mp4')):
player = Popen(['omxplayer','--adev', 'both',files],stdin=PIPE,stdout=FNULL)
player.wait()
| 25.973684
| 97
| 0.73151
| 152
| 987
| 4.684211
| 0.532895
| 0.033708
| 0.02809
| 0.033708
| 0.238764
| 0.160112
| 0.075843
| 0.075843
| 0
| 0
| 0
| 0.02765
| 0.120567
| 987
| 37
| 98
| 26.675676
| 0.792627
| 0.177305
| 0
| 0.095238
| 0
| 0
| 0.095652
| 0.027329
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.238095
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
| 074ce069ee533cbcb1f8fc2b612416adfbbf158a | 4,549 | py | Python | dash_app/compare_alg.py | zeyu2001/ICT1002-Python | 76a2c8ad3e3c4a3c873a9259e2a11488c33f2bf7 | ["MIT"] | 1 | 2020-10-31T06:57:01.000Z | 2020-10-31T06:57:01.000Z | dash_app/compare_alg.py | zeyu2001/ICT1002-Python | 76a2c8ad3e3c4a3c873a9259e2a11488c33f2bf7 | ["MIT"] | null | null | null | dash_app/compare_alg.py | zeyu2001/ICT1002-Python | 76a2c8ad3e3c4a3c873a9259e2a11488c33f2bf7 | ["MIT"] | 1 | 2021-12-04T10:02:16.000Z | 2021-12-04T10:02:16.000Z |
"""
Comparison between the efficiency of the Boyer-Moore algorithm and the naive substring search algorithm.
The runtimes for both algorithms are plotted on the same axes.
"""
import matplotlib.pyplot as plt
import numpy as np
import string
import time
import random
from bm_alg import boyer_moore_match, naive_match
# number of test cases for each iteration
TEST_CASES = 100
# test cases generated based on this pattern (vary_n)
PATTERN = 'ICT1002 is a really great module!'
# test cases generated based on this text (vary_m)
TEXT = PATTERN * 50
def generate_test_cases(pattern, length, k):
"""
Generates <k> test cases with text of length <length> containing <pattern>
Args:
pattern (str): A pattern within the text.
length (int): The length of the pattern
k (int): The number of test cases
Returns:
A list of test cases, i.e. strings that contain <pattern>
"""
result = []
for _ in range(k):
text = pattern
while len(text) < length:
direction = random.choice((0, 1))
# 0 --> Left
if direction == 0:
text = random.choice(string.ascii_lowercase) + text
# 1 --> Right
else:
text = text + random.choice(string.ascii_lowercase)
result.append(text)
return result
def vary_n(max_n):
x = [n for n in range(1, max_n + 1)]
y_bm = []
y_naive = []
for n in x:
print('n =', n)
bm_result = []
naive_result = []
if n >= len(PATTERN):
# generate test cases of length n, which contain PATTERN
test_cases = generate_test_cases(PATTERN, n, TEST_CASES)
else:
# generate test cases of length n, which do not (and can not possibly) contain PATTERN
test_cases = generate_test_cases('', n, TEST_CASES)
for test_case in test_cases:
start = time.time()
naive_match(test_case, PATTERN)
naive_result.append(time.time() - start)
start = time.time()
boyer_moore_match(test_case, PATTERN)
bm_result.append(time.time() - start)
# obtain median runtime (mean is affected by outliers)
y_naive.append(sorted(naive_result)[TEST_CASES // 2])
y_bm.append(sorted(bm_result)[TEST_CASES // 2])
plt.plot(x, y_naive, label="Naive Algorithm")
plt.plot(x, y_bm, label="Boyer-Moore Algorithm")
plt.xlabel("n")
plt.ylabel("Runtime")
plt.title("Substring Search Algorithm Efficiency")
plt.legend()
plt.show()
def vary_m(max_m):
x = [m for m in range(1, max_m + 1)]
y_bm = []
y_naive = []
for m in x:
print('m =', m)
bm_result = []
naive_result = []
# generate test cases of length n
test_cases = generate_test_cases('', m, TEST_CASES)
for test_case in test_cases:
start = time.time()
naive_match(TEXT, test_case)
naive_result.append(time.time() - start)
start = time.time()
boyer_moore_match(TEXT, test_case)
bm_result.append(time.time() - start)
# obtain median runtime (mean is affected by outliers)
y_naive.append(sorted(naive_result)[TEST_CASES // 2])
y_bm.append(sorted(bm_result)[TEST_CASES // 2])
plt.plot(x, y_naive, label="Naive Algorithm")
plt.plot(x, y_bm, label="Boyer-Moore Algorithm")
plt.xlabel("m")
plt.ylabel("Runtime")
plt.title("Substring Search Algorithm Efficiency")
plt.legend()
plt.show()
def main():
done = False
print("m = Length of pattern\nn = Length of text\n")
print("1. Constant m, vary n")
print("2. Constant n, vary m")
print("3. Quit\n")
while not done:
choice = input("Your choice: ")
if choice == '1':
max_n = input("Upper limit of n: ")
while not (max_n.isnumeric() and int(max_n) > 1):
print("That is not a valid number.")
max_n = input("Upper limit of n: ")
vary_n(int(max_n))
elif choice == '2':
max_m = input("Upper limit of m: ")
while not (max_m.isnumeric() and int(max_m) > 1):
print("That is not a valid number.")
max_m = input("Upper limit of m: ")
vary_m(int(max_m))
elif choice == '3':
done = True
else:
print("That is not a valid option.")
if __name__ == '__main__':
main()
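
The two matchers are imported from a local bm_alg module that is not part of this file. For reference, a minimal sketch of what such a module could contain — the repository's actual implementation may differ, and this boyer_moore_match applies only the bad-character rule (enough to exhibit the runtime gap the plots measure):

def naive_match(text, pattern):
    """Return the index of the first occurrence of pattern in text, or -1."""
    n, m = len(text), len(pattern)
    for i in range(n - m + 1):
        if text[i:i + m] == pattern:
            return i
    return -1

def boyer_moore_match(text, pattern):
    """Boyer-Moore with the bad-character rule only; returns index or -1."""
    n, m = len(text), len(pattern)
    if m == 0:
        return 0
    last = {c: i for i, c in enumerate(pattern)}  # last index of each char
    i = j = m - 1  # i scans text, j scans pattern, both right-to-left
    while i < n:
        if text[i] == pattern[j]:
            if j == 0:
                return i  # matched the whole pattern
            i -= 1
            j -= 1
        else:
            # jump so the pattern's last occurrence of text[i] aligns with i
            i += m - min(j, 1 + last.get(text[i], -1))
            j = m - 1
    return -1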
| 28.254658 | 104 | 0.585843 | 623 | 4,549 | 4.130016 | 0.216693 | 0.090944 | 0.04625 | 0.031092 | 0.516129 | 0.506024 | 0.427517 | 0.340459 | 0.340459 | 0.31714 | 0 | 0.009491 | 0.305122 | 4,549 | 160 | 105 | 28.43125 | 0.804492 | 0.196747 | 0 | 0.43 | 0 | 0 | 0.131184 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.06 | 0 | 0.11 | 0.09 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
| 074fa8cb751dc3e01a0d7cf156f12acfd22b5c7b | 616 | py | Python | TSIS_3/3774.py | GMKanat/PP2_spring | 423617d559c5690f689741aaa152b9fee5082baf | ["MIT"] | null | null | null | TSIS_3/3774.py | GMKanat/PP2_spring | 423617d559c5690f689741aaa152b9fee5082baf | ["MIT"] | null | null | null | TSIS_3/3774.py | GMKanat/PP2_spring | 423617d559c5690f689741aaa152b9fee5082baf | ["MIT"] | null | null | null |
# For each node, count the number of nodes in its subtree (its descendants).
ans = dict()    # node -> number of descendants
pairs = dict()  # parent -> list of children

def create_tree(p):
    if p in ans:
        return ans[p]
    else:
        try:
            res = 0
            if p in pairs:
                for ch in pairs[p]:
                    res += create_tree(ch) + 1
            ans[p] = res
            return res
        except:
            # very deep trees can exceed the recursion limit; fail quietly
            pass

n = int(input())
for i in range(0, n - 1):
    child, parent = input().split()
    if parent in pairs:
        pairs[parent].append(child)
    else:
        pairs[parent] = [child]

if n > 0:
    for k in pairs:
        create_tree(k)
    for key in sorted(ans.keys()):
        print(key, ans[key])
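
A hand-traced example of the expected behaviour, on hypothetical input: feeding the script

4
b a
c a
d b

builds pairs = {'a': ['b', 'c'], 'b': ['d']} and prints every node together with the size of its subtree:

a 3
b 1
c 0
d 0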
| 22.814815 | 46 | 0.469156 | 86 | 616 | 3.325581 | 0.383721 | 0.097902 | 0.034965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013812 | 0.412338 | 616 | 27 | 47 | 22.814815 | 0.776243 | 0 | 0 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0.037037 | 0 | 0 | 0.111111 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
| 075329f4475d143e6e7eeffda251a30feb1872ce | 404 | py | Python | italicizer.py | Dorijan-Cirkveni/Miniprojects | 2109275c9c1b9f5e7a286604cbb1b7966dff9798 | ["MIT"] | null | null | null | italicizer.py | Dorijan-Cirkveni/Miniprojects | 2109275c9c1b9f5e7a286604cbb1b7966dff9798 | ["MIT"] | null | null | null | italicizer.py | Dorijan-Cirkveni/Miniprojects | 2109275c9c1b9f5e7a286604cbb1b7966dff9798 | ["MIT"] | null | null | null |
def italicize(s):
    # toggle italics at each double quote: an opening quote keeps the quote
    # and appends "{i}", a closing quote gets "{\i}" inserted before it
    b = False
    res = ''
    for e in s:
        if e == '"':
            if b:
                res += '{\\i}' + e
            else:
                res += e + '{i}'
            b = not b
        else:
            res += e
    return res

def main():
    F = open('test_in.txt', 'r')
    X = F.read()
    F.close()
    print(italicize(X))
    return

if __name__ == "__main__":
    main()
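
A quick sanity check of the quote-toggling behaviour (not part of the original file):

print(italicize('He said "hello" twice'))
# -> He said "{i}hello{\i}" twice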
| 15.538462 | 34 | 0.368812 | 50 | 404 | 2.8 | 0.5 | 0.1 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.467822 | 404 | 25 | 35 | 16.16 | 0.651163 | 0 | 0 | 0.095238 | 0 | 0 | 0.071782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0 | 0 | 0.190476 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |