hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23da0c0d49653c618ac8d67a4ddcb6c90f60383d | 4,489 | py | Python | exp/legacy_exp/run_exp.py | Lossless-Virtual-Switching/Backdraft | 99e8f7acf833d1755a109898eb397c3412fff159 | [
"MIT"
] | null | null | null | exp/legacy_exp/run_exp.py | Lossless-Virtual-Switching/Backdraft | 99e8f7acf833d1755a109898eb397c3412fff159 | [
"MIT"
] | null | null | null | exp/legacy_exp/run_exp.py | Lossless-Virtual-Switching/Backdraft | 99e8f7acf833d1755a109898eb397c3412fff159 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import sys
from time import sleep
import argparse
import subprocess
import json
sys.path.insert(0, '../')
from bkdrft_common import *
# Resolve paths relative to this script so the experiment can be launched
# from any working directory.
cur_script_dir = os.path.dirname(os.path.abspath(__file__))
# BESS pipeline script that configures the software switch for the experiment.
pipeline_config_file = os.path.join(cur_script_dir,
                                    'pipeline.bess')
def _stop_everything(containers):
    """Tear down the experiment environment.

    Stops the BESS daemon (ignoring failures when it is not running) and
    force-stops + removes every docker container described in ``containers``
    (each entry must have a ``name`` key).
    """
    try:
        # Stopping an already-stopped daemon prints an error; it is harmless.
        bessctl_do('daemon stop')
    except:
        # bess was not running
        pass
    for spec in containers:
        for template in ('sudo docker stop -t 0 {name}', 'sudo docker rm {name}'):
            subprocess.run(template.format(**spec), shell=True,
                           stdout=subprocess.PIPE)
def run_docker_container(container):
    """Launch one detached docker container.

    ``container`` is a dict providing ``cpuset``, ``cpu_share``, ``name``,
    ``image`` and ``args`` used to fill the ``docker run`` command line.
    The command is echoed before execution for easier debugging.
    """
    run_cmd = ('sudo docker run --cpuset-cpus={cpuset} --cpus={cpu_share} '
               '-d --network none --name {name} {image} {args}').format(**container)
    print(run_cmd)
    subprocess.run(run_cmd, shell=True)
def main():
    """Run one legacy experiment end to end.

    Loads the container config, (re)starts BESS and the docker containers,
    waits for the client container to finish, then gathers logs and tears
    everything down.  Relies on module-level globals set in ``__main__``:
    ``exp_config_path``, ``cpus`` and ``args``.  Returns 1 on setup failure.
    """
    # Load config file
    containers = json.load(open(exp_config_path))
    if cpus is not None:
        # NOTE(review): assumes the third entry is the container whose cpu
        # share is being overridden -- confirm against exp_configs/*.json.
        containers[2]['cpu_share'] = cpus
    # Write pipeline config file (read by pipeline.bess)
    with open('.pipeline_config.json', 'w') as f:
        # TODO: instead of server and client, write config so it has ip address in it
        # this way it is not important who is server and who is client
        # also this way config file becomes the single source of information about ip address
        txt = json.dumps(containers)
        f.write(txt)
    # Kill anything running from a previous (possibly aborted) run
    _stop_everything(containers)
    # load kernel module
    print('loading BESS kernel module')
    load_bess_kmod()
    # Check BESS kernel module is loaded
    cmd = 'lsmod | grep bess' # this is not reliable
    p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    result = p.stdout.decode()
    print(result)
    if not result:
        print('BESS kmod is not loaded please load this kernel module first')
        return 1
    # Run containers: servers first so they are listening before clients start
    for container in containers:
        if container['type'] == 'server':
            run_docker_container(container)
    # wait some time for servers to come up
    sleep(1)
    # run clients
    for container in containers:
        if container['type'] == 'client':
            run_docker_container(container)
    # Setup BESS config (starts the daemon and runs the pipeline script)
    file_path = pipeline_config_file
    ret = bessctl_do('daemon start -- run file {}'.format(file_path))
    print('all containers and BESS pipeline has been setuped')
    try:
        # Wait for client to finish
        # Note: there is an assumption that the second container is the client
        # TODO: Relax this assumption
        client_container_name = containers[1]['name']
        #client_pid = get_container_pid(client_container_name)
        while docker_container_is_running(client_container_name):
            sleep(1)
    except KeyboardInterrupt as e:
        print('Experiment interrupted')
    # Gather experiment result
    ## iperf3 logs (best effort: not every experiment produces this file)
    tmp_txt = ''
    try:
        subprocess.check_call('sudo docker cp {}:/root/iperf3.log .'.format(client_container_name), shell=True)
        subprocess.run('sudo chown $USER ./iperf3.log', shell=True)
        with open('iperf3.log', 'r') as logfile:
            tmp_txt = logfile.read()
    except:
        pass
    ## mutilate logs
    logs = get_docker_container_logs(client_container_name)
    logs = '\n'.join(['=== client ===', logs, '=== iperf ===', tmp_txt])
    if args.output:
        output_file = open(args.output, 'w')
        output_file.write(logs)
    else:
        print(logs)
    # NOTE(review): assumes the first container is the server -- same
    # positional assumption as the client above.
    logs = get_docker_container_logs(containers[0]['name'])
    logs = '\n'.join(['=== server ===', logs])
    print(logs)
    # Dump BESS port statistics for the report
    p = bessctl_do('show port', subprocess.PIPE)
    print(p.stdout.decode())
    # Stop and clear test environment
    _stop_everything(containers)
if __name__ == '__main__':
    # Each supported experiment name maps to exp_configs/<name>.json.
    supported_exps = ('iperf3', 'apache', 'memcached_iperf3',
                      'memcached_shuffle')
    parser = argparse.ArgumentParser()
    parser.add_argument('experiment', choices=supported_exps)
    parser.add_argument('--output', default=None, required=False,
                        help="results will be writen in the given file")
    parser.add_argument('--cpus', type=float, default=None)
    args = parser.parse_args()
    exp = args.experiment
    # Globals consumed by main(): the container config path and the optional
    # cpu-share override.
    exp_config_path = 'exp_configs/{}.json'.format(exp)
    cpus = args.cpus
    main()
| 30.746575 | 116 | 0.655825 |
b8cd904fb69e3d6cdcc6586ca9f289f2c3105b74 | 1,805 | py | Python | source/machine_connector/utils/tests/test_init_msg_metadata.py | aws-solutions/machine-to-cloud-connectivity-framework | 980f2892ea2636ace604ea8f33b594091a9f24d2 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-10-30T13:03:09.000Z | 2021-10-30T13:03:09.000Z | source/machine_connector/utils/tests/test_init_msg_metadata.py | aws-solutions/machine-to-cloud-connectivity-framework | 980f2892ea2636ace604ea8f33b594091a9f24d2 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-11-03T12:00:12.000Z | 2022-03-08T06:03:51.000Z | source/machine_connector/utils/tests/test_init_msg_metadata.py | aws-solutions/machine-to-cloud-connectivity-framework | 980f2892ea2636ace604ea8f33b594091a9f24d2 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-10-30T13:03:03.000Z | 2022-03-21T00:11:20.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from unittest import mock
@mock.patch.dict(os.environ, {"SITE_NAME": "test_site", "AREA": "test_area", "PROCESS": "test_process", "MACHINE_NAME": "test_machine_name"})
def test_init_user_message_init(mocker):
    """InitMessage should pick up hierarchy attributes from the environment."""
    # CheckpointManager touches external state; stub it out before importing.
    mocker.patch("checkpoint_manager.CheckpointManager")
    from init_msg_metadata import InitMessage
    message = InitMessage()
    expected_attrs = {
        "SITE_NAME": "test_site",
        "AREA": "test_area",
        "PROCESS": "test_process",
        "MACHINE_NAME": "test_machine_name",
    }
    for attr, value in expected_attrs.items():
        assert getattr(message, attr) == value
@mock.patch.dict(os.environ, {"SITE_NAME": "test_site", "AREA": "test_area", "PROCESS": "test_process", "MACHINE_NAME": "test_machine_name"})
def test_init_user_message_usrmsg(mocker):
    """init_user_message() should build the camelCase metadata dict."""
    # CheckpointManager touches external state; stub it out before importing.
    mocker.patch("checkpoint_manager.CheckpointManager")
    from init_msg_metadata import InitMessage
    client = InitMessage()
    actual = client.init_user_message()
    expected = {
        "siteName": "test_site",
        "area": "test_area",
        "process": "test_process",
        "machineName": "test_machine_name",
    }
    for key in expected:
        assert key in actual
    assert actual == expected
    for key in expected:
        assert actual[key] == expected[key]
| 45.125 | 141 | 0.751247 |
80c068b15c898a0671aeea15c742439b49cd1e36 | 15,817 | py | Python | examples/pytorch/name_entity_recognition/main.py | amanapte/graph4nlp | 1ec5464b5d6b1f9c36297171cfedf617021273c3 | [
"Apache-2.0"
] | 1 | 2021-12-09T21:56:49.000Z | 2021-12-09T21:56:49.000Z | examples/pytorch/name_entity_recognition/main.py | amanapte/graph4nlp | 1ec5464b5d6b1f9c36297171cfedf617021273c3 | [
"Apache-2.0"
] | null | null | null | examples/pytorch/name_entity_recognition/main.py | amanapte/graph4nlp | 1ec5464b5d6b1f9c36297171cfedf617021273c3 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing
import torch.optim as optim
from torch.utils.data import DataLoader
from graph4nlp.pytorch.data.data import from_batch
from graph4nlp.pytorch.modules.evaluation.accuracy import Accuracy
from graph4nlp.pytorch.modules.graph_construction import (
ConstituencyBasedGraphConstruction,
IEBasedGraphConstruction,
NodeEmbeddingBasedRefinedGraphConstruction,
)
from graph4nlp.pytorch.modules.graph_construction.node_embedding_based_graph_construction import (
NodeEmbeddingBasedGraphConstruction,
)
from graph4nlp.pytorch.modules.utils.generic_utils import to_cuda
from conll import ConllDataset
from conlleval import evaluate
from dependency_graph_construction_without_tokenize import (
DependencyBasedGraphConstruction_without_tokenizer,
)
from line_graph_construction import LineBasedGraphConstruction
from model import Word2tag
torch.multiprocessing.set_sharing_strategy("file_system")
cudnn.benchmark = False
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# from torchcrf import CRF
def all_to_cuda(data, device=None):
    """Move a tensor, or the tensor entries of a one-level list/dict, to ``device``.

    Non-tensor entries are left untouched.  Containers are updated in place
    and the (possibly new) object is returned.
    """
    if isinstance(data, torch.Tensor):
        return to_cuda(data, device)
    if isinstance(data, (list, dict)):
        indices = data.keys() if isinstance(data, dict) else range(len(data))
        for idx in indices:
            entry = data[idx]
            if isinstance(entry, torch.Tensor):
                data[idx] = to_cuda(entry, device)
    return data
def conll_score(preds, tgts, tag_types):
    """Compute CoNLL precision / recall / F1 for a set of sentences.

    Args:
        preds: list of per-sentence prediction tensors (one tag id per token).
        tgts: list of per-sentence gold-tag tensors (one tag id per token).
        tag_types: list mapping tag id -> tag string (e.g. "B-ORG").

    Returns:
        (precision, recall, f1) as reported by ``conlleval.evaluate``.
    """
    # Flatten both sides to one tag id per token.  The original code appended
    # whole per-sentence arrays to pred_list while flattening tgt_list, so
    # int(pred) raised on any sentence with more than one token; extend both
    # symmetrically instead.
    pred_list = []
    tgt_list = []
    for pred in preds:
        pred_list.extend(pred.cpu().clone().numpy().tolist())
    for tgt in tgts:
        tgt_list.extend(tgt.cpu().clone().numpy().tolist())
    pred_tags = [tag_types[int(p)] for p in pred_list]
    tgt_tags = [tag_types[int(t)] for t in tgt_list]
    prec, rec, f1 = evaluate(tgt_tags, pred_tags, verbose=False)
    return prec, rec, f1
def write_file(tokens_collect, pred_collect, tag_collect, file_name, tag_types):
    """Write per-token predictions in CoNLL evaluation format.

    Each output line is ``"<index> <word> <gold-tag> <pred-tag>"`` with a
    1-based token index; sentences are written back to back.

    Args:
        tokens_collect: list of token-string lists, one per sentence.
        pred_collect: list of per-sentence predicted tag-id tensors.
        tag_collect: list of per-sentence gold tag-id tensors.
        file_name: path of the output file (overwritten).
        tag_types: list mapping tag id -> tag string.
    """
    # ``with`` guarantees the file is closed even if a tensor op raises
    # (the original left the handle open on error).
    with open(file_name, "w") as f:
        for sent_token, pred_t, tag_t in zip(tokens_collect, pred_collect, tag_collect):
            sent_pred = pred_t.cpu().clone().numpy()
            sent_tag = tag_t.cpu().clone().numpy()
            for word_idx, w in enumerate(sent_token):
                tgt = tag_types[sent_tag[word_idx].item()]
                pred = tag_types[sent_pred[word_idx].item()]
                f.write("%d %s %s %s\n" % (word_idx + 1, w, tgt, pred))
def get_tokens(g_list):
    """Collect the token strings of every graph in ``g_list``.

    Reads each graph's ``node_attributes`` (a sequence of dicts with a
    ``"token"`` key) and drops the first "ROOT" placeholder, if present.
    Returns one token list per graph.
    """
    all_tokens = []
    for graph in g_list:
        words = [attrs["token"] for attrs in graph.node_attributes]
        if "ROOT" in words:
            words.remove("ROOT")
        all_tokens.append(words)
    return all_tokens
class Conll:
    """End-to-end NER runner for the CoNLL dataset.

    Builds the dataset/dataloaders for the graph topology selected on the
    command line, constructs the Word2tag model, and drives the train /
    evaluate / test loops.  Relies on the module-level ``args`` namespace
    parsed in ``__main__``.
    """

    def __init__(self):
        super(Conll, self).__init__()
        # Tag inventory; indices into this list are the model's class ids.
        self.tag_types = ["I-PER", "O", "B-ORG", "B-LOC", "I-ORG", "I-MISC", "I-LOC", "B-MISC"]
        if args.gpu > -1:
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        self.checkpoint_path = "./checkpoints/"
        if not os.path.exists(self.checkpoint_path):
            os.mkdir(self.checkpoint_path)
        self._build_dataloader()
        print("finish dataloading")
        self._build_model()
        print("finish building model")
        self._build_optimizer()
        self._build_evaluation()

    def _build_dataloader(self):
        """Construct the ConllDataset for the chosen graph type and wrap the
        train/val/test splits in DataLoaders."""
        print("starting build the dataset")
        if args.graph_type == "line_graph":
            dataset = ConllDataset(
                root_dir="examples/pytorch/name_entity_recognition/conll",
                topology_builder=LineBasedGraphConstruction,
                graph_type="static",
                pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                topology_subdir="LineGraph",
                tag_types=self.tag_types,
            )
        elif args.graph_type == "dependency_graph":
            dataset = ConllDataset(
                root_dir="examples/pytorch/name_entity_recognition/conll",
                topology_builder=DependencyBasedGraphConstruction_without_tokenizer,
                graph_type="static",
                pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                topology_subdir="DependencyGraph",
                tag_types=self.tag_types,
            )
        elif args.graph_type == "node_emb":
            dataset = ConllDataset(
                root_dir="examples/pytorch/name_entity_recognition/conll",
                topology_builder=NodeEmbeddingBasedGraphConstruction,
                graph_type="dynamic",
                pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                topology_subdir="DynamicGraph_node_emb",
                tag_types=self.tag_types,
                merge_strategy=None,
                dynamic_graph_type=args.graph_type
                if args.graph_type in ("node_emb", "node_emb_refined")
                else None,
            )
        elif args.graph_type == "node_emb_refined":
            # The refined dynamic graph starts from a static "init" topology.
            if args.init_graph_type == "line":
                dynamic_init_topology_builder = LineBasedGraphConstruction
            elif args.init_graph_type == "dependency":
                dynamic_init_topology_builder = DependencyBasedGraphConstruction_without_tokenizer
            elif args.init_graph_type == "constituency":
                dynamic_init_topology_builder = ConstituencyBasedGraphConstruction
            elif args.init_graph_type == "ie":
                # merge_strategy = "global"
                dynamic_init_topology_builder = IEBasedGraphConstruction
            else:
                # init_topology_builder
                raise RuntimeError("Define your own init_topology_builder")
            dataset = ConllDataset(
                root_dir="examples/pytorch/name_entity_recognition/conll",
                topology_builder=NodeEmbeddingBasedRefinedGraphConstruction,
                graph_type="dynamic",
                pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                topology_subdir="DynamicGraph_node_emb_refined",
                tag_types=self.tag_types,
                dynamic_graph_type=args.graph_type
                if args.graph_type in ("node_emb", "node_emb_refined")
                else None,
                dynamic_init_topology_builder=dynamic_init_topology_builder,
                dynamic_init_topology_aux_args={"dummy_param": 0},
            )
        print(len(dataset.train))
        print("strating loading the training data")
        self.train_dataloader = DataLoader(
            dataset.train,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=1,
            collate_fn=dataset.collate_fn,
        )
        print("strating loading the validating data")
        self.val_dataloader = DataLoader(
            dataset.val, batch_size=100, shuffle=True, num_workers=1, collate_fn=dataset.collate_fn
        )
        print("strating loading the testing data")
        self.test_dataloader = DataLoader(
            dataset.test, batch_size=100, shuffle=True, num_workers=1, collate_fn=dataset.collate_fn
        )
        print("strating loading the vocab")
        self.vocab = dataset.vocab_model

    def _build_model(self):
        """Instantiate the Word2tag model on the selected device."""
        self.model = Word2tag(self.vocab, args, device=self.device).to(self.device)

    def _build_optimizer(self):
        """Adam over all trainable parameters."""
        parameters = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizer = optim.Adam(parameters, lr=args.lr, weight_decay=args.weight_decay)

    def _build_evaluation(self):
        self.metrics = Accuracy(["F1", "precision", "recall"])

    def train(self):
        """Train for ``args.epochs`` epochs, evaluating after every epoch.

        Saves a checkpoint whenever the validation F1 improves.
        Returns (best_f1, epoch_of_best_f1).
        """
        max_score = -1
        max_idx = 0
        for epoch in range(args.epochs):
            self.model.train()
            print("Epoch: {}".format(epoch))
            # NOTE(review): pred_collect/gt_collect are accumulated but never
            # consumed in the training loop -- candidates for removal.
            pred_collect = []
            gt_collect = []
            for data in self.train_dataloader:
                graph, tgt = data["graph_data"], data["tgt_tag"]
                tgt_l = [tgt_.to(self.device) for tgt_ in tgt]
                graph = graph.to(self.device)
                pred_tags, loss = self.model(graph, tgt_l, require_loss=True)
                pred_collect.extend(pred_tags)  # pred: list of batch_sentence pred tensor
                gt_collect.extend(tgt)  # tgt:list of sentence token tensor
                # num_tokens=len(torch.cat(pred_tags).view(-1))
                print("Epoch: {}".format(epoch) + " loss:" + str(loss.cpu().item()))
                self.optimizer.zero_grad()
                loss.backward()
                # Gradient clipping at norm 1 keeps the BiLSTM stable.
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)
                self.optimizer.step()
            if epoch % 1 == 0:
                score = self.evaluate(epoch)
                if score > max_score:
                    self.model.save_checkpoint(self.checkpoint_path, "best.pt")
                    max_idx = epoch
                max_score = max(max_score, score)
        return max_score, max_idx

    def evaluate(self, epoch):
        """Score the model on the validation split; returns CoNLL F1."""
        self.model.eval()
        pred_collect = []
        gt_collect = []
        tokens_collect = []
        with torch.no_grad():
            for data in self.val_dataloader:
                graph, tgt = data["graph_data"], data["tgt_tag"]
                graph = graph.to(self.device)
                tgt_l = [tgt_.to(self.device) for tgt_ in tgt]
                pred, loss = self.model(graph, tgt_l, require_loss=True)
                pred_collect.extend(pred)  # pred: list of batch_sentence pred tensor
                gt_collect.extend(tgt)  # tgt:list of sentence token tensor
                tokens_collect.extend(get_tokens(from_batch(graph)))
        prec, rec, f1 = conll_score(pred_collect, gt_collect, self.tag_types)
        print("Testing results: precision is %5.2f, rec is %5.2f, f1 is %5.2f" % (prec, rec, f1))
        # ``loss`` here is the loss of the final validation batch only.
        print("Epoch: {}".format(epoch) + " loss:" + str(loss.cpu().item()))
        return f1

    @torch.no_grad()
    def test(self):
        """Score the model on the test split; returns CoNLL F1."""
        self.model.eval()
        pred_collect = []
        tokens_collect = []
        tgt_collect = []
        with torch.no_grad():
            for data in self.test_dataloader:
                graph, tgt = data["graph_data"], data["tgt_tag"]
                graph = graph.to(self.device)
                tgt_l = [tgt_.to(self.device) for tgt_ in tgt]
                pred, loss = self.model(graph, tgt_l, require_loss=True)
                # pred = logits2tag(g)
                pred_collect.extend(pred)
                tgt_collect.extend(tgt)
                tokens_collect.extend(get_tokens(from_batch(graph)))
        prec, rec, f1 = conll_score(pred_collect, tgt_collect, self.tag_types)
        print("Testing results: precision is %5.2f, rec is %5.2f, f1 is %5.2f" % (prec, rec, f1))
        return f1
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="NER")
    # --- hardware / schedule -------------------------------------------------
    parser.add_argument("--gpu", type=int, default=-1, help="which GPU to use.")
    parser.add_argument("--epochs", type=int, default=10, help="number of training epochs")
    # --- model architecture --------------------------------------------------
    parser.add_argument(
        "--direction_option",
        type=str,
        default="bi_fuse",
        help="direction type (`undirected`, `bi_fuse`, `bi_sep`)",
    )
    parser.add_argument(
        "--lstm_num_layers", type=int, default=1, help="number of hidden layers in lstm"
    )
    parser.add_argument(
        "--gnn_num_layers", type=int, default=1, help="number of hidden layers in gnn"
    )
    parser.add_argument("--init_hidden_size", type=int, default=300, help="initial_emb_hidden_size")
    parser.add_argument("--hidden_size", type=int, default=128, help="initial_emb_hidden_size")
    parser.add_argument("--lstm_hidden_size", type=int, default=80, help="initial_emb_hidden_size")
    parser.add_argument("--num_class", type=int, default=8, help="num_class")
    parser.add_argument(
        "--residual", action="store_true", default=False, help="use residual connection"
    )
    parser.add_argument("--word_dropout", type=float, default=0.5, help="input feature dropout")
    parser.add_argument("--tag_dropout", type=float, default=0.5, help="input feature dropout")
    # NOTE(review): type=list with a float default is almost certainly wrong --
    # argparse would split a passed string into characters; confirm intent.
    parser.add_argument(
        "--rnn_dropout", type=list, default=0.33, help="dropout for rnn in word_emb"
    )
    parser.add_argument("--lr", type=float, default=0.01, help="learning rate")
    parser.add_argument("--weight-decay", type=float, default=5e-5, help="weight decay")
    parser.add_argument(
        "--aggregate_type",
        type=str,
        default="mean",
        help="aggregate type: 'mean','gcn','pool','lstm'",
    )
    parser.add_argument(
        "--gnn_type", type=str, default="graphsage", help="ingnn type: 'gat','graphsage','ggnn'"
    )
    # NOTE(review): type=bool flags treat ANY non-empty string (even "False")
    # as True; action="store_true" is the usual fix -- confirm before changing.
    parser.add_argument("--use_gnn", type=bool, default=True, help="whether to use gnn")
    parser.add_argument("--batch_size", type=int, default=100, help="batch size for training")
    # --- graph construction --------------------------------------------------
    parser.add_argument(
        "--graph_type",
        type=str,
        default="line_graph",
        help="graph_type:line_graph, dependency_graph, dynamic_graph",
    )
    parser.add_argument(
        "--init_graph_type",
        type=str,
        default="line",
        help="initial graph construction type ('line', 'dependency', 'constituency', 'ie')",
    )
    parser.add_argument(
        "--pre_word_emb_file", type=str, default=None, help="path of pretrained_word_emb_file"
    )
    parser.add_argument(
        "--gl_num_heads", type=int, default=1, help="num of heads for dynamic graph construction"
    )
    # NOTE(review): type=int with default 0.5 -- a passed value is truncated
    # to an integer; probably should be type=float.
    parser.add_argument(
        "--gl_epsilon", type=int, default=0.5, help="epsilon for graph sparsification"
    )
    parser.add_argument("--gl_top_k", type=int, default=None, help="top k for graph sparsification")
    parser.add_argument(
        "--gl_smoothness_ratio",
        type=float,
        default=None,
        help="smoothness ratio for graph regularization loss",
    )
    parser.add_argument(
        "--gl_sparsity_ratio",
        type=float,
        default=None,
        help="sparsity ratio for graph regularization loss",
    )
    parser.add_argument(
        "--gl_connectivity_ratio",
        type=float,
        default=None,
        help="connectivity ratio for graph regularization loss",
    )
    parser.add_argument(
        "--init_adj_alpha",
        type=float,
        default=0.8,
        help="alpha ratio for combining initial graph adjacency matrix",
    )
    parser.add_argument(
        "--gl_metric_type",
        type=str,
        default="weighted_cosine",
        help="similarity metric type for dynamic graph construction ('weighted_cosine', 'attention', \
            'rbf_kernel', 'cosine')",
    )
    parser.add_argument(
        "--no_fix_word_emb",
        type=bool,
        default=False,
        help="Not fix pretrained word embeddings (default: false)",
    )
    parser.add_argument(
        "--no_fix_bert_emb",
        type=bool,
        default=False,
        help="Not fix pretrained word embeddings (default: false)",
    )
    import datetime
    # Wall-clock timing of the full train + test run.
    starttime = datetime.datetime.now()
    # long running
    # do something other
    args = parser.parse_args()
    runner = Conll()
    max_score, max_idx = runner.train()
    print("Train finish, best score: {:.3f}".format(max_score))
    print(max_idx)
    score = runner.test()
    endtime = datetime.datetime.now()
    print((endtime - starttime).seconds)
| 39.941919 | 102 | 0.626351 |
9dfa731282bc36b38cb39f666f657b7150df9fea | 3,205 | py | Python | launchable/commands/record/build.py | yoshiori/cli | 327ff6c2dd7672546c9fd95f5fd46eebc9ff0923 | [
"Apache-2.0"
] | null | null | null | launchable/commands/record/build.py | yoshiori/cli | 327ff6c2dd7672546c9fd95f5fd46eebc9ff0923 | [
"Apache-2.0"
] | null | null | null | launchable/commands/record/build.py | yoshiori/cli | 327ff6c2dd7672546c9fd95f5fd46eebc9ff0923 | [
"Apache-2.0"
] | null | null | null | import re
import click
import subprocess
import json
import os
from ...utils.token import parse_token
from .commit import commit
from ...utils.env_keys import REPORT_ERROR_KEY
from ...utils.http_client import LaunchableClient
@click.command()
@click.option(
    '--name',
    'build_name',
    help='build name',
    required=True,
    type=str,
    metavar='BUILD_NAME'
)
@click.option(
    '--source',
    help='path to local Git workspace, optionally prefixed by a label. '
    ' like --source path/to/ws or --source main=path/to/ws',
    default=["."],
    metavar="REPO_NAME",
    multiple=True
)
@click.pass_context
def build(ctx, build_name, source):
    # Record a build on the Launchable service: for every given workspace,
    # record its commits, collect HEAD + submodule hashes, and POST them to
    # the /builds intake endpoint.
    # (Deliberately no docstring: click would surface it as --help text.)
    token, org, workspace = parse_token()
    # This command accepts REPO_NAME=REPO_DIST and REPO_DIST
    repos = [s.split('=') if re.match(r'[^=]+=[^=]+', s) else (s, s)
             for s in source]
    # TODO: if repo_dist is absolute path, warn the user that that's probably not what they want to do
    for (name, repo_dist) in repos:
        ctx.invoke(commit, source=repo_dist)
    # Pair each repo label with its current HEAD hash.
    sources = [(
        name,
        subprocess.check_output(
            "git rev-parse HEAD".split(), cwd=repo_dist
        ).decode().replace("\n", "")
    ) for name, repo_dist in repos]
    submodules = []
    for repo_name, repo_dist in repos:
        # invoke git directly because dulwich's submodule feature was broken
        submodule_stdouts = subprocess.check_output(
            "git submodule status --recursive".split(), cwd=repo_dist
        ).decode().splitlines()
        for submodule_stdout in submodule_stdouts:
            # the output is e.g.
            # "+bbf213437a65e82dd6dda4391ecc5d598200a6ce sub1 (heads/master)"
            matched = re.search(
                r"^[\+\-U ](?P<hash>[a-f0-9]{40}) (?P<name>\w+)",
                submodule_stdout
            )
            if matched:
                hash = matched.group('hash')
                name = matched.group('name')
                if hash and name:
                    submodules.append((repo_name+"/"+name, hash))
    # Note: currently becomes unique command args and submodules by the hash.
    # But they can be conflict between repositories.
    uniq_submodules = {hash: (name, hash)
                       for name, hash in sources + submodules}.values()
    try:
        commitHashes = [{
            'repositoryName': name,
            'commitHash': hash
        } for name, hash in uniq_submodules]
        if not (commitHashes[0]['repositoryName']
                and commitHashes[0]['commitHash']):
            exit('Please specify --source as --source .')
        payload = {
            "buildNumber": build_name,
            "commitHashes": commitHashes
        }
        headers = {
            "Content-Type": "application/json",
        }
        path = "/intake/organizations/{}/workspaces/{}/builds".format(
            org, workspace)
        client = LaunchableClient(token)
        res = client.request("post", path, data=json.dumps(
            payload).encode(), headers=headers)
        res.raise_for_status()
    except Exception as e:
        # Errors are swallowed (printed) unless REPORT_ERROR_KEY is set, so a
        # failed recording does not break the user's CI pipeline.
        if os.getenv(REPORT_ERROR_KEY):
            raise e
        else:
            print(e)
| 31.116505 | 102 | 0.589392 |
c98bfac9ee6dc79e87c404f65662771e7b1e5e2a | 9,606 | py | Python | src/frr/tests/topotests/lib/ltemplate.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | src/frr/tests/topotests/lib/ltemplate.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | src/frr/tests/topotests/lib/ltemplate.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Part of NetDEF Topology Tests
#
# Copyright (c) 2017 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
ltemplate.py: LabN template for FRR tests.
"""
import os
import sys
import platform
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.lutil import *
# Required to instantiate the topology builder class.
customize = None
class LTemplate:
    """Per-test state for LabN template tests.

    Loads the test directory's ``customize.py``, builds the Topogen topology,
    loads router configs, starts the routers, and runs the optional
    pre/post-start hooks defined by customize.py.
    """

    test = None
    testdir = None
    scriptdir = None
    logdir = None
    # Results of the optional customize.py hooks; tests are skipped when the
    # pre-start hook fails.
    prestarthooksuccess = True
    poststarthooksuccess = True
    iproute2Ver = None

    def __init__(self, test, testdir):
        """Load ``<testdir>/customize.py`` into the module-global ``customize``."""
        pathname = os.path.join(testdir, "customize.py")
        global customize
        if sys.version_info >= (3, 5):
            import importlib.util

            spec = importlib.util.spec_from_file_location("customize", pathname)
            customize = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(customize)
        else:
            # Python < 3.5 fallback; ``imp`` is deprecated on modern Pythons.
            import imp

            customize = imp.load_source("customize", pathname)
        self.test = test
        self.testdir = testdir
        self.scriptdir = testdir
        self.logdir = ""
        logger.info("LTemplate: " + test)

    def setup_module(self, mod):
        "Sets up the pytest environment"
        # This function initiates the topology build with Topogen...
        tgen = Topogen(customize.build_topo, mod.__name__)
        # ... and here it calls Mininet initialization functions.
        tgen.start_topology()
        self.logdir = tgen.logdir
        logger.info("Topology started")
        try:
            self.prestarthooksuccess = customize.ltemplatePreRouterStartHook()
        except AttributeError:
            # hook not defined by this test's customize.py -- that is fine
            logger.debug("ltemplatePreRouterStartHook() not defined")
        if self.prestarthooksuccess != True:
            logger.info("ltemplatePreRouterStartHook() failed, skipping test")
            return
        # This is a sample of configuration loading.
        router_list = tgen.routers()
        # For all registred routers, load the zebra configuration file
        for rname, router in router_list.items():
            logger.info("Setting up %s" % rname)
            for rd_val in TopoRouter.RD:
                config = os.path.join(
                    self.testdir, "{}/{}.conf".format(rname, TopoRouter.RD[rd_val])
                )
                prog = os.path.join(tgen.net[rname].daemondir, TopoRouter.RD[rd_val])
                if os.path.exists(config):
                    if os.path.exists(prog):
                        router.load_config(rd_val, config)
                    else:
                        # Config file exists for a daemon that is not built.
                        logger.warning(
                            "{} not found, but have {}.conf file".format(
                                prog, TopoRouter.RD[rd_val]
                            )
                        )
        # After loading the configurations, this function loads configured daemons.
        logger.info("Starting routers")
        tgen.start_router()
        try:
            self.poststarthooksuccess = customize.ltemplatePostRouterStartHook()
        except AttributeError:
            # hook not defined by this test's customize.py -- that is fine
            logger.debug("ltemplatePostRouterStartHook() not defined")
        luStart(baseScriptDir=self.scriptdir, baseLogDir=self.logdir, net=tgen.net)
# Module-global LTemplate instance; initialized by setup_module below.
_lt = None


def setup_module(mod):
    """pytest hook: create the LTemplate for this test module and build its
    topology.  The test directory name is derived from the module name."""
    global _lt
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    test = mod.__name__[: mod.__name__.rfind(".")]
    testdir = os.path.join(root, test)
    # don't do this for now as reload didn't work as expected
    # fixup sys.path, want test dir there only once
    # try:
    #     sys.path.remove(testdir)
    # except ValueError:
    #     logger.debug(testdir+" not found in original sys.path")
    # add testdir
    # sys.path.append(testdir)
    # init class
    _lt = LTemplate(test, testdir)
    _lt.setup_module(mod)
    # drop testdir
    # sys.path.remove(testdir)
def teardown_module(mod):
    global _lt
    # NOTE(review): the string below is a no-op statement, not a docstring
    # (it follows the ``global``); left in place to keep this a doc-only change.
    "Teardown the pytest environment"
    tgen = get_topogen()
    # Only report lutil results when a test actually ran its scripts.
    if _lt != None and _lt.scriptdir != None and _lt.prestarthooksuccess == True:
        luShowResults(logger.info)
        print(luFinish())
    # This function tears down the whole topology.
    tgen.stop_topology()
    _lt = None
def ltemplateTest(
    script, SkipIfFailed=True, CallOnFail=None, CheckFuncStr=None, KeepGoing=False
):
    """Run one lutil script against the running topology.

    Args:
        script: script filename, absolute or relative to the test's scriptdir.
        SkipIfFailed: skip (pytest.skip) if earlier routers already failed.
        CallOnFail: name of a callable, eval'd and passed to luInclude.
        CheckFuncStr: expression eval'd as a precondition; non-True skips.
        KeepGoing: continue despite prior failures and do not assert at the end.
    """
    global _lt
    if _lt == None or _lt.prestarthooksuccess != True:
        return
    tgen = get_topogen()
    if not os.path.isfile(script):
        if not os.path.isfile(os.path.join(_lt.scriptdir, script)):
            logger.error("Could not find script file: " + script)
            # NOTE(review): asserting a non-empty string is always true; this
            # can never fail -- likely meant ``assert False, "..."``.
            assert "Could not find script file: " + script
    logger.info("Starting template test: " + script)
    numEntry = luNumFail()
    if SkipIfFailed and tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    if numEntry > 0:
        if not KeepGoing:
            pytest.skip("Have %d errors" % numEntry)
    # NOTE(review): eval() on caller-supplied strings; acceptable only because
    # callers are trusted test code.
    if CheckFuncStr != None:
        check = eval(CheckFuncStr)
        if check != True:
            pytest.skip("Check function '" + CheckFuncStr + "' returned: " + check)
    if CallOnFail != None:
        CallOnFail = eval(CallOnFail)
    luInclude(script, CallOnFail)
    numFail = luNumFail() - numEntry
    if numFail > 0:
        luShowFail()
        fatal_error = "%d tests failed" % numFail
        if not KeepGoing:
            # Deliberately-false comparison so pytest prints fatal_error.
            assert (
                "scripts/cleanup_all.py failed" == "See summary output above"
            ), fatal_error
# Memory leak test template: reports router memory leaks when the topogen
# memleak option is enabled, otherwise skips.
def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()
    if not tgen.is_memleak_enabled():
        pytest.skip("Memory leak test/report is disabled")
    tgen.report_memory_leaks()
class ltemplateRtrCmd:
    """Run shell commands on topology routers and count the results.

    Keeps four counters: regex matches, regex misses, non-empty outputs,
    and empty outputs.
    """

    def __init__(self):
        self.resetCounts()

    def doCmd(self, tgen, rtr, cmd, checkstr=None):
        """Run ``cmd`` on router ``rtr``.

        With ``checkstr`` given, returns the ``re.search`` result against the
        stripped output (updating match/nomatch counters); otherwise logs the
        output and returns None.
        """
        logger.info("doCmd: {} {}".format(rtr, cmd))
        text = tgen.net[rtr].cmd(cmd).strip()
        if not text:
            logger.info("No output")
            self.none += 1
            return None
        self.output += 1
        if checkstr is not None:
            found = re.search(checkstr, text)
            if found is None:
                self.nomatch += 1
            else:
                self.match += 1
            return found
        logger.info("output: " + text)
        return None

    def resetCounts(self):
        """Zero all result counters."""
        self.match = self.nomatch = self.output = self.none = 0

    def getMatch(self):
        return self.match

    def getNoMatch(self):
        return self.nomatch

    def getOutput(self):
        return self.output

    def getNone(self):
        return self.none
def ltemplateVersionCheck(
    vstr, rname="r1", compstr="<", cli=False, kernel="4.9", iproute2=None, mpls=True
):
    """Return True when the environment is new enough to run the tests.

    Otherwise return a string explaining why the tests should be skipped
    (template init failure, missing MPLS, old kernel/iproute2/FRR version).
    """
    tgen = get_topogen()
    router = tgen.gears[rname]

    if cli:
        logger.info("calling mininet CLI")
        tgen.mininet_cli()
        logger.info("exited mininet CLI")

    if _lt is None:
        ret = "Template not initialized"
        return ret
    if _lt.prestarthooksuccess != True:
        ret = "ltemplatePreRouterStartHook failed"
        return ret
    if _lt.poststarthooksuccess != True:
        ret = "ltemplatePostRouterStartHook failed"
        return ret

    if mpls == True and tgen.hasmpls != True:
        ret = "MPLS not initialized"
        return ret

    if kernel != None:
        krel = platform.release()
        if topotest.version_cmp(krel, kernel) < 0:
            ret = "Skipping tests, old kernel ({} < {})".format(krel, kernel)
            return ret

    if iproute2 != None:
        if _lt.iproute2Ver == None:
            # collect/log info on iproute2
            cc = ltemplateRtrCmd()
            # BUG FIX: raw string — "\d" / "\." are invalid escapes in a plain
            # string literal (DeprecationWarning; SyntaxWarning on 3.12+).
            found = cc.doCmd(
                tgen, rname, "apt-cache policy iproute2", r"Installed: ([\d\.]*)"
            )
            if found != None:
                iproute2Ver = found.group(1)
            else:
                iproute2Ver = "0-unknown"
            logger.info("Have iproute2 version=" + iproute2Ver)
        # NOTE(review): if `_lt.iproute2Ver` is already set, `iproute2Ver` is
        # undefined here — presumably `_lt.iproute2Ver` should be consulted
        # instead; confirm against upstream before changing.
        if topotest.version_cmp(iproute2Ver, iproute2) < 0:
            ret = "Skipping tests, old iproute2 ({} < {})".format(iproute2Ver, iproute2)
            return ret

    ret = True
    try:
        if router.has_version(compstr, vstr):
            ret = "Skipping tests, old FRR version {} {}".format(compstr, vstr)
            return ret
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt; any comparison error means "new enough".
        ret = True
    return ret
# Allow running this template module directly for ad-hoc testing.
if __name__ == "__main__":
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))
| 29.925234 | 88 | 0.607849 |
9a26cf7e42638037793aa32578c21b3018e9320e | 40 | py | Python | yabs/__init__.py | mar10/yabs | 6144398c67e9b0a9f12e7e2cf3583041ba7a960e | [
"MIT"
] | 8 | 2020-07-05T18:08:07.000Z | 2022-03-03T18:22:44.000Z | yabs/__init__.py | mar10/yabs | 6144398c67e9b0a9f12e7e2cf3583041ba7a960e | [
"MIT"
] | 4 | 2021-04-01T16:38:21.000Z | 2021-04-01T16:38:28.000Z | yabs/__init__.py | mar10/yabs | 6144398c67e9b0a9f12e7e2cf3583041ba7a960e | [
"MIT"
] | null | null | null | # flake8: noqa
# Package version string (PEP 440 pre-release: 0.4.1 alpha 1).
__version__ = "0.4.1-a1"
| 13.333333 | 24 | 0.65 |
3aba5d4030c4a3ad19fdc5d2934d3cd1555ac4f9 | 2,842 | py | Python | qnet_agent.py | opplieam/Pong-Deep-RL | 28850a46bff0fb22374a39a7ab391175a9105673 | [
"Apache-2.0"
] | null | null | null | qnet_agent.py | opplieam/Pong-Deep-RL | 28850a46bff0fb22374a39a7ab391175a9105673 | [
"Apache-2.0"
] | null | null | null | qnet_agent.py | opplieam/Pong-Deep-RL | 28850a46bff0fb22374a39a7ab391175a9105673 | [
"Apache-2.0"
] | null | null | null | import torch
from deep_network import NeuralNetwork
from utils import preprocess_frame, save_model
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# torch.manual_seed(1)
class QNet_Agent(object):
    """Double-DQN agent: the online network (`self.nn`) selects actions while a
    periodically synchronized target network (`self.target_nn`) evaluates them.
    """

    # Save the online network every N optimize() calls.
    save_model_frequency = 10000
    # Counts optimize() calls. NOTE: declared on the class, but the
    # `self.number_of_frames += 1` below rebinds it as an instance attribute
    # after the first increment.
    number_of_frames = 0

    def __init__(self, number_of_outputs, learning_rate, env, memory, batch_size, gamma, update_target_frequency):
        """
        :param number_of_outputs: size of the network's action-value output layer.
        :param learning_rate: Adam learning rate for the online network.
        :param env: environment; only ``env.action_space.sample()`` is used here
            (assumes a Gym-style API — TODO confirm).
        :param memory: replay buffer supporting ``len()`` and ``.sample(n)``.
        :param batch_size: minibatch size drawn from the replay buffer.
        :param gamma: discount factor applied to future state values.
        :param update_target_frequency: sync target net every N optimize() calls.
        """
        self.env = env
        self.memory = memory
        self.batch_size = batch_size
        self.gamma = gamma
        self.update_target_frequency = update_target_frequency
        self.nn = NeuralNetwork(number_of_outputs).to(device)
        self.target_nn = NeuralNetwork(number_of_outputs).to(device)
        self.loss_func = torch.nn.MSELoss()
        self.optimizer = torch.optim.Adam(params=self.nn.parameters(),
                                          lr=learning_rate)

    def select_action(self, state, epsilon):
        """Epsilon-greedy action selection.

        With probability (1 - epsilon) pick argmax of the online network's
        Q-values for the preprocessed frame; otherwise sample a random action.
        """
        random_for_egreedy = torch.rand(1).item()

        if random_for_egreedy > epsilon:
            # Greedy branch: no gradients needed for inference.
            with torch.no_grad():
                state = preprocess_frame(state)
                action_from_nn = self.nn(state)
                action = torch.max(action_from_nn, 1)[1].item()
        else:
            action = self.env.action_space.sample()

        return action

    def optimize(self):
        """One Double-DQN gradient step on a replay minibatch.

        No-op until the replay buffer holds at least one full batch. Also
        handles periodic model saving and target-network synchronization.
        """
        if len(self.memory) < self.batch_size:
            return

        # Five parallel sequences: transitions stored by the replay buffer.
        state, action, new_state, reward, done = self.memory.sample(self.batch_size)

        state = [preprocess_frame(frame) for frame in state]
        state = torch.cat(state)  # stack tensor

        new_state = [preprocess_frame(frame) for frame in new_state]
        new_state = torch.cat(new_state)

        reward = torch.Tensor(reward).to(device)
        action = torch.LongTensor(action).to(device)
        done = torch.Tensor(done).to(device)

        # Double DQN: the online net chooses the best next action, the target
        # net supplies that action's value — decoupling selection from evaluation.
        max_new_state_indexes = torch.argmax(self.nn(new_state).detach(), 1)

        new_state_values = self.target_nn(new_state).detach()
        max_new_state_values = new_state_values.gather(
            1, max_new_state_indexes.unsqueeze(1)
        ).squeeze(1)

        # (1 - done) zeroes the bootstrap term for terminal transitions.
        target_value = reward + (1 - done) * self.gamma * max_new_state_values
        predicted_value = self.nn(state).gather(1, action.unsqueeze(1)).squeeze(
            1)

        loss = self.loss_func(predicted_value, target_value)

        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients in-place to [-1, 1] for stability before stepping.
        for param in self.nn.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

        if self.number_of_frames % self.save_model_frequency == 0:
            save_model(self.nn)

        if self.number_of_frames % self.update_target_frequency == 0:
            self.target_nn.load_state_dict(self.nn.state_dict())

        self.number_of_frames += 1
| 35.525 | 114 | 0.644616 |
5a7ff8078f691095e196cb032f62369a0f6cd277 | 4,954 | py | Python | test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v2/operations/_operation_group_two_operations.py | changlong-liu/autorest.python | 1f03e4c6a11934d385fab050dc44041f1e91e9ff | [
"MIT"
] | null | null | null | test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v2/operations/_operation_group_two_operations.py | changlong-liu/autorest.python | 1f03e4c6a11934d385fab050dc44041f1e91e9ff | [
"MIT"
] | null | null | null | test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v2/operations/_operation_group_two_operations.py | changlong-liu/autorest.python | 1f03e4c6a11934d385fab050dc44041f1e91e9ff | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level msrest serializer shared by the request builders below;
# client-side validation is switched off for the generated builders.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_test_four_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the POST request for the testFour operation.

    Recognized kwargs: ``api_version`` (default "2.0.0"), ``parameter_one``
    (required bool), ``template_url``, ``params``, ``headers``; any remaining
    kwargs are forwarded to :class:`HttpRequest`.
    """
    api_version = kwargs.pop('api_version', "2.0.0")  # type: str
    parameter_one = kwargs.pop('parameter_one')  # type: bool

    accept = "application/json"

    # Target URL (overridable for client-side URL templating).
    url = kwargs.pop("template_url", '/multiapi/two/testFourEndpoint')

    # Query string parameters.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['parameterOne'] = _SERIALIZER.query("parameter_one", parameter_one, 'bool')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # HTTP headers.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
# fmt: on
class OperationGroupTwoOperations(object):
    """OperationGroupTwoOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~multiapi.v2.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client plus (de)serializers injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def test_four(
        self,
        parameter_one,  # type: bool
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """TestFour should be in OperationGroupTwoOperations.

        :param parameter_one: A boolean parameter.
        :type parameter_one: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/404/409 status codes to their specific exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2.0.0")  # type: str

        request = build_test_four_request(
            api_version=api_version,
            parameter_one=parameter_one,
            template_url=self.test_four.metadata['url'],
        )
        # Convert the azure.core.rest request to the pipeline transport flavor
        # and expand the URL against the client's base endpoint.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # Raise the mapped exception (or an HttpResponseError with the
            # deserialized service Error payload in ARM format).
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    test_four.metadata = {'url': '/multiapi/two/testFourEndpoint'}  # type: ignore
| 38.107692 | 133 | 0.677836 |
dd250cc1529bea0acd79a4d38787d9efb611e13a | 10,491 | py | Python | mpllayout.py | mchels/FolderBrowser | 2915634ebae8b093f26117d65f6bd97a3de59f07 | [
"MIT"
] | 1 | 2016-11-04T11:06:05.000Z | 2016-11-04T11:06:05.000Z | mpllayout.py | mchels/FolderBrowser | 2915634ebae8b093f26117d65f6bd97a3de59f07 | [
"MIT"
] | null | null | null | mpllayout.py | mchels/FolderBrowser | 2915634ebae8b093f26117d65f6bd97a3de59f07 | [
"MIT"
] | 3 | 2017-08-07T15:04:59.000Z | 2019-03-22T13:25:00.000Z | from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QSizePolicy
from plotcontrols import PlotControls
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from numpy import nanmin, nanmax
from custom_colormap import get_colormap
from datahandler import data_handler_factory
from plothandler import plot_handler_factory
class MplLayout(QtWidgets.QWidget):
    """
    This class is responsible for drawing the plots.

    Parameters
    ----------
    statusBar : QtWidgets.QStatusBar instance
        statusBar of the parent FolderBrowser instance.
    parent : QtWidgets.QMainWindow instance
        The parent FolderBrowser instance.

    The starting point for this class was the Matplotlib example file
    embedding_in_qt5.py
    from
    https://matplotlib.org/examples/user_interfaces/embedding_in_qt5.html

    Notes
    -----
    `update_is_scheduled` is a re-entrancy guard: setter methods normally
    trigger an immediate redraw, but `update_sel_cols` sets the flag so a
    single `update_plot` call happens at the end of the update chain.
    """
    def __init__(self, statusBar=None, parent=None):
        super().__init__()
        self.statusBar = statusBar
        self.parent = parent
        self.init_fig_and_canvas()
        self.cmap_names = ['Reds', 'Blues_r', 'dark symmetric',
                           'light symmetric', 'inferno', 'viridis', 'afmhot']
        self.plot_2D_types = ('Auto', 'imshow', 'pcolormesh')
        self.plotcontrols = PlotControls(self.cmap_names, self.plot_2D_types)
        self.set_callback_functions()
        self.init_navi_toolbar()
        # Stack: matplotlib toolbar, canvas, then the plot controls strip.
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.navi_toolbar)
        layout.addWidget(self.canvas)
        layout.addWidget(self.plotcontrols)
        self.setLayout(layout)
        # Sentinel shown in the third column box to request a 1D plot.
        self.none_str = '---'
        self.sel_col_names = self.plotcontrols.get_sel_cols()
        self.plot_data = [None] * 3
        self.cbar = None
        self.cmap_name = self.cmap_names[0]
        self.cmap = plt.get_cmap(self.cmap_name)
        # Per-axis limits: [x, y, color]; None means autoscale from data.
        self.lims = [None] * 3
        self.aspect = 'auto'
        self.update_is_scheduled = False
        self.title = None
        self.labels = [None] * 3
        # Power limits for scientific tick-label notation.
        self.scilimits = (-3,3)
        self.n_active_cols = None
        self.plot_2D_type = None

    def reset_and_plot(self, sweep):
        """Load a new sweep, repopulate the column combo boxes and redraw."""
        self.sweep = sweep
        raw_col_names = list(self.sweep.data.dtype.names)
        pcol_names = self.sweep.pdata.get_names()
        all_names = raw_col_names + pcol_names
        # Only the third box offers the '---' (no column / 1D plot) choice.
        col3_names = all_names + [self.none_str]
        col_names = [all_names, all_names, col3_names]
        self.plotcontrols.reset_col_boxes(col_names)
        self.update_sel_cols()

    def update_sel_cols(self, new_num=None):
        """React to a column-selection change; validate and redraw.

        Falls back to a 1D plot (by forcing '---' in the third box and
        recursing once) when a 2D plot is requested for 1D data.
        """
        col_names = self.plotcontrols.get_sel_cols()
        new_col_names = [n for n in col_names if n != self.none_str]
        # Try to make 1D plot if '---' is selected in the third comboBox.
        self.plot_is_2D = len(new_col_names) == 3
        self.data_is_1D = self.sweep.dimension == 1
        plot_is_invalid = self.plot_is_2D and self.data_is_1D
        if plot_is_invalid:
            msg = "You can't do a 2D plot, since the data is only 1D."
            self.statusBar.showMessage(msg, 2000)
            self.plotcontrols.set_text_on_box(2, self.none_str)
            self.update_sel_cols()
            return
        self.set_data_for_plot(new_col_names)
        # Valid combinations: 1D plot with 2 arrays, 2D plot with 3 arrays.
        tmp = (self.plot_dim, self.data_h.n_data_arrs)
        if tmp in ((1,2), (2,3)) and self.data_h.data_is_valid:
            # Suppress the intermediate redraws the setters below would trigger.
            self.update_is_scheduled = True
            self.set_labels()
            self.update_lims()
            self.update_plot()
        else:
            self.clear_axis(redraw=True)

    def set_data_for_plot(self, new_col_names):
        """Fetch the selected columns (raw or pseudo) and build the data/plot handlers."""
        new_plot_data = [None] * len(new_col_names)
        for i, col_name in enumerate(new_col_names):
            sweep = self.sweep
            raw_data_col_names = sweep.data.dtype.names
            pdata_col_names = sweep.pdata.name_func_dict.keys()
            if col_name in raw_data_col_names:
                new_plot_data[i] = sweep.data[col_name]
            elif col_name in pdata_col_names:
                # Pseudocolumns are computed on access and may fail.
                try:
                    new_plot_data[i] = sweep.pdata[col_name]
                except Exception as error:
                    msg = 'Calculation of pseudocolumn failed'
                    self.statusBar.showMessage(msg, 2000)
        new_data_h = data_handler_factory(*new_plot_data)
        self.sel_col_names = new_col_names
        self.n_active_cols = len(new_col_names)
        ax = self.canvas.figure.axes[0]
        # Plot dimension is one less than the number of selected columns.
        plot_dim = self.n_active_cols - 1
        self.plot_dim = plot_dim
        self.plot_h = plot_handler_factory(ax, new_data_h, plot_dim=plot_dim)
        self.data_h = new_data_h

    def set_labels(self):
        """Look up an axis label for every selected column."""
        self.labels = [None] * self.n_active_cols
        for i, _ in enumerate(self.labels):
            col_name = self.sel_col_names[i]
            self.labels[i] = self.sweep.get_label(col_name)

    def update_lims(self):
        """
        user_lims are limits set by user in the lim_boxes.
        For both 1D and 2D plots extent is data limits.

        User limits override the data extent per endpoint; see
        `combine_lim_lists`.
        """
        ext = [None] * self.n_active_cols
        user_lims = self.plotcontrols.get_lims()
        self.lims = [None] * self.n_active_cols
        for i, lim in enumerate(self.lims):
            ext = self.data_h.get_extent_of_data_dim(i)
            self.lims[i] = self.combine_lim_lists(user_lims[i], ext)
        self.update_cmap()
        if not self.update_is_scheduled:
            self.update_plot()

    def update_cmap(self, cmap_name=None):
        """
        cmap_name: string corresponding to a built-in matplotlib colormap
        OR 'symmetric' which is defined below.

        May also receive an int (combo-box index into `cmap_names`), or None
        to keep the current colormap name.
        """
        if not self.plot_is_2D:
            return
        if type(cmap_name) is int:
            cmap_name = self.cmap_names[cmap_name]
        if cmap_name is None:
            cmap_name = self.cmap_name
        self.cmap_name = cmap_name
        # Colormap construction depends on the color limits (symmetric maps).
        self.cmap = get_colormap(cmap_name, self.lims[2])
        if not self.update_is_scheduled:
            self.update_plot()

    def update_aspect(self):
        """Read the aspect-ratio box and redraw unless a redraw is already scheduled."""
        self.aspect = self.plotcontrols.get_aspect()
        if not self.update_is_scheduled:
            self.update_plot()

    def update_plot(self):
        """Dispatch to the 1D or 2D redraw and clear the scheduling flag."""
        if self.plot_is_2D: self._update_2D_plot()
        else: self._update_1D_plot()
        self.update_is_scheduled = False

    def _update_1D_plot(self):
        """Redraw a 1D line plot."""
        self.clear_axis(redraw=False)
        self.plot_h.plot()
        self.common_plot_update()

    def _update_2D_plot(self):
        """Redraw a 2D image/pcolormesh plot with its colorbar."""
        fig = self.canvas.figure
        # imshow needs regularly gridded data; bail out to an empty axis otherwise.
        if self.plot_2D_type == 'imshow' and not self.data_h.imshow_eligible:
            self.clear_axis(redraw=True)
            return
        self.clear_axis(redraw=False)
        self.image = self.plot_h.plot(plot_type=self.plot_2D_type)
        self.cbar = fig.colorbar(mappable=self.image)
        self.cbar.formatter.set_powerlimits(self.scilimits)
        self.image.set_cmap(self.cmap)
        self.image.set_clim(self.lims[2])
        self.cbar.set_label(self.labels[2])
        self.cbar.draw_all()
        self.common_plot_update()

    def common_plot_update(self):
        """Apply labels, limits, title and aspect shared by 1D and 2D plots, then draw."""
        ax = self.canvas.figure.axes[0]
        ax.ticklabel_format(style='sci', axis='both',
                            scilimits=self.scilimits, useOffset=False)
        ax.autoscale_view(True, True, True)
        ax.relim()
        ax.set_xlabel(self.labels[0])
        ax.set_ylabel(self.labels[1])
        ax.set_xlim(self.lims[0])
        ax.set_ylim(self.lims[1])
        ax.set_title(self.title, fontsize=11)
        ax.set_aspect(self.aspect)
        self.custom_tight_layout()
        self.canvas.draw()

    def clear_axis(self, redraw=True):
        """Remove the colorbar (if any) and clear every axis; optionally redraw."""
        try:
            self.cbar.remove()
            self.cbar = None
            self.image = None
        except AttributeError:
            # No colorbar present (self.cbar is None).
            pass
        for ax in self.canvas.figure.axes:
            ax.cla()
            ax.relim()
            ax.autoscale()
        if redraw:
            self.custom_tight_layout()
            self.canvas.draw()

    def custom_tight_layout(self):
        """tight_layout wrapper tolerating the known over-wide-title failure."""
        # Sometimes we'll get an error:
        # ValueError: bottom cannot be >= top
        # This is a confirmed bug when using tight_layout():
        # https://github.com/matplotlib/matplotlib/issues/5456
        try:
            self.canvas.figure.tight_layout()
        except ValueError:
            msg = ('Title is wider than figure.'
                   'This causes undesired behavior and is a known bug.')
            self.statusBar.showMessage(msg, 2000)

    def set_callback_functions(self):
        """Wire the PlotControls widgets to the corresponding update methods."""
        pt = self.plotcontrols
        for box in pt.col_boxes:
            box.activated.connect(self.update_sel_cols)
        for box in pt.lim_boxes:
            box.editingFinished.connect(self.update_lims)
        pt.cmap_sel.activated.connect(self.update_cmap)
        pt.plot_2D_type_sel.activated.connect(self.set_plot_2D_type)
        pt.aspect_box.editingFinished.connect(self.update_aspect)

    def init_fig_and_canvas(self):
        """Create the matplotlib Figure with one axis and embed it in a Qt canvas."""
        fig = Figure(facecolor='white')
        fig.add_subplot(1, 1, 1)
        self.canvas = FigureCanvasQTAgg(fig)
        policy = QSizePolicy.Expanding
        self.canvas.setSizePolicy(policy, policy)

    def init_navi_toolbar(self):
        """Create a slim, borderless matplotlib navigation toolbar."""
        self.navi_toolbar = NavigationToolbar2QT(self.canvas, self)
        self.navi_toolbar.setStyleSheet('border: none')
        self.navi_toolbar.setMaximumHeight(20)

    def copy_fig_to_clipboard(self):
        """Grab the canvas as an image and put it on the system clipboard."""
        image = QtWidgets.QWidget.grab(self.canvas).toImage()
        QtWidgets.QApplication.clipboard().setImage(image)

    def set_plot_2D_type(self, new_type=None):
        """Read the selected 2D plot type ('Auto' maps to None) and redraw."""
        new_type = self.plotcontrols.get_sel_2D_type()
        assert new_type in self.plot_2D_types
        if new_type == 'Auto':
            new_type = None
        self.plot_2D_type = new_type
        if not self.update_is_scheduled:
            self.update_plot()

    def set_title(self, title):
        """Store the title applied on the next `common_plot_update`."""
        self.title = title

    @staticmethod
    def combine_lim_lists(list1, list2):
        """Merge two limit lists endpoint-wise; entries from `list1` win,
        falling back to `list2` where `list1` has None. Returns None when
        either list is None."""
        if list1 is None or list2 is None:
            return None
        assert len(list1) == len(list2)
        out_list = [None] * len(list1)
        for i in range(len(list1)):
            if list1[i] is None:
                out_list[i] = list2[i]
            else:
                out_list[i] = list1[i]
        return out_list
| 37.602151 | 77 | 0.632923 |
d3430f6676c4f69185af22c57640886b392715e9 | 4,872 | py | Python | src/py/PrjPublish/PublishByPara.py | PrQiang/aods | b743754740f5b5bb4217f06fd790dffa303f871f | [
"MIT"
] | 2 | 2020-12-14T14:24:56.000Z | 2021-06-16T09:22:13.000Z | example/PrjPublish/PublishByPara.py | PrQiang/aods | b743754740f5b5bb4217f06fd790dffa303f871f | [
"MIT"
] | 1 | 2020-12-30T10:25:27.000Z | 2020-12-30T10:25:44.000Z | example/PrjPublish/PublishByPara.py | PrQiang/aods | b743754740f5b5bb4217f06fd790dffa303f871f | [
"MIT"
] | 1 | 2021-06-16T09:22:17.000Z | 2021-06-16T09:22:17.000Z |
""" 实现版本脚本发布功能
"""
import zipfile, random, time, json, os, sys, zlib, paramiko
from Logger import*
from DataModel import DataModel
from RestfulApiClient import RestfulApiClient
class Publish:
    """Package a build folder, encrypt it, upload it over SFTP and register
    the release with the publish service.
    """

    def __init__(self, ftpsInfo, pubUrl):
        """
        :param ftpsInfo: SFTP upload targets, e.g. [("127.0.0.1", 22, 'usr', 'pwd')]
        :param pubUrl: base download URL for published files,
            e.g. http://aa.bb.com:8888/update/
        """
        self.ftpsInfo, self.pubUrl = ftpsInfo, pubUrl

    def Publish(self, prj, mn, ver, folder, pubFileName, detail=''):
        """Build, encrypt, upload and register one release.

        Returns True on success; a falsy value (None/False) on any failure.
        """
        try:
            plain_name = '%s.%s.db' % (pubFileName, ver)
            cipher_name = '%s.%s.en.db' % (pubFileName, ver)
            # Zip the build folder into the plain-text package.
            if not self.__packetFile(plain_name, folder):
                return print("打包%s:%s:%s失败" % (prj, mn, ver))
            with open(plain_name, 'rb') as fh:
                raw = fh.read()
            if raw is None:
                return print("读取文件(%s)失败" % (plain_name))
            # Custom hash: total length plus CRC32 of each half and the whole.
            total = len(raw)
            half = int(total / 2)
            fileHash = "%08x%08x%08x%08x" % (
                total, zlib.crc32(raw[0:half]), zlib.crc32(raw[half:]), zlib.crc32(raw)
            )
            # Fresh session key and 64K substitution table for the encryption.
            key = self.__generalSk().encode()
            table = self.__generalDict()
            key_len = len(key)
            cipher = bytes(
                table[(((key[i % key_len] << 8) & 0xFF00) | (b & 0xFF)) & 0xFFFF]
                for i, b in enumerate(raw)
            )
            with open(cipher_name, "wb") as fh:
                fh.write(cipher)
            if not self.__sftpUploadEncryptFile(cipher_name):
                return print("上传文件失败")
            _, fn = os.path.split(cipher_name)
            result = RestfulApiClient().Publish(
                prj, mn, ver, [1, ], detail, key.decode(), fileHash,
                "%s%s" % (self.pubUrl, fn)
            )
            Log(LOG_INFO, "Publish", json.dumps(result))
            return True
        except Exception as e:
            Log(LOG_ERROR, "Publish", "Run failed: %s" % e)
            return False

    def __packetFile(self, fileName, folder):
        """Zip *folder* into *fileName*; archive entries are stored relative to *folder*."""
        try:
            with zipfile.ZipFile(fileName, 'w', zipfile.ZIP_DEFLATED) as archive:
                for root, _dirs, names in os.walk(folder):
                    for name in names:
                        full = os.path.join(root, name)
                        archive.write(full, full.replace(folder, ""))
            return True
        except Exception as e:
            Log(LOG_ERROR, "Publish", "__packetFile(%s) failed: %s" % (fileName, e))
            return False

    def __generalSk(self, skLen=16):
        """Generate a random session key of *skLen* characters."""
        alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUabcdefghijklmnopqrstuvwxyzVWXYZ0123456789fghijklmnopqrstuvwx'
        rng = random.Random(time.time())
        return ''.join(rng.choice(alphabet) for _ in range(skLen))

    def __generalDict(self):
        """Return the 65536-entry substitution table: index i maps to (i + 10) mod 256."""
        return bytes((i + 10) % 256 for i in range(0, 65536))

    def __sftpUploadEncryptFile(self, fileWithPath):
        """Upload the file to every configured SFTP target; True only if all succeed."""
        try:
            return all(
                self.__singleSFtpUpload(addr, port, usr, pwd, fileWithPath)
                for (addr, port, usr, pwd) in self.ftpsInfo
            )
        except Exception as e:
            Log(LOG_ERROR, "Publish", "__sftpUploadEncryptFile(%s) failed: %s" % (fileWithPath, e))
            return False

    def __singleSFtpUpload(self, addr, port, usr, pwd, fileWithPath):
        """Upload one file to /var/www/html/ on a single SFTP host."""
        try:
            transport = paramiko.Transport((addr, int(port)))
            transport.connect(username=usr, password=pwd)
            sftp = paramiko.SFTPClient.from_transport(transport)
            _, fn = os.path.split(fileWithPath)
            sftp.put(fileWithPath, '/var/www/html/%s' % fn)
            sftp.close()
            return True
        except Exception as e:
            Log(LOG_ERROR, "Publish", "__singleFtpUpload(%s:%s, %s, %s) failed: %s" % (addr, port, usr, fileWithPath, e))
            return False
# Command-line entry point: authenticate against the publish service, then
# package and publish the module described by the six positional arguments:
#   project module version folder file-name-prefix description
if __name__ == '__main__':
    cfg = {
        "usr": "master",  # UI login account
        "password": "master@er.com",  # UI login password
        "pubUrl": "http://192.168.221.134:8210/",  # download URL prefix after publishing
        "pubTopic": "publish",  #
        "sftpAddr": [("192.168.221.134", 22, 'root', 'Baidu.com22')],  # sftp host, port, user, password
        # Example batch configuration (unused; argv drives the publish instead):
        #   ("aods", "aods-x64-win", "0.0.0.0001", ".\\aods-x64-win\\", "aods-x64-win", "...")
    }
    # Log in through the REST API and register the session user.
    rlt = RestfulApiClient().Login(cfg['usr'], cfg['password'])
    if not rlt or rlt["login_result"]["result"] != "success":
        Log(LOG_ERROR, "Publish", "Failed to login")
        sys.exit(1)
    DataModel.Instance().UpdateUser(rlt["login_result"])
    publisher = Publish(cfg["sftpAddr"], cfg["pubUrl"])
    (prj, mn, ver, folder, pubFileName, detail) = sys.argv[1:]
    sys.exit(0 if publisher.Publish(prj, mn, ver, folder, pubFileName, detail) else 1)
93cf12c4437198c721d630413047a7bbb38f9c70 | 3,247 | py | Python | Selenium_Scripts/login_improper.py | RankyLea/conduit | 501f132e0d95de2b2ddc0bdd8d3ef971eb9b1357 | [
"MIT"
] | null | null | null | Selenium_Scripts/login_improper.py | RankyLea/conduit | 501f132e0d95de2b2ddc0bdd8d3ef971eb9b1357 | [
"MIT"
] | null | null | null | Selenium_Scripts/login_improper.py | RankyLea/conduit | 501f132e0d95de2b2ddc0bdd8d3ef971eb9b1357 | [
"MIT"
] | null | null | null | import string
import time
import random
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from random_signup import my_test_user
from random_signup import my_email
from random_signup import my_password
URL = "http://conduitapp.progmasters.hu:1667"
# URL = "http://localhost:1667/"
def _attempt_login(driver, email, password):
    """Fill the sign-in form, submit, and dismiss the resulting error dialog.

    BUG FIX: both fields are cleared before typing. The original script only
    cleared on the third attempt, so text typed in earlier failed attempts
    could accumulate in the inputs and corrupt the later test cases.
    """
    email_field = driver.find_element_by_xpath('//form/fieldset[1]/input[1]')
    email_field.clear()
    email_field.click()
    email_field.send_keys(email)
    password_field = driver.find_element_by_xpath('//form/fieldset[2]/input[1]')
    password_field.clear()
    password_field.click()
    password_field.send_keys(password)
    driver.find_element_by_xpath('//*[@id="app"]/div/div/div/div/form/button').click()
    # Scroll the error pop-up into view and dismiss it.
    element = driver.find_element(By.CSS_SELECTOR, ".btn")
    actions = ActionChains(driver)
    actions.move_to_element(element).perform()
    driver.find_element_by_xpath('/html/body/div[2]/div/div[4]/div/button').click()


try:
    driver = webdriver.Chrome(ChromeDriverManager().install())
    driver.get(URL)
    time.sleep(2)
    # Open the sign-in page.
    driver.find_element_by_xpath('//ul/li[2]/a').click()

    # 1) Login form submitted empty.
    _attempt_login(driver, "", "")
    # 2) Invalid email format (no @ sign).
    _attempt_login(driver, "petti.kockas", "KockasPeti123")
    # 3) Invalid email format (no domain after the @).
    _attempt_login(driver, "petti.kockas@", "KockasPeti123")
finally:
    pass
    # driver.close()
| 34.542553 | 92 | 0.716046 |
4720b28ecb44e8e39e44dfbab7af6650246d02d1 | 503 | py | Python | vcfx/field/calendar/nodes.py | Pholey/vcfx | 1c0e58fc420cdd85bcafb46ebb19389470aa9209 | [
"MIT"
] | 1 | 2016-02-08T10:19:09.000Z | 2016-02-08T10:19:09.000Z | vcfx/field/calendar/nodes.py | Pholey/vcfx | 1c0e58fc420cdd85bcafb46ebb19389470aa9209 | [
"MIT"
] | null | null | null | vcfx/field/calendar/nodes.py | Pholey/vcfx | 1c0e58fc420cdd85bcafb46ebb19389470aa9209 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
from vcfx.field.nodes import Field
#######
# TODO(cassidy): Figure out what `CALADRURI` actually is and implement support
#######
class BusyTime(Field):
KEY = "FBURL"
def __init__(self, *a, **kw):
super(BusyTime, self).__init__(*a, **kw)
| 27.944444 | 78 | 0.759443 |
e0e7760d2225e0f83dfe580058ae2c868e215b82 | 14,399 | py | Python | tests/oauth2_utils.py | jvanasco/pyramid_oauthlib_lowlevel | ab1a867f63252128274dde12f41960a20037f1d4 | [
"BSD-2-Clause"
] | 1 | 2019-05-21T21:01:02.000Z | 2019-05-21T21:01:02.000Z | tests/oauth2_utils.py | jvanasco/pyramid_oauthlib_lowlevel | ab1a867f63252128274dde12f41960a20037f1d4 | [
"BSD-2-Clause"
] | null | null | null | tests/oauth2_utils.py | jvanasco/pyramid_oauthlib_lowlevel | ab1a867f63252128274dde12f41960a20037f1d4 | [
"BSD-2-Clause"
] | null | null | null | # stdlib
import pdb
import datetime
# pypi
import sqlalchemy
import sqlalchemy.orm
from oauthlib.oauth2 import Server
from oauthlib.oauth2 import WebApplicationServer
# local module
from pyramid_oauthlib_lowlevel.oauth2.validator import OAuth2RequestValidator_Hooks
from pyramid_oauthlib_lowlevel.oauth2.validator import OAuth2RequestValidator
from pyramid_oauthlib_lowlevel.oauth2 import provider as oauth2_provider
from pyramid_oauthlib_lowlevel.utils import catch_backend_failure
from pyramid_oauthlib_lowlevel.client.api_client import ApiClient
# local tests
from .oauth2_model import DeveloperApplication
from .oauth2_model import DeveloperApplication_Keyset
from .oauth2_model import Developer_OAuth2Server_BearerToken
from .oauth2_model import Developer_OAuth2Server_GrantToken
from .oauth2_model import OAUTH2__APP_ID
from .oauth2_model import OAUTH2__APP_KEY
from .oauth2_model import OAUTH2__APP_SECRET
from .oauth2_model import OAUTH2__URL_APP_FLOW_REGISTER_CALLBACK
from .oauth2_model import OAUTH2__URL_AUTHORITY_FLOWA_AUTHORIZATION
from .oauth2_model import OAUTH2__URL_AUTHORITY_FLOWA_TOKEN
from .oauth2_model import OAUTH2__URL_AUTHORITY_REVOKE_TOKEN
from .oauth2_model import OAUTH2__URL_AUTHORITY_FLOWB_TOKEN
from .oauth2_model import OAUTH2__URL_AUTHORITY_FLOWB_TOKEN_ALT
from .oauth2_model import OAUTH2__URL_AUTHORITY_FLOWC_TOKEN_LIMITED
from .oauth2_model import OAUTH2__URL_AUTHORITY_PROTECTED_RESOURCE
from .oauth2_model import OAUTH2__URL_APP_FETCH_PROTECTED_RESOURCE
# ==============================================================================
class CustomApiClient(ApiClient):
    """Test OAuth2 client wired to the "flow A" authorization/token endpoints."""

    # Sent as the HTTP User-Agent header by the client machinery.
    _user_agent = "CustomApiClient v0"
    oauth_version = 2

    # Endpoint URLs; constants come from .oauth2_model.
    _url_authorization = OAUTH2__URL_AUTHORITY_FLOWA_AUTHORIZATION
    _url_callback = OAUTH2__URL_APP_FLOW_REGISTER_CALLBACK
    _url_obtain_token = OAUTH2__URL_AUTHORITY_FLOWA_TOKEN
class CustomApiClientB(ApiClient):
    """Test OAuth2 client for "flow B": token obtain/revoke plus a
    limited-scope token endpoint ("flow C")."""

    _user_agent = "CustomApiClientB v0"
    oauth_version = 2

    # Endpoint URLs; constants come from .oauth2_model. `redirect_uri` is an
    # alias of the callback URL.
    _url_authorization = OAUTH2__URL_AUTHORITY_FLOWA_AUTHORIZATION
    _url_callback = redirect_uri = OAUTH2__URL_APP_FLOW_REGISTER_CALLBACK
    _url_obtain_token = OAUTH2__URL_AUTHORITY_FLOWB_TOKEN
    _url_obtain_token_alt = OAUTH2__URL_AUTHORITY_FLOWB_TOKEN_ALT
    _url_revoke_token = OAUTH2__URL_AUTHORITY_REVOKE_TOKEN
    _url_token_limited = OAUTH2__URL_AUTHORITY_FLOWC_TOKEN_LIMITED
class CustomValidator(OAuth2RequestValidator):
    """Validator exposing both refresh-token rotation policies.

    The ``_rotate_refresh_token__*`` methods are alternative implementations
    of ``rotate_refresh_token`` — presumably one of them is bound in place of
    it by the test harness to select a policy; TODO confirm.
    """

    def _rotate_refresh_token__True(self, request):
        """Determine whether to rotate the refresh token. Default, yes.

        When access tokens are refreshed the old refresh token can be kept
        or replaced with a new one (rotated). Return True to rotate and
        and False for keeping original.

        :param request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Refresh Token Grant
        """
        return True

    def _rotate_refresh_token__False(self, request):
        # Opposite policy: keep the original refresh token on refresh.
        return False
class CustomValidator_Hooks(OAuth2RequestValidator_Hooks):
#
# client getter
#
@catch_backend_failure
def client_getter(self, client_id=None):
"""Retreive a valid client
:param client_id: Unicode client identifier
returns `docs.oauth2.object_interfaces.Client()`
EXAMPLE ARGS:
client_id = u'12312341'
"""
if not client_id:
return None
clientObject = (
self.pyramid_request.dbSession.query(DeveloperApplication)
.join(
DeveloperApplication_Keyset,
DeveloperApplication.id
== DeveloperApplication_Keyset.developer_application_id,
)
.filter(
DeveloperApplication_Keyset.client_id == client_id,
DeveloperApplication_Keyset.is_active == True, # noqa
)
.options(sqlalchemy.orm.contains_eager("app_keyset_active"))
.first()
)
# if not clientObject:
# raise oauthlib_oauth1_errors.InvalidClientError("Invalid Client")
# if not clientObject:
# print "MISSING client"
# pdb.set_trace()
return clientObject
#
# grant getter and setter | oAuth1 = request_token_(getter|setter)
#
def grant_setter(self, client_id, code, request, *args, **kwargs):
"""
A function to save the grant code.
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant and, optionally, state.
:param request: The HTTP Request (oauthlib.common.Request)
def set_grant(client_id, code, request, *args, **kwargs):
save_grant(client_id, code, request.user, request.scopes)
"""
if not self.pyramid_request.active_useraccount_id:
raise ValueError("The `user` MUST be logged in")
grantObject = Developer_OAuth2Server_GrantToken()
grantObject.useraccount_id = self.pyramid_request.active_useraccount_id
grantObject.developer_application_id = request.client.id
grantObject.scope = (
request.scope
) # `Developer_OAuth2Server_GrantToken.scope` is TEXT field as is `request.scope`; `.scopes` are lists
grantObject.timestamp_created = self.pyramid_request.datetime
grantObject.is_active = True
grantObject.redirect_uri = request.redirect_uri
grantObject.code = code.get("code") # this is a dict with code|state
grantObject.timestamp_expires = (
grantObject.timestamp_created + datetime.timedelta(minutes=10)
)
self.pyramid_request.dbSession.add(grantObject)
self.pyramid_request.dbSession.flush()
return True
def grant_getter(self, client_id, code, *args, **kwargs):
"""
A method to load a grant.
:param client_id: Unicode client identifier
:param code: Unicode authorization_code
"""
grantObject = (
self.pyramid_request.dbSession.query(Developer_OAuth2Server_GrantToken)
.join(
DeveloperApplication,
Developer_OAuth2Server_GrantToken.developer_application_id
== DeveloperApplication.id,
)
.join(
DeveloperApplication_Keyset,
DeveloperApplication.id
== DeveloperApplication_Keyset.developer_application_id,
)
.filter(
Developer_OAuth2Server_GrantToken.code == code,
Developer_OAuth2Server_GrantToken.is_active == True, # noqa
DeveloperApplication_Keyset.client_id == client_id,
)
.options(
sqlalchemy.orm.contains_eager("developer_application"),
sqlalchemy.orm.contains_eager(
"developer_application.app_keyset_active"
),
)
.first()
)
if not grantObject:
return None
return grantObject
def grant_invalidate(self, grantObject):
"""
This method expects a `grantObject` as a single argument.
The grant should be deleted or otherwise marked as revoked.
:param grantObject: The grant object loaded by ``grant_getter```
"""
grantObject.is_active = False
self.pyramid_request.dbSession.flush()
#
# bearer_token setter
#
def bearer_token_setter(self, token, request, *args, **kwargs):
"""
:param token: A Bearer token dict
:param request: The HTTP Request (oauthlib.common.Request)
def bearer_token_setter(token, request, *args, **kwargs):
save_token(token, request.client, request.user)
The parameter token is a dict, that looks like::
{
u'access_token': u'6JwgO77PApxsFCU8Quz0pnL9s23016',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'scope': u'email address'
}
"""
# what is the context of the token?
user_id = None
original_grant_type = None
if request.grant_type == "client_credentials":
user_id = request.client.user.id
original_grant_type = request.grant_type
elif request.grant_type == "authorization_code":
user_id = request.user.id
original_grant_type = request.grant_type
elif request.grant_type == "refresh_token":
refreshTok = self.token_getter(refresh_token=request.refresh_token)
if not refreshTok:
raise ValueError("could not load refresh token")
user_id = refreshTok.useraccount_id
original_grant_type = refreshTok.original_grant_type
else:
raise ValueError("what?!? %s" % request.grant_type)
# first, we want to EXPIRE all other bearer tokens for this user
# this is not required by spec, but is optional
# TODO: expire the ones that are active but have not hit an expiry date
liveTokens = (
self.pyramid_request.dbSession.query(Developer_OAuth2Server_BearerToken)
.filter(
Developer_OAuth2Server_BearerToken.developer_application_id
== request.client.id,
Developer_OAuth2Server_BearerToken.useraccount_id == user_id,
Developer_OAuth2Server_BearerToken.is_active == True, # noqa
Developer_OAuth2Server_BearerToken.original_grant_type
== original_grant_type,
)
.all()
)
if liveTokens:
# note that _token, this way we don't overwrite the `token` dict
for _token in liveTokens:
_token.is_active = False
self.pyramid_request.dbSession.flush()
timestamp_expiry = self.pyramid_request.datetime + datetime.timedelta(
seconds=token.get("expires_in")
)
bearerToken = Developer_OAuth2Server_BearerToken()
bearerToken.developer_application_id = request.client.id
bearerToken.useraccount_id = user_id
bearerToken.timestamp_created = self.pyramid_request.datetime
bearerToken.is_active = True
bearerToken.access_token = token["access_token"]
bearerToken.refresh_token = token.get("refresh_token", None)
bearerToken.token_type = "Bearer" # token['token_type']
bearerToken.timestamp_expires = timestamp_expiry
bearerToken.grant_type = request.grant_type
bearerToken.original_grant_type = original_grant_type
bearerToken.scope = token["scope"] # this will be a space separated string
self.pyramid_request.dbSession.add(bearerToken)
self.pyramid_request.dbSession.flush()
return bearerToken
def token_getter(self, access_token=None, refresh_token=None):
"""
The function accepts an `access_token` or `refresh_token` parameters,
and it returns a token object with at least these information:
- access_token: A string token
- refresh_token: A string token
- client_id: ID of the client
- scopes: A list of scopes
- expires: A `datetime.datetime` object
- user: The user object
:param access_token: Unicode access token
:param refresh_token: Unicode refresh token
"""
if all((access_token, refresh_token)) or not any((access_token, refresh_token)):
raise ValueError("Submit `access_token` or `refresh_token`, not both.")
if access_token:
bearerToken = (
self.pyramid_request.dbSession.query(Developer_OAuth2Server_BearerToken)
.filter(
Developer_OAuth2Server_BearerToken.access_token == access_token,
Developer_OAuth2Server_BearerToken.token_type == "Bearer",
Developer_OAuth2Server_BearerToken.is_active == True, # noqa
)
.options(
sqlalchemy.orm.joinedload("developer_application"),
sqlalchemy.orm.joinedload(
"developer_application.app_keyset_active"
),
)
.first()
)
return bearerToken
elif refresh_token:
bearerToken = (
self.pyramid_request.dbSession.query(Developer_OAuth2Server_BearerToken)
.filter(
Developer_OAuth2Server_BearerToken.refresh_token == refresh_token,
Developer_OAuth2Server_BearerToken.token_type == "Bearer",
Developer_OAuth2Server_BearerToken.is_active == True, # noqa
)
.options(
sqlalchemy.orm.joinedload("developer_application"),
sqlalchemy.orm.joinedload(
"developer_application.app_keyset_active"
),
)
.first()
)
return bearerToken
raise ValueError("foo")
def token_revoke(self, tokenObject):
"""
This method expects a `tokenObject` as a single argument.
The token should be deleted or otherwise marked as revoked.
:param tokenObject: The grant object loaded by ``token_getter```
"""
tokenObject.is_active = False
tokenObject.timestamp_revoked = self.pyramid_request.datetime
self.pyramid_request.dbSession.flush()
# ==============================================================================
def new_oauth2Provider(pyramid_request):
"""this is used to build a new auth"""
validatorHooks = CustomValidator_Hooks(pyramid_request)
provider = oauth2_provider.OAuth2Provider(
pyramid_request,
validator_api_hooks=validatorHooks,
validator_class=CustomValidator,
)
return provider
def new_oauth2ProviderLimited(pyramid_request):
"""this is used to build a new auth"""
validatorHooks = CustomValidator_Hooks(pyramid_request)
provider = oauth2_provider.OAuth2Provider(
pyramid_request,
validator_api_hooks=validatorHooks,
validator_class=CustomValidator,
server_class=WebApplicationServer,
)
return provider
| 38.603217 | 111 | 0.654629 |
aefebdec369cb160637f5728f26046d3ecad4f93 | 2,310 | py | Python | py/tests/test_auth.py | robopsi/semaphore | f61f301c7d3b845af7ac063544066d6866eda749 | [
"MIT"
] | null | null | null | py/tests/test_auth.py | robopsi/semaphore | f61f301c7d3b845af7ac063544066d6866eda749 | [
"MIT"
] | null | null | null | py/tests/test_auth.py | robopsi/semaphore | f61f301c7d3b845af7ac063544066d6866eda749 | [
"MIT"
] | 1 | 2020-07-03T00:58:05.000Z | 2020-07-03T00:58:05.000Z | import uuid
import semaphore
import pytest
def test_basic_key_functions():
sk, pk = semaphore.generate_key_pair()
signature = sk.sign(b'some secret data')
assert pk.verify(b'some secret data', signature)
assert not pk.verify(b'some other data', signature)
def test_challenge_response():
resp = semaphore.create_register_challenge(b'{"relay_id":"95dc7c80-6db7-4505-8969-3a0927bfb85d","public_key":"KXxwPvbhadLYTglsiGnQe2lxKLCT4VB2qEDd-OQVLbQ"}', 'EQXKqDYLei5XhDucMDIR3n1khdcOqGWmUWDYhcnvi-OBkW92qfcAMSjSn8xPYDmkB2kLnNNsaFeBx1VifD3TCw.eyJ0IjoiMjAxOC0wMy0wMVQwOTo0NjowNS41NDA0NzdaIn0', max_age=0xffffffff)
assert str(resp['public_key']) == 'KXxwPvbhadLYTglsiGnQe2lxKLCT4VB2qEDd-OQVLbQ'
assert resp['relay_id'] == uuid.UUID('95dc7c80-6db7-4505-8969-3a0927bfb85d')
assert len(resp['token']) > 40
def test_challenge_response_validation_errors():
with pytest.raises(semaphore.UnpackErrorSignatureExpired):
resp = semaphore.create_register_challenge(b'{"relay_id":"95dc7c80-6db7-4505-8969-3a0927bfb85d","public_key":"KXxwPvbhadLYTglsiGnQe2lxKLCT4VB2qEDd-OQVLbQ"}', 'EQXKqDYLei5XhDucMDIR3n1khdcOqGWmUWDYhcnvi-OBkW92qfcAMSjSn8xPYDmkB2kLnNNsaFeBx1VifD3TCw.eyJ0IjoiMjAxOC0wMy0wMVQwOTo0NjowNS41NDA0NzdaIn0', max_age=1)
with pytest.raises(semaphore.UnpackErrorBadPayload):
resp = semaphore.create_register_challenge(b'{"relay_id":"95dc7c80-6db7-4505-8969-3a0927bfb85d","public_key":"KXxwPvbhadLYTglsiGnQe2lxKLCT4VB2qEDd-OQVLbQ"}glumpat', 'EQXKqDYLei5XhDucMDIR3n1khdcOqGWmUWDYhcnvi-OBkW92qfcAMSjSn8xPYDmkB2kLnNNsaFeBx1VifD3TCw.eyJ0IjoiMjAxOC0wMy0wMVQwOTo0NjowNS41NDA0NzdaIn0', max_age=1)
def test_register_response():
pk = semaphore.PublicKey.parse('sFTtnMGut3xR_EqP1hSmyfBc6590wDQzHFGWj5nEG18')
resp = semaphore.validate_register_response(pk, b'{"relay_id":"2ffe6ba6-3a27-4936-b30f-d6944a4f1216","token":"iiWGyrgBZDOOclHjnQILU6zHL1Mjl-yXUpjHOIaArowhrZ2djSUkzPuH_l7UF6sKYpbKD4C2nZWCBhuULLJE-w"}', 'uLvKHrTtFohGVMLDxlMZythEXmTJTx8DCT2VwZ_x5Aw0UzTzoastrn2tFy4I8jeTYzS-N8D-PZ_khfVzfFZHBg.eyJ0IjoiMjAxOC0wMy0wMVQwOTo0ODo1OC41ODMzNTRaIn0', max_age=0xffffffff)
assert resp['token'] == 'iiWGyrgBZDOOclHjnQILU6zHL1Mjl-yXUpjHOIaArowhrZ2djSUkzPuH_l7UF6sKYpbKD4C2nZWCBhuULLJE-w'
assert resp['relay_id'] == uuid.UUID('2ffe6ba6-3a27-4936-b30f-d6944a4f1216')
| 72.1875 | 362 | 0.827273 |
81411abc782bf9b1f6f3f22e5119bf12fc73f345 | 5,777 | py | Python | moe/bandit/ucb/ucb_interface.py | dstoeckel/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | [
"Apache-2.0"
] | 966 | 2015-01-10T05:27:30.000Z | 2022-03-26T21:04:36.000Z | moe/bandit/ucb/ucb_interface.py | dstoeckel/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | [
"Apache-2.0"
] | 46 | 2015-01-16T22:33:08.000Z | 2019-09-04T16:33:27.000Z | moe/bandit/ucb/ucb_interface.py | dstoeckel/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | [
"Apache-2.0"
] | 143 | 2015-01-07T03:57:19.000Z | 2022-02-28T01:10:45.000Z | # -*- coding: utf-8 -*-
"""Classes (Python) to compute the Bandit UCB (Upper Confidence Bound) arm allocation and choosing the arm to pull next.
See :mod:`moe.bandit.bandit_interface` for further details on bandit.
"""
import copy
from abc import abstractmethod
from moe.bandit.bandit_interface import BanditInterface
from moe.bandit.utils import get_winning_arm_names_from_payoff_arm_name_list, get_equal_arm_allocations
class UCBInterface(BanditInterface):
r"""Implementation of the constructor of UCB (Upper Confidence Bound) and method allocate_arms. The method get_ucb_payoff is implemented in subclass.
A class to encapsulate the computation of bandit UCB.
The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf
To inherit this class, a subclass needs to implement get_ucb_payoff
(see :func:`moe.bandit.ucb.ucb1.UCB1.get_ucb_payoff` for an example), everything else is already implemented.
See :mod:`moe.bandit.bandit_interface` docs for further details.
"""
def __init__(
self,
historical_info,
subtype=None,
):
"""Construct a UCB object.
:param historical_info: a dictionary of arms sampled
:type historical_info: dictionary of (str, SampleArm()) pairs (see :class:`moe.bandit.data_containers.SampleArm` for more details)
:param subtype: subtype of the UCB bandit algorithm (default: None)
:type subtype: str
"""
self._historical_info = copy.deepcopy(historical_info)
self._subtype = subtype
@staticmethod
def get_unsampled_arm_names(arms_sampled):
r"""Compute the set of unsampled arm names based on the given ``arms_sampled``..
Throws an exception when arms_sampled is empty.
:param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm`
:type arms_sampled: dictionary of (str, SampleArm()) pairs
:return: set of names of the unsampled arms
:rtype: frozenset(str)
:raise: ValueError when ``arms_sampled`` are empty.
"""
if not arms_sampled:
raise ValueError('arms_sampled is empty!')
unsampled_arm_name_list = [name for name, sampled_arm in arms_sampled.iteritems() if sampled_arm.total == 0]
return frozenset(unsampled_arm_name_list)
@abstractmethod
def get_ucb_payoff(self, sampled_arm, number_sampled):
r"""Compute the expected upper confidence bound payoff using the UCB subtype formula.
See definition in subclasses for details.
:param sampled_arm: a sampled arm
:type sampled_arm: :class:`moe.bandit.data_containers.SampleArm`
:param number_sampled: the overall number of pulls so far
:type number_sampled: int
:return: ucb payoff
:rtype: float64
:raise: ValueError when ``sampled_arm`` is empty.
"""
pass
def allocate_arms(self):
r"""Compute the allocation to each arm given ``historical_info``, running bandit ``subtype`` endpoint.
Computes the allocation to each arm based on the given subtype, and, historical info.
Works with k-armed bandits (k >= 1).
The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf
If there is at least one unsampled arm, this method will choose to pull the unsampled arm
(randomly choose an unsampled arm if there are multiple unsampled arms).
If all arms are pulled at least once, this method will pull the optimal arm
(best expected upper confidence bound payoff).
See :func:`moe.bandit.ucb.ucb_interface.UCBInterface.get_ucb_payoff` for details on how to compute the expected upper confidence bound payoff (expected UCB payoff)
In case of a tie, the method will split the allocation among the optimal arms.
For example, if we have three arms (arm1, arm2, and arm3) with expected UCB payoff 0.5, 0.5, and 0.1 respectively.
We split the allocation between the optimal arms arm1 and arm2.
``{arm1: 0.5, arm2: 0.5, arm3: 0.0}``
:return: the dictionary of (arm, allocation) key-value pairs
:rtype: a dictionary of (str, float64) pairs
:raise: ValueError when ``sample_arms`` are empty.
"""
arms_sampled = self._historical_info.arms_sampled
if not arms_sampled:
raise ValueError('sample_arms are empty!')
return get_equal_arm_allocations(arms_sampled, self.get_winning_arm_names(arms_sampled))
def get_winning_arm_names(self, arms_sampled):
r"""Compute the set of winning arm names based on the given ``arms_sampled``..
Throws an exception when arms_sampled is empty.
:param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm`
:type arms_sampled: dictionary of (str, SampleArm()) pairs
:return: set of names of the winning arms
:rtype: frozenset(str)
:raise: ValueError when ``arms_sampled`` are empty.
"""
if not arms_sampled:
raise ValueError('arms_sampled is empty!')
# If there exists an unsampled arm, return the names of the unsampled arms
unsampled_arm_names = self.get_unsampled_arm_names(arms_sampled)
if unsampled_arm_names:
return unsampled_arm_names
number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
return get_winning_arm_names_from_payoff_arm_name_list(ucb_payoff_arm_name_list)
| 41.561151 | 171 | 0.701229 |
58240783179f0f625d2fcf5128e5fa70d688d446 | 2,603 | py | Python | scraping/reddit/reddit_scraping.py | Bahrifirass/rg_dataset | fe315f9bf0c3b59e781c1c3d57c9c4c59443ea75 | [
"MIT"
] | 3 | 2021-03-02T07:41:02.000Z | 2021-06-01T18:22:39.000Z | scraping/reddit/reddit_scraping.py | Bahrifirass/rg_dataset | fe315f9bf0c3b59e781c1c3d57c9c4c59443ea75 | [
"MIT"
] | 1 | 2021-03-15T08:05:58.000Z | 2021-03-15T08:05:58.000Z | scraping/reddit/reddit_scraping.py | Bahrifirass/rg_dataset | fe315f9bf0c3b59e781c1c3d57c9c4c59443ea75 | [
"MIT"
] | 5 | 2021-03-14T20:25:30.000Z | 2022-01-17T14:05:21.000Z | import praw
import json
SAVED_RESULTS = ['reddit.jsonl', 'reddit.json']
# you can use your own credentials if you want
CLIENT_ID = 'EZ57p7KGDL0_ZQ'
CLIENT_SECRET = 'FXYfQdTXOpiHEnF94s1Kcg2oeHeqWA'
USER_AGENT = 'Reddit WebScrapping'
def authenticate(id, secret, name):
''' authentication function to the Reddit instance
To create your own Reddit instance follow this link
https://www.reddit.com/prefs/apps
:param client_id: The Reddit instance ID found in the top left
:param client_secret: The Reddit instance secret token
:param user_agent: The Reddit instance name
:return: the Reddit instance created
'''
reddit = praw.Reddit(client_id=id, client_secret=secret, user_agent=name)
return reddit
def scraping_submissions(sub="all", search_tag="guitar timbre", file_path=SAVED_RESULTS, json_lines = True):
''' function that search the tag in all the subreddits, then loops through all the submissions and store them and
their comments in a dictionary.
:param sub: use 'all' if you want to search in all the subreddits, otherwise use its specific name. E.g. RoastMe
:param search_tag: the tag you want to look for in the subreddits / submissions
:param file_path: list containing all possible paths to store data in
:param json_lines: True if you want the output in a JSON Lines format, False if you want the normal JSON format
:return: a dictionary containing (submission : comments) pairs
'''
instance = authenticate(CLIENT_ID, CLIENT_SECRET, USER_AGENT)
sub_comments = {}
# loop through all the submissions and extract their comments
# search the tag guitar timbre through all subreddits
for submission in instance.subreddit(sub).search(search_tag):
submission.comments.replace_more(limit=0)
comments = []
for top_level_comment in submission.comments:
comments.append(top_level_comment.body)
# store the comments in a dict with key(submission) and value(list of comments of the submission)
sub_comments[submission.title] = comments
# dumb into a json file
if json_lines == True:
with open(file_path[0], 'w') as fp:
json.dump(sub_comments, fp, indent=4)
else:
with open(file_path[1], 'w') as fp:
json.dump(sub_comments, fp, indent=4)
if __name__ == "__main__":
# authenticate to the reddit instance created
#instance = authenticate(CLIENT_ID, CLIENT_SECRET, USER_AGENT)
# scraping comments from Reddit
scraping_submissions("all", "guitar timbre", SAVED_RESULTS, True) | 37.185714 | 117 | 0.718018 |
991b6381f50e88c92c1c995f77a8349ed6515fb6 | 43,409 | py | Python | gamestonk_terminal/stocks/options/options_controller.py | dakhouya/GamestonkTerminal | 006b3570b795215a17c64841110b649b03db9a98 | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/options/options_controller.py | dakhouya/GamestonkTerminal | 006b3570b795215a17c64841110b649b03db9a98 | [
"MIT"
] | 1 | 2022-03-29T13:45:05.000Z | 2022-03-29T13:45:05.000Z | gamestonk_terminal/stocks/options/options_controller.py | dakhouya/GamestonkTerminal | 006b3570b795215a17c64841110b649b03db9a98 | [
"MIT"
] | 1 | 2021-06-20T02:42:40.000Z | 2021-06-20T02:42:40.000Z | """ Options Controller Module """
__docformat__ = "numpy"
import argparse
import os
from datetime import datetime, timedelta
from typing import List
from colorama import Style
import pandas as pd
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal.parent_classes import BaseController
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.config_terminal import TRADIER_TOKEN
from gamestonk_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_FIGURES_ALLOWED,
EXPORT_ONLY_RAW_DATA_ALLOWED,
parse_known_args_and_warn,
valid_date,
)
from gamestonk_terminal.menu import session
from gamestonk_terminal.stocks.options import (
barchart_view,
calculator_view,
fdscanner_view,
syncretism_view,
tradier_model,
tradier_view,
yfinance_model,
yfinance_view,
alphaquery_view,
chartexchange_view,
payoff_controller,
pricing_controller,
screener_controller,
)
# pylint: disable=R1710,C0302,R0916
class OptionsController(BaseController):
"""Options Controller class"""
CHOICES_COMMANDS = [
"calc",
"yf",
"tr",
"info",
"pcr",
"load",
"exp",
"vol",
"voi",
"oi",
"hist",
"chains",
"grhist",
"unu",
"plot",
"parity",
"binom",
]
CHOICES_MENUS = [
"payoff",
"pricing",
"screen",
]
PRESET_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), "presets/")
presets = [f.split(".")[0] for f in os.listdir(PRESET_PATH) if f.endswith(".ini")]
grhist_greeks_choices = [
"iv",
"gamma",
"theta",
"vega",
"delta",
"rho",
"premium",
]
unu_sortby_choices = [
"Strike",
"Vol/OI",
"Vol",
"OI",
"Bid",
"Ask",
"Exp",
"Ticker",
]
pcr_length_choices = ["10", "20", "30", "60", "90", "120", "150", "180"]
load_source_choices = ["tr", "yf"]
hist_source_choices = ["td", "ce"]
voi_source_choices = ["tr", "yf"]
oi_source_choices = ["tr", "yf"]
plot_vars_choices = ["ltd", "s", "lp", "b", "a", "c", "pc", "v", "oi", "iv"]
plot_custom_choices = ["smile"]
def __init__(self, ticker: str, queue: List[str] = None):
"""Constructor"""
super().__init__("/stocks/options/", queue)
self.ticker = ticker
self.prices = pd.DataFrame(columns=["Price", "Chance"])
self.selected_date = ""
self.chain = None
if ticker:
if TRADIER_TOKEN == "REPLACE_ME":
print("Loaded expiry dates from Yahoo Finance")
self.expiry_dates = yfinance_model.option_expirations(self.ticker)
else:
print("Loaded expiry dates from Tradier")
self.expiry_dates = tradier_model.option_expirations(self.ticker)
else:
self.expiry_dates = []
if session and gtff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["unu"]["-s"] = {c: {} for c in self.unu_sortby_choices}
choices["pcr"] = {c: {} for c in self.pcr_length_choices}
choices["disp"] = {c: {} for c in self.presets}
choices["scr"] = {c: {} for c in self.presets}
choices["grhist"]["-g"] = {c: {} for c in self.grhist_greeks_choices}
choices["load"]["-s"] = {c: {} for c in self.load_source_choices}
choices["load"]["--source"] = {c: {} for c in self.hist_source_choices}
choices["load"]["-s"] = {c: {} for c in self.voi_source_choices}
choices["plot"]["-x"] = {c: {} for c in self.plot_vars_choices}
choices["plot"]["-y"] = {c: {} for c in self.plot_vars_choices}
choices["plot"]["-c"] = {c: {} for c in self.plot_custom_choices}
# This menu contains dynamic choices that may change during runtime
self.choices = choices
self.completer = NestedCompleter.from_nested_dict(choices)
def update_runtime_choices(self):
"""Update runtime choices"""
if self.expiry_dates and session and gtff.USE_PROMPT_TOOLKIT:
self.choices["exp"] = {str(c): {} for c in range(len(self.expiry_dates))}
self.choices["exp"]["-d"] = {c: {} for c in self.expiry_dates + [""]}
if self.chain:
self.choices["hist"] = {
str(c): {}
for c in self.chain.puts["strike"] + self.chain.calls["strike"]
}
self.choices["grhist"] = {
str(c): {}
for c in self.chain.puts["strike"] + self.chain.calls["strike"]
}
self.choices["binom"] = {
str(c): {}
for c in self.chain.puts["strike"] + self.chain.calls["strike"]
}
self.completer = NestedCompleter.from_nested_dict(self.choices)
def print_help(self):
"""Print help."""
colored = self.ticker and self.selected_date
help_text = f"""
unu show unusual options activity [fdscanner.com]
calc basic call/put PnL calculator
load load new ticker
exp see and set expiration dates
Ticker: {self.ticker or None}
Expiry: {self.selected_date or None}
{"" if self.ticker else Style.DIM}
pcr display put call ratio for ticker [AlphaQuery.com]{Style.DIM if not colored else ''}
info display option information (volatility, IV rank etc) [Barchart.com]
chains display option chains with greeks [Tradier]
oi plot open interest [Tradier/YF]
vol plot volume [Tradier/YF]
voi plot volume and open interest [Tradier/YF]
hist plot option history [Tradier]
grhist plot option greek history [Syncretism.io]
plot plot variables provided by the user [Yfinance]
parity shows whether options are above or below expected price [Yfinance]
binom shows the value of an option using binomial options pricing [Yfinance]
{Style.RESET_ALL if not colored else ''}
> screen screens tickers based on preset [Syncretism.io]{"" if colored else Style.DIM}
> payoff shows payoff diagram for a selection of options [Yfinance]
> pricing shows options pricing and risk neutral valuation [Yfinance]
{Style.RESET_ALL if not colored else ''}"""
print(help_text)
def custom_reset(self):
"""Class specific component of reset command"""
if self.ticker:
if self.selected_date:
return [
"stocks",
f"load {self.ticker}",
"options",
f"exp -d {self.selected_date}",
]
return ["stocks", f"load {self.ticker}", "options"]
return []
def call_calc(self, other_args: List[str]):
"""Process calc command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="calc",
description="Calculate profit or loss for given option settings.",
)
parser.add_argument(
"--put",
action="store_true",
default=False,
dest="put",
help="Flag to calculate put option",
)
parser.add_argument(
"--sell",
action="store_true",
default=False,
dest="sell",
help="Flag to get profit chart of selling contract",
)
parser.add_argument(
"-s",
"--strike",
type=float,
dest="strike",
help="Option strike price",
default=10,
)
parser.add_argument(
"-p",
"--premium",
type=float,
dest="premium",
help="Premium price",
default=1,
)
parser.add_argument(
"-m",
"--min",
type=float,
dest="min",
help="Min price to look at",
default=-1,
required="-M" in other_args,
)
parser.add_argument(
"-M",
"--max",
type=float,
dest="max",
help="Max price to look at",
default=-1,
required="-m" in other_args,
)
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
if ns_parser.min > 0 and ns_parser.max > 0:
pars = {"x_min": ns_parser.min, "x_max": ns_parser.max}
else:
pars = {}
calculator_view.view_calculator(
strike=ns_parser.strike,
premium=ns_parser.premium,
put=ns_parser.put,
sell=ns_parser.sell,
**pars,
)
def call_unu(self, other_args: List[str]):
"""Process act command"""
parser = argparse.ArgumentParser(
prog="unu",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This command gets unusual options from fdscanner.com",
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
type=int,
default=20,
help="Limit of options to show. Each scraped page gives 20 results.",
)
parser.add_argument(
"-s",
"--sortby",
dest="sortby",
nargs="+",
default="Vol/OI",
choices=self.unu_sortby_choices,
help="Column to sort by. Vol/OI is the default and typical variable to be considered unusual.",
)
parser.add_argument(
"-a",
"--ascending",
dest="ascend",
default=False,
action="store_true",
help="Flag to sort in ascending order",
)
parser.add_argument(
"-p",
"--puts_only",
dest="puts_only",
help="Flag to show puts only",
default=False,
action="store_true",
)
parser.add_argument(
"-c",
"--calls_only",
dest="calls_only",
help="Flag to show calls only",
default=False,
action="store_true",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
if ns_parser.calls_only and ns_parser.puts_only:
print(
"Cannot return puts only and calls only. Either use one or neither\n."
)
else:
fdscanner_view.display_options(
num=ns_parser.limit,
sort_column=ns_parser.sortby,
export=ns_parser.export,
ascending=ns_parser.ascend,
calls_only=ns_parser.calls_only,
puts_only=ns_parser.puts_only,
)
def call_pcr(self, other_args: List[str]):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="pcr",
description="Display put to call ratio for ticker [AlphaQuery.com]",
)
parser.add_argument(
"-l",
"-length",
help="Window length to get",
dest="length",
choices=self.pcr_length_choices,
default=30,
)
parser.add_argument(
"-s",
"--start",
help="Start date for plot",
type=valid_date,
default=datetime.now() - timedelta(days=366),
dest="start",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
if self.ticker:
alphaquery_view.display_put_call_ratio(
ticker=self.ticker,
window=ns_parser.length,
start_date=ns_parser.start.strftime("%Y-%m-%d"),
export=ns_parser.export,
)
else:
print("No ticker loaded.\n")
def call_info(self, other_args: List[str]):
"""Process info command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="info",
description="Display option data [Source: Barchart.com]",
)
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
if self.ticker:
barchart_view.print_options_data(
ticker=self.ticker, export=ns_parser.export
)
else:
print("No ticker loaded.\n")
def call_grhist(self, other_args: List[str]):
"""Process grhist command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="grhist",
description="Plot historical option greeks.",
)
parser.add_argument(
"-s",
"--strike",
dest="strike",
type=float,
required="--chain" in other_args or "-h" not in other_args,
help="Strike price to look at",
)
parser.add_argument(
"-p",
"--put",
dest="put",
action="store_true",
default=False,
help="Flag for showing put option",
)
parser.add_argument(
"-g",
"--greek",
dest="greek",
type=str,
choices=self.grhist_greeks_choices,
default="delta",
help="Greek column to select",
)
parser.add_argument(
"-c",
"--chain",
dest="chain_id",
default="",
type=str,
help="OCC option symbol",
)
parser.add_argument(
"-r",
"--raw",
dest="raw",
action="store_true",
default=False,
help="Display raw data",
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
default=20,
help="Limit of raw data rows to display",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-s")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
if self.ticker:
if self.selected_date:
if self.chain and (
(
ns_parser.put
and ns_parser.strike
in [float(strike) for strike in self.chain.puts["strike"]]
)
or (
not ns_parser.put
and ns_parser.strike
in [float(strike) for strike in self.chain.calls["strike"]]
)
):
syncretism_view.view_historical_greeks(
ticker=self.ticker,
expiry=self.selected_date,
strike=ns_parser.strike,
greek=ns_parser.greek,
chain_id=ns_parser.chain_id,
put=ns_parser.put,
raw=ns_parser.raw,
n_show=ns_parser.limit,
export=ns_parser.export,
)
else:
print("No correct strike input\n")
else:
print("No expiry loaded. First use `exp <expiry date>`\n")
else:
print("No ticker loaded. First use `load <ticker>` \n")
def call_load(self, other_args: List[str]):
"""Process load command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description="Load a ticker into option menu",
)
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="ticker",
required="-h" not in other_args,
help="Stock ticker",
)
parser.add_argument(
"-s",
"--source",
choices=self.load_source_choices,
dest="source",
default=None,
help="Source to get option expirations from",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-t")
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.ticker = ns_parser.ticker.upper()
self.update_runtime_choices()
if TRADIER_TOKEN == "REPLACE_ME" or ns_parser.source == "yf":
self.expiry_dates = yfinance_model.option_expirations(self.ticker)
else:
self.expiry_dates = tradier_model.option_expirations(self.ticker)
print("")
if self.ticker and self.selected_date:
self.chain = yfinance_model.get_option_chain(
self.ticker, self.selected_date
)
    def call_exp(self, other_args: List[str]):
        """Process exp command: list available expiry dates or select one by index/date."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="exp",
            description="See and set expiration date",
        )
        parser.add_argument(
            "-i",
            "--index",
            dest="index",
            action="store",
            type=int,
            default=-1,
            choices=range(len(self.expiry_dates)),
            help="Select index for expiry date.",
        )
        parser.add_argument(
            "-d",
            "--date",
            dest="date",
            type=str,
            choices=self.expiry_dates + [""],
            help="Select date (YYYY-MM-DD)",
            default="",
        )
        # A bare first argument is treated as the index (implicit -i).
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-i")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if self.ticker:
                # Print possible expiry dates when neither -i nor -d was given
                # (index keeps its -1 sentinel and date is empty).
                if ns_parser.index == -1 and not ns_parser.date:
                    print("\nAvailable expiry dates:")
                    for i, d in enumerate(self.expiry_dates):
                        print(f" {(2 - len(str(i))) * ' '}{i}. {d}")
                    print("")
                elif ns_parser.date:
                    if ns_parser.date in self.expiry_dates:
                        print(f"Expiration set to {ns_parser.date} \n")
                        self.selected_date = ns_parser.date
                        self.update_runtime_choices()
                    else:
                        print("Expiration not an option")
                else:
                    expiry_date = self.expiry_dates[ns_parser.index]
                    print(f"Expiration set to {expiry_date} \n")
                    self.selected_date = expiry_date
                    self.update_runtime_choices()
                # Cache the Yahoo Finance chain for the newly selected expiry.
                if self.selected_date:
                    self.chain = yfinance_model.get_option_chain(
                        self.ticker, self.selected_date
                    )
                    self.update_runtime_choices()
            else:
                print("Please load a ticker using `load <ticker>`.\n")
def call_hist(self, other_args: List[str]):
"""Process hist command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="hist",
description="Gets historical quotes for given option chain",
)
parser.add_argument(
"-s",
"--strike",
dest="strike",
type=float,
required="--chain" not in other_args and "-c" not in other_args,
help="Strike price to look at",
)
parser.add_argument(
"-p",
"--put",
dest="put",
action="store_true",
default=False,
help="Flag for showing put option",
)
parser.add_argument(
"-c", "--chain", dest="chain_id", type=str, help="OCC option symbol"
)
parser.add_argument(
"-r",
"--raw",
dest="raw",
action="store_true",
default=False,
help="Display raw data",
)
parser.add_argument(
"--source",
dest="source",
type=str,
choices=self.hist_source_choices,
default="ce",
help="Choose Tradier(TD) or ChartExchange (CE), only affects raw data",
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
type=int,
help="Limit of data rows to display",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-s")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
if self.ticker:
if self.selected_date:
if self.chain and (
(
ns_parser.put
and ns_parser.strike
in [float(strike) for strike in self.chain.puts["strike"]]
)
or (
not ns_parser.put
and ns_parser.strike
in [float(strike) for strike in self.chain.calls["strike"]]
)
):
if ns_parser.source.lower() == "ce":
chartexchange_view.display_raw(
self.ticker,
self.selected_date,
not ns_parser.put,
ns_parser.strike,
ns_parser.limit,
ns_parser.export,
)
else:
if TRADIER_TOKEN != "REPLACE_ME":
tradier_view.display_historical(
ticker=self.ticker,
expiry=self.selected_date,
strike=ns_parser.strike,
put=ns_parser.put,
raw=ns_parser.raw,
chain_id=ns_parser.chain_id,
export=ns_parser.export,
)
else:
print("TRADIER TOKEN not supplied. \n")
else:
print("No correct strike input\n")
else:
print("No expiry loaded. First use `exp <expiry date>` \n")
else:
print("No ticker loaded. First use `load <ticker>`\n")
    def call_chains(self, other_args: List[str]):
        """Process chains command: display the option chain table (Tradier only)."""
        parser = argparse.ArgumentParser(
            prog="chains",
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description="Display option chains",
        )
        parser.add_argument(
            "-c",
            "--calls",
            action="store_true",
            default=False,
            dest="calls",
            help="Flag to show calls only",
        )
        parser.add_argument(
            "-p",
            "--puts",
            action="store_true",
            default=False,
            dest="puts",
            help="Flag to show puts only",
        )
        parser.add_argument(
            "-m",
            "--min",
            dest="min_sp",
            type=float,
            default=-1,
            help="minimum strike price to consider.",
        )
        parser.add_argument(
            "-M",
            "--max",
            dest="max_sp",
            type=float,
            default=-1,
            help="maximum strike price to consider.",
        )
        parser.add_argument(
            "-d",
            "--display",
            dest="to_display",
            default=tradier_model.default_columns,
            type=tradier_view.check_valid_option_chains_headers,
            help="columns to look at. Columns can be: {bid, ask, strike, bidsize, asksize, volume, open_interest, "
            "delta, gamma, theta, vega, ask_iv, bid_iv, mid_iv} ",
        )
        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    # Chains data comes exclusively from Tradier, so a token is required.
                    if TRADIER_TOKEN != "REPLACE_ME":
                        tradier_view.display_chains(
                            ticker=self.ticker,
                            expiry=self.selected_date,
                            to_display=ns_parser.to_display,
                            min_sp=ns_parser.min_sp,
                            max_sp=ns_parser.max_sp,
                            calls_only=ns_parser.calls,
                            puts_only=ns_parser.puts,
                            export=ns_parser.export,
                        )
                    else:
                        print("TRADIER TOKEN not supplied. \n")
                else:
                    print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                print("No ticker loaded. First use `load <ticker>`\n")
    def call_vol(self, other_args: List[str]):
        """Process vol command: plot traded volume per strike for the selected expiry."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="vol",
            description="Plot volume. Volume refers to the number of contracts traded today.",
        )
        parser.add_argument(
            "-m",
            "--min",
            default=-1,
            type=float,
            help="Min strike to plot",
            dest="min",
        )
        parser.add_argument(
            "-M",
            "--max",
            default=-1,
            type=float,
            help="Max strike to plot",
            dest="max",
        )
        parser.add_argument(
            "-c",
            "--calls",
            action="store_true",
            default=False,
            dest="calls",
            help="Flag to plot call options only",
        )
        parser.add_argument(
            "-p",
            "--puts",
            action="store_true",
            default=False,
            dest="puts",
            help="Flag to plot put options only",
        )
        parser.add_argument(
            "-s",
            "--source",
            type=str,
            default="tr",
            choices=["tr", "yf"],
            dest="source",
            help="Source to get data from",
        )
        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    # Tradier is used only when selected AND a token is configured;
                    # otherwise fall back to Yahoo Finance.
                    if ns_parser.source == "tr" and TRADIER_TOKEN != "REPLACE_ME":
                        tradier_view.plot_vol(
                            ticker=self.ticker,
                            expiry=self.selected_date,
                            min_sp=ns_parser.min,
                            max_sp=ns_parser.max,
                            calls_only=ns_parser.calls,
                            puts_only=ns_parser.puts,
                            export=ns_parser.export,
                        )
                    else:
                        yfinance_view.plot_vol(
                            ticker=self.ticker,
                            expiry=self.selected_date,
                            min_sp=ns_parser.min,
                            max_sp=ns_parser.max,
                            calls_only=ns_parser.calls,
                            puts_only=ns_parser.puts,
                            export=ns_parser.export,
                        )
                else:
                    print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                print("No ticker loaded. First use `load <ticker>`\n")
    def call_voi(self, other_args: List[str]):
        """Process voi command: plot volume plus open interest, calls vs puts."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="voi",
            description="""Plots Volume + Open Interest of calls vs puts.""",
        )
        parser.add_argument(
            "-v",
            "--minv",
            dest="min_vol",
            type=float,
            default=-1,
            help="minimum volume (considering open interest) threshold of the plot.",
        )
        parser.add_argument(
            "-m",
            "--min",
            dest="min_sp",
            type=float,
            default=-1,
            help="minimum strike price to consider in the plot.",
        )
        parser.add_argument(
            "-M",
            "--max",
            dest="max_sp",
            type=float,
            default=-1,
            help="maximum strike price to consider in the plot.",
        )
        parser.add_argument(
            "-s",
            "--source",
            type=str,
            default="tr",
            choices=self.voi_source_choices,
            dest="source",
            help="Source to get data from",
        )
        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    # Tradier is used only when selected AND a token is configured;
                    # otherwise fall back to Yahoo Finance.
                    if ns_parser.source == "tr" and TRADIER_TOKEN != "REPLACE_ME":
                        tradier_view.plot_volume_open_interest(
                            ticker=self.ticker,
                            expiry=self.selected_date,
                            min_sp=ns_parser.min_sp,
                            max_sp=ns_parser.max_sp,
                            min_vol=ns_parser.min_vol,
                            export=ns_parser.export,
                        )
                    else:
                        yfinance_view.plot_volume_open_interest(
                            ticker=self.ticker,
                            expiry=self.selected_date,
                            min_sp=ns_parser.min_sp,
                            max_sp=ns_parser.max_sp,
                            min_vol=ns_parser.min_vol,
                            export=ns_parser.export,
                        )
                else:
                    print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                print("No ticker loaded. First use `load <ticker>`\n")
    def call_oi(self, other_args: List[str]):
        """Process oi command: plot open interest per strike for the selected expiry."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="oi",
            description="Plot open interest. Open interest represents the number of contracts that exist.",
        )
        parser.add_argument(
            "-m",
            "--min",
            default=-1,
            type=float,
            help="Min strike to plot",
            dest="min",
        )
        parser.add_argument(
            "-M",
            "--max",
            default=-1,
            type=float,
            help="Max strike to plot",
            dest="max",
        )
        parser.add_argument(
            "-c",
            "--calls",
            action="store_true",
            default=False,
            dest="calls",
            help="Flag to plot call options only",
        )
        parser.add_argument(
            "-p",
            "--puts",
            action="store_true",
            default=False,
            dest="puts",
            help="Flag to plot put options only",
        )
        parser.add_argument(
            "-s",
            "--source",
            type=str,
            default="tr",
            choices=self.oi_source_choices,
            dest="source",
            help="Source to get data from",
        )
        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
        )
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    # Tradier is used only when selected AND a token is configured;
                    # otherwise fall back to Yahoo Finance.
                    if ns_parser.source == "tr" and TRADIER_TOKEN != "REPLACE_ME":
                        tradier_view.plot_oi(
                            ticker=self.ticker,
                            expiry=self.selected_date,
                            min_sp=ns_parser.min,
                            max_sp=ns_parser.max,
                            calls_only=ns_parser.calls,
                            puts_only=ns_parser.puts,
                            export=ns_parser.export,
                        )
                    else:
                        yfinance_view.plot_oi(
                            ticker=self.ticker,
                            expiry=self.selected_date,
                            min_sp=ns_parser.min,
                            max_sp=ns_parser.max,
                            calls_only=ns_parser.calls,
                            puts_only=ns_parser.puts,
                            export=ns_parser.export,
                        )
                else:
                    print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                print("No ticker loaded. First use `load <ticker>`\n")
    def call_plot(self, other_args: List[str]):
        """Process plot command: scatter any two chain variables against each other."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="plot",
            description="Shows a plot for the given x and y variables",
        )
        parser.add_argument(
            "-p",
            "--put",
            action="store_true",
            default=False,
            dest="put",
            help="Shows puts instead of calls",
        )
        parser.add_argument(
            "-x",
            "--x_axis",
            type=str,
            dest="x",
            default=None,
            choices=self.plot_vars_choices,
            help=(
                "ltd- last trade date, s- strike, lp- last price, b- bid, a- ask,"
                "c- change, pc- percent change, v- volume, oi- open interest, iv- implied volatility"
            ),
        )
        parser.add_argument(
            "-y",
            "--y_axis",
            type=str,
            dest="y",
            default=None,
            choices=self.plot_vars_choices,
            help=(
                "ltd- last trade date, s- strike, lp- last price, b- bid, a- ask,"
                "c- change, pc- percent change, v- volume, oi- open interest, iv- implied volatility"
            ),
        )
        parser.add_argument(
            "-c",
            "--custom",
            type=str,
            choices=self.plot_custom_choices,
            dest="custom",
            default=None,
            help="Choose from already created graphs",
        )
        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
        )
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    # Either both axes (-x and -y) or a preset (-c) must be supplied.
                    if (
                        ns_parser.x is None or ns_parser.y is None
                    ) and ns_parser.custom is None:
                        print("Please submit an X and Y value, or select a preset.\n")
                    else:
                        yfinance_view.plot_plot(
                            self.ticker,
                            self.selected_date,
                            ns_parser.put,
                            ns_parser.x,
                            ns_parser.y,
                            ns_parser.custom,
                            ns_parser.export,
                        )
                else:
                    print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                print("No ticker loaded. First use `load <ticker>`\n")
    def call_parity(self, other_args: List[str]):
        """Process parity command: put-call parity check for over/under valuation."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="parity",
            description="Shows whether options are over or under valued",
        )
        parser.add_argument(
            "-p",
            "--put",
            action="store_true",
            default=False,
            dest="put",
            help="Shows puts instead of calls",
        )
        parser.add_argument(
            "-a",
            "--ask",
            action="store_true",
            default=False,
            dest="ask",
            help="Use ask price instead of lastPrice",
        )
        parser.add_argument(
            "-m",
            "--min",
            type=float,
            default=None,
            dest="mini",
            help="Minimum strike price shown",
        )
        parser.add_argument(
            "-M",
            "--max",
            type=float,
            default=None,
            dest="maxi",
            help="Maximum strike price shown",
        )
        ns_parser = parse_known_args_and_warn(
            parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
        )
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    # min/max default to None, meaning "no bound" on shown strikes.
                    yfinance_view.show_parity(
                        self.ticker,
                        self.selected_date,
                        ns_parser.put,
                        ns_parser.ask,
                        ns_parser.mini,
                        ns_parser.maxi,
                        ns_parser.export,
                    )
                else:
                    print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                print("No ticker loaded. First use `load <ticker>`\n")
    def call_binom(self, other_args: List[str]):
        """Process binom command: binomial option valuation for a single strike."""
        parser = argparse.ArgumentParser(
            add_help=False,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            prog="binom",
            description="Gives the option value using binomial option valuation",
        )
        parser.add_argument(
            "-s",
            "--strike",
            type=float,
            default=0,
            dest="strike",
            help="Strike price for option shown",
        )
        parser.add_argument(
            "-p",
            "--put",
            action="store_true",
            default=False,
            dest="put",
            help="Value a put instead of a call",
        )
        parser.add_argument(
            "-e",
            "--european",
            action="store_true",
            default=False,
            dest="europe",
            help="Value a European option instead of an American one",
        )
        # NOTE: unlike the other commands, export here is a boolean xlsx flag
        # rather than the shared export-format mechanism.
        parser.add_argument(
            "-x",
            "--xlsx",
            action="store_true",
            default=False,
            dest="export",
            help="Export an excel spreadsheet with binomial pricing data",
        )
        parser.add_argument(
            "--plot",
            action="store_true",
            default=False,
            dest="plot",
            help="Plot expected ending values",
        )
        parser.add_argument(
            "-v",
            "--volatility",
            type=float,
            default=None,
            dest="volatility",
            help="Underlying asset annualized volatility",
        )
        # A bare first argument is treated as the strike (implicit -s).
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-s")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            if self.ticker:
                if self.selected_date:
                    yfinance_view.show_binom(
                        self.ticker,
                        self.selected_date,
                        ns_parser.strike,
                        ns_parser.put,
                        ns_parser.europe,
                        ns_parser.export,
                        ns_parser.plot,
                        ns_parser.volatility,
                    )
                else:
                    print("No expiry loaded. First use `exp {expiry date}`\n")
            else:
                print("No ticker loaded. First use `load <ticker>`\n")
def call_payoff(self, _):
"""Process payoff command"""
if self.ticker:
if self.selected_date:
self.queue = payoff_controller.PayoffController(
self.ticker, self.selected_date, self.queue
).menu()
else:
print("No expiry loaded. First use `exp {expiry date}`\n")
else:
print("No ticker loaded. First use `load <ticker>`\n")
def call_pricing(self, _):
"""Process pricing command"""
if self.ticker:
if self.selected_date:
self.queue = pricing_controller.PricingController(
self.ticker, self.selected_date, self.prices, self.queue
).menu()
else:
print("No expiry loaded. First use `exp {expiry date}`\n")
else:
print("No ticker loaded. First use `load <ticker>`\n")
    def call_screen(self, _):
        """Process screen command"""
        # Enter the options screener sub-menu; it hands back the remaining
        # command queue when the user exits.
        self.queue = screener_controller.ScreenerController(self.queue).menu()
| 34.979049 | 117 | 0.478173 |
f9de4e59414e071ea70e5ca9831e47c8be4bea5e | 9,862 | py | Python | easywrk/common.py | riag/easy-wrk | 107684f118225fce480d693c12d4127cc82a9f90 | [
"Apache-2.0"
] | null | null | null | easywrk/common.py | riag/easy-wrk | 107684f118225fce480d693c12d4127cc82a9f90 | [
"Apache-2.0"
] | null | null | null | easywrk/common.py | riag/easy-wrk | 107684f118225fce480d693c12d4127cc82a9f90 | [
"Apache-2.0"
] | null | null | null | #coding:utf8
import os
import sys
import attr
import logging
import base64
from pathlib import Path
from requests import Request
from dotenv import load_dotenv
from jinja2 import Environment, FileSystemLoader
import requests
from typing import List, Dict, Tuple, Any, IO
import cattr
from .validates import validate_name
logger = logging.getLogger(__name__)
# Maps an ApiField ``encode`` value to a function turning raw file bytes into
# the text embedded in the request body.  ``raw`` (None) means "no encoding":
# the file is read as text instead (see _encode_file_data).
BYTES_ENCODE_MAP = {
    # str(base64.b64encode(b)) would yield "b'...'" (the bytes repr, including
    # the b'' wrapper) — decode to get the clean base64 text.
    'base64': lambda b: base64.b64encode(b).decode('ascii'),
    'hex': bytes.hex,
    'raw': None,
}

# Maps an ApiField ``type`` value to the converter applied to its string value.
VALUE_TYPE_MAP = {
    'str': lambda b: b,
    'int': int,
    'float': float
}
@attr.s
class WrkConfig(object):
    """Parameters forwarded to the wrk load-generator command line."""
    threads= attr.ib(type=int, default=20)
    thread_connections= attr.ib(type=int, default=30)
    latency=attr.ib(type=bool, default=False)
    duration = attr.ib(type=str, default="10s")
@attr.s
class ApiField(object):
    """One named value used as a header, query parameter or body field."""
    name = attr.ib(type=str, default="")
    value = attr.ib(type=str, default="")
    # key into VALUE_TYPE_MAP: str, int or float
    type = attr.ib(type=str, default="str")
    # key into BYTES_ENCODE_MAP; supported values: raw, base64, hex
    encode = attr.ib(type=str, default="")
@attr.s
class ApiConfig(object):
    """Declarative description of one HTTP API to benchmark."""
    name = attr.ib(
        type=str,
        validator= attr.validators.instance_of(str)
    )
    @name.validator
    def validate_name(self, attribute, value):
        if not value :
            raise ValueError(f"field name must exist")
        if not validate_name(value):
            raise ValueError(f"value [{value}] is illegal")
    desc = attr.ib(type=str, default="")
    method = attr.ib(type=str, default="POST")
    path = attr.ib(type=str, default="")
    # Supported values:
    #   :json or :form -- a ':' prefix selects a built-in body format
    #                     (currently only json and form are supported)
    #   @<file path>   -- an '@' prefix puts the file's contents into the body
    #   anything else  -- used verbatim as the body string
    body = attr.ib(type=str, default=":json")
    # HTTP headers.  attr.Factory gives every instance its own list; a plain
    # `default=[]` would be one list shared by all ApiConfig instances.
    headers = attr.ib(type=List[ApiField], default=attr.Factory(list))
    # HTTP URL query parameters
    params = attr.ib(type=List[ApiField], default=attr.Factory(list))
    # json or form fields; a value of `@<file path>` means the file
    # contents are uploaded/embedded
    fields = attr.ib(type=List[ApiField], default=attr.Factory(list))
def load_env_file(fpath):
    """Load variables from the dotenv file at *fpath* into the process environment."""
    load_dotenv(fpath)
def render_config_file(fpath):
    """Render the config file at *fpath* as a Jinja2 template against os.environ."""
    config_dir = os.path.dirname(fpath)
    with open(fpath, 'r') as f:
        template_text = f.read()
    env = Environment(loader=FileSystemLoader(config_dir))
    return env.from_string(template_text).render(**os.environ)
def make_wrkconfig(config) -> WrkConfig:
    """Structure the parsed config's ``wrk`` section into a WrkConfig."""
    return cattr.structure(config['wrk'], WrkConfig)
def make_apiconfigs(config) -> List[ApiConfig]:
    """Structure the parsed config's ``apis`` section into a list of ApiConfig."""
    return cattr.structure(config['apis'], List[ApiConfig])
@attr.s
class EasyWrkContext(object):
    """Aggregates everything needed for one benchmark session."""
    base_url = attr.ib(type=str)
    config_file_dir = attr.ib(type=Path)
    wrk_config = attr.ib(type=WrkConfig)
    api_config_list = attr.ib(type=List[ApiConfig])
    # api name -> ApiConfig, for lookup by name
    api_config_map = attr.ib(type=Dict[str, ApiConfig])
    def get_api_dir(self, api_name) -> Path:
        """Return the benchmark output directory for *api_name*, creating it on first use."""
        api_dir = self.config_file_dir.joinpath('benchmark', api_name)
        if not api_dir.is_dir():
            api_dir.mkdir(exist_ok=True, parents=True)
        return api_dir
def create_easywrk_context(base_url:str, config_file_dir:Path, config):
    """Build an EasyWrkContext from a parsed configuration mapping.

    Duplicate api names are logged and the last definition wins.
    """
    wrk_config = make_wrkconfig(config)
    api_list = make_apiconfigs(config)

    api_map = {}
    for cfg in api_list:
        if cfg.name in api_map:
            logger.warning(f"api [{cfg.name}] is repeat, please check config file")
        api_map[cfg.name] = cfg

    return EasyWrkContext(
        base_url=base_url,
        config_file_dir=config_file_dir,
        wrk_config=wrk_config,
        api_config_list=api_list,
        api_config_map=api_map,
    )
@attr.s
class RequestBuilder(object):
    """Mutable accumulator for the parts of one HTTP request."""
    url = attr.ib(type=str, default="")
    method = attr.ib(type=str, default="")
    # attr.Factory gives every instance a fresh list/dict; plain `default=[]`
    # and `default={}` would be shared by all RequestBuilder instances.
    params = attr.ib(type=List[Tuple], default=attr.Factory(list))
    headers = attr.ib(type=Dict[str, str], default=attr.Factory(dict))
    data = attr.ib(type=bytes, default=None)
    json = attr.ib(type=Dict[str, Any], default=None)
    files = attr.ib(type=List[Tuple], default=None)
    def build(self):
        """Return a prepared requests request built from the collected parts."""
        req = Request(
            url = self.url,
            method = self.method,
            headers= self.headers,
            params= self.params,
            data= self.data,
            json = self.json,
            files= self.files
        )
        return req.prepare()
class BuildRequestException(Exception):
    """Raised when an ApiConfig cannot be turned into a valid HTTP request."""
    def __init__(self, msg:str):
        super().__init__(msg)
def join_url_path(base_url:str, path:str):
    """Join *path* onto *base_url*, ensuring exactly one '/' at the seam."""
    base_has_slash = base_url.endswith('/')
    path_has_slash = path.startswith('/')
    if not base_has_slash and not path_has_slash:
        return base_url + '/' + path
    if base_has_slash and path_has_slash:
        return base_url + path[1:]
    return base_url + path
def build_request(context: EasyWrkContext , api_config: ApiConfig):
    """Assemble a RequestBuilder for *api_config* against the context's base URL."""
    builder = RequestBuilder(
        url=join_url_path(context.base_url, api_config.path),
        method=api_config.method,
    )
    build_headers_map(builder, api_config)
    build_params(builder, api_config)
    build_body(context.config_file_dir, builder, api_config)
    return builder
def build_headers_map(req_builder:RequestBuilder, api_config: ApiConfig):
    """Copy api_config.headers into a dict on the builder, rejecting duplicate names."""
    headers = {}
    for item in api_config.headers:
        if item.name in headers:
            raise BuildRequestException(f"header [{item.name}] already exist")
        headers[item.name] = item.value
    req_builder.headers = headers
    return req_builder
def build_params(req_builder: RequestBuilder, api_config: ApiConfig):
    """Collect api_config.params as (name, value) tuples on the builder."""
    req_builder.params = [(item.name, item.value) for item in api_config.params]
    return req_builder
def _get_file_path(config_file_dir:Path, value:str):
    """Resolve *value* against the config directory unless it is already absolute."""
    if os.path.isabs(value):
        return Path(value)
    return config_file_dir.joinpath(value).absolute()
def _is_file_field(value:str):
    """True when *value* is an '@<path>' file reference ('@@' escapes the marker)."""
    return value.startswith("@") and not value.startswith("@@")
def _encode_file_data(config_file_dir:Path, value:str, encode:str):
    """Read the file referenced by an ``@<path>`` field value.

    With an empty *encode* (or 'raw') the file is read as text; otherwise the
    bytes are passed through the BYTES_ENCODE_MAP function for *encode*.

    Raises:
        BuildRequestException: for an unknown *encode* name.
    """
    encode_func = None
    if len(encode) > 0:
        if encode not in BYTES_ENCODE_MAP:
            raise BuildRequestException(f"not support encode value [{encode}]")
        encode_func = BYTES_ENCODE_MAP.get(encode)
    # Strip the leading '@' marker before resolving the path; the original
    # passed the raw value here, producing a bogus '@...'-prefixed file name.
    p = _get_file_path(config_file_dir, value[1:])
    if encode_func is None:
        with p.open("r") as f:
            return f.read()
    with p.open("rb") as f:
        return encode_func(f.read())
def build_forms(config_file_dir:Path, req_builder: RequestBuilder, api_config:ApiConfig):
    """Fill the builder's form data and upload files from api_config.fields.

    A value of ``@<path>`` marks a file upload; anything else is a plain
    form field.  Exits the process when no fields are configured.
    """
    data = []
    files = []
    if not api_config.fields:
        print("not found any form field")
        sys.exit(1)
    for field in api_config.fields:
        if not _is_file_field(field.value):
            data.append((field.name, field.value))
            continue
        p = _get_file_path(config_file_dir, field.value[1:])
        # The handle stays open so requests can read it when the request is
        # sent; nothing in this module closes it explicitly.
        files.append((field.name, p.open("rb")))
    req_builder.data = data
    req_builder.files = files
    return req_builder
def build_json(config_file_dir:Path, req_builder: RequestBuilder, api_config: ApiConfig):
    """Build a (possibly nested) JSON body from api_config.fields.

    Field names may use ``a/b/c`` to address nested objects.  A value of
    ``@<path>`` embeds the (optionally encoded) file contents; other values
    are converted per the field's ``type`` via VALUE_TYPE_MAP.

    Raises:
        BuildRequestException: for a leading '/', a duplicate name, or an
            unknown value type.
    """
    data = {}
    finish_fields = []
    if not api_config.fields:
        print("not found any json field")
        sys.exit(1)
    for field in api_config.fields:
        name:str = field.name
        if name.startswith('/'):
            raise BuildRequestException(f"field [{field.name}] cannot start with [/]")
        # a trailing '/' is tolerated and stripped
        if name.endswith('/'):
            name = name[0:-1]
        if name in finish_fields:
            raise BuildRequestException(f"field [{field.name}] already in json data")
        value = field.value
        if _is_file_field(value):
            value = _encode_file_data(config_file_dir, value, field.encode)
        else:
            convert = VALUE_TYPE_MAP.get(field.type)
            if convert is None:
                raise BuildRequestException(f"not found type convert for field [{field.name}]")
            value = convert(value)
        if not '/' in name:
            # Store under the normalized name; the original used field.name
            # here, which kept a stripped trailing '/' in the JSON key while
            # duplicate detection used the stripped form.
            data[name] = value
        else:
            # walk/create intermediate objects for a nested a/b/c name
            name_list = name.split('/')
            current = data
            last_idx = len(name_list) - 1
            for idx, item in enumerate(name_list):
                if idx == last_idx:
                    current[item] = value
                    break
                v = current.get(item)
                if v is None:
                    v = {}
                    current[item] = v
                current = v
        finish_fields.append(name)
    req_builder.json = data
    return req_builder
def render_file(config_file_dir:Path, req_builder: RequestBuilder, api_config: ApiConfig, fpath:str):
    """Render the file at *fpath* as a Jinja2 template against os.environ and
    store the UTF-8 encoded result as the request body."""
    p = _get_file_path(config_file_dir, fpath)
    data = None
    with p.open('r') as f:
        data = f.read()
    env = Environment(loader=FileSystemLoader(config_file_dir))
    t = env.from_string(data)
    req_builder.data = t.render(**os.environ).encode('utf-8')
    return req_builder
def build_body(config_file_dir:Path, req_builder: RequestBuilder, api_config: ApiConfig):
    """Populate the request body according to ``api_config.body``.

    Supported forms: ``@<path>`` (raw file contents), ``:json`` / ``:form``
    (structured bodies from the fields), ``:render:<path>`` (Jinja2-rendered
    file), and any other string verbatim as UTF-8.
    """
    body = api_config.body
    if body is None or len(body) == 0:
        # nothing to do; return the builder for consistency with the other paths
        return req_builder
    if _is_file_field(body):
        p = _get_file_path(config_file_dir, body[1:])
        with p.open("rb") as f:
            req_builder.data = f.read()
        return req_builder
    if body.startswith(":") and not body.startswith("::"):
        if body == ':json':
            return build_json(config_file_dir, req_builder, api_config)
        if body == ":form":
            return build_forms(config_file_dir, req_builder, api_config)
        # ':render:<path>' — the original tested equality with ':render:',
        # which could never carry a path (body[8:] of that exact string is
        # empty); match the 8-character prefix instead.
        if body.startswith(':render:'):
            b = body[8:]
            return render_file(config_file_dir, req_builder, api_config, b)
        raise BuildRequestException(f"not support body value [{body}]")
    req_builder.data = body.encode("utf-8")
    return req_builder
| 26.726287 | 101 | 0.629284 |
94c080ed5531e72eb83c4d7eee663d48195295c0 | 493 | py | Python | nina_xmpp/__main__.py | magicbrothers/nina_xmpp | f8886c115a454db0dcaab15e37cc5ca8263ca644 | [
"MIT"
] | 15 | 2021-07-28T21:50:51.000Z | 2022-03-17T20:20:00.000Z | nina_xmpp/__main__.py | magicbrothers/nina_xmpp | f8886c115a454db0dcaab15e37cc5ca8263ca644 | [
"MIT"
] | 22 | 2021-02-08T09:31:31.000Z | 2022-02-04T19:48:58.000Z | nina_xmpp/__main__.py | magicbrothers/nina_xmpp | f8886c115a454db0dcaab15e37cc5ca8263ca644 | [
"MIT"
] | 7 | 2021-02-07T12:45:05.000Z | 2021-12-26T00:49:38.000Z | import asyncio
from . import NinaXMPP
def main():
import yaml
import argparse
from argparse_logging import add_log_level_argument
parser = argparse.ArgumentParser()
parser.add_argument('config_file', type=argparse.FileType('r'))
add_log_level_argument(parser)
args = parser.parse_args()
config = yaml.safe_load(args.config_file)
args.config_file.close()
main = NinaXMPP(config)
asyncio.run(main.run())
if __name__ == '__main__':
main()
| 18.961538 | 67 | 0.703854 |
3230ce524e2374e8b065d2820be9c652c70d2be3 | 10,442 | py | Python | pySDC/projects/AsympConv/PFASST_conv_Linf.py | brownbaerchen/pySDC | 31293859d731646aa09cef4345669eac65501550 | [
"BSD-2-Clause"
] | 20 | 2015-03-21T09:02:55.000Z | 2022-02-26T20:22:21.000Z | pySDC/projects/AsympConv/PFASST_conv_Linf.py | brownbaerchen/pySDC | 31293859d731646aa09cef4345669eac65501550 | [
"BSD-2-Clause"
] | 61 | 2015-03-02T09:35:55.000Z | 2022-03-17T12:42:48.000Z | pySDC/projects/AsympConv/PFASST_conv_Linf.py | brownbaerchen/pySDC | 31293859d731646aa09cef4345669eac65501550 | [
"BSD-2-Clause"
] | 19 | 2015-02-20T11:52:33.000Z | 2022-02-02T10:46:27.000Z | import csv
import os
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AdvectionEquation_1D_FD import advection1d
from pySDC.implementations.problem_classes.HeatEquation_1D_FD import heat1d
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
def main():
    """
    Main driver running diffusion and advection tests
    """
    # run both test problems for each preconditioner, then plot everything
    for preconditioner in ('LU', 'LU2'):
        run_diffusion(QI=preconditioner)
        run_advection(QI=preconditioner)
    plot_results()
def run_diffusion(QI):
    """
    A simple test program to test PFASST convergence for the heat equation with random initial data

    Args:
        QI: preconditioner
    """
    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-08
    level_params['nsweeps'] = [3, 1]
    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = [3]
    sweeper_params['QI'] = [QI, 'LU']
    sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters
    problem_params = dict()
    problem_params['nu'] = 0.1  # diffusion coefficient
    problem_params['freq'] = -1  # frequency for the test value
    problem_params['nvars'] = [127, 63]  # number of degrees of freedom for each level
    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 200
    # initialize space transfer parameters
    space_transfer_params = dict()
    space_transfer_params['rorder'] = 2
    space_transfer_params['iorder'] = 2
    space_transfer_params['periodic'] = False
    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 30
    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = heat1d  # pass problem class
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = generic_implicit  # pass sweeper (see part B)
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['step_params'] = step_params  # pass step parameters
    description['space_transfer_class'] = mesh_to_mesh  # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass paramters for spatial transfer
    # set time parameters
    t0 = 0.0
    Tend = 1.0
    # set up number of parallel time-steps to run PFASST with
    fname = 'data/results_conv_diffusion_Linf_QI' + str(QI) + '.txt'
    # context managers guarantee the CSV file is closed even if a run fails
    with open(fname, 'w') as file:
        csv.writer(file).writerow(('num_proc', 'niter'))
    for i in range(0, 13):
        num_proc = 2 ** i
        level_params['dt'] = (Tend - t0) / num_proc
        description['level_params'] = level_params  # pass level parameters
        out = 'Working on num_proc = %5i' % num_proc
        print(out)
        cfl = problem_params['nu'] * level_params['dt'] / (1.0 / (problem_params['nvars'][0] + 1)) ** 2
        out = ' CFL number: %4.2e' % cfl
        print(out)
        # instantiate controller
        controller = controller_nonMPI(num_procs=num_proc, controller_params=controller_params, description=description)
        # get initial values on finest level
        P = controller.MS[0].levels[0].prob
        uinit = P.u_exact(t0)
        # call main function to get things done...
        uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
        # filter statistics by type (number of iterations)
        filtered_stats = filter_stats(stats, type='niter')
        # convert filtered statistics to list of iterations count, sorted by process
        iter_counts = sort_stats(filtered_stats, sortby='time')
        niters = np.array([item[1] for item in iter_counts])
        out = ' Mean number of iterations: %4.2f' % np.mean(niters)
        print(out)
        # append this step count's result to the CSV
        with open(fname, 'a') as file:
            csv.writer(file).writerow((num_proc, np.mean(niters)))
    assert os.path.isfile(fname), 'ERROR: pickle did not create file'
def run_advection(QI):
    """
    A simple test program to test PFASST convergence for the periodic advection equation

    Args:
        QI: preconditioner
    """
    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-08
    level_params['nsweeps'] = [3, 1]
    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = [3]
    sweeper_params['QI'] = [QI, 'LU']  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters
    problem_params = dict()
    problem_params['freq'] = 64  # frequency for the test value
    problem_params['nvars'] = [128, 64]  # number of degrees of freedom for each level
    problem_params['order'] = 2
    problem_params['type'] = 'center'
    problem_params['c'] = 0.1
    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 200
    # initialize space transfer parameters
    space_transfer_params = dict()
    space_transfer_params['rorder'] = 2
    space_transfer_params['iorder'] = 2
    space_transfer_params['periodic'] = True
    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 30
    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = advection1d  # pass problem class
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = generic_implicit  # pass sweeper (see part B)
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['step_params'] = step_params  # pass step parameters
    description['space_transfer_class'] = mesh_to_mesh  # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass paramters for spatial transfer
    # set time parameters
    t0 = 0.0
    Tend = 1.0
    # set up number of parallel time-steps to run PFASST with
    fname = 'data/results_conv_advection_Linf_QI' + str(QI) + '.txt'
    # context managers guarantee the CSV file is closed even if a run fails
    with open(fname, 'w') as file:
        csv.writer(file).writerow(('num_proc', 'niter'))
    for i in range(0, 7):
        num_proc = 2 ** i
        level_params['dt'] = (Tend - t0) / num_proc
        description['level_params'] = level_params  # pass level parameters
        out = 'Working on num_proc = %5i' % num_proc
        print(out)
        cfl = problem_params['c'] * level_params['dt'] / (1.0 / problem_params['nvars'][0])
        out = ' CFL number: %4.2e' % cfl
        print(out)
        # instantiate controller
        controller = controller_nonMPI(num_procs=num_proc, controller_params=controller_params, description=description)
        # get initial values on finest level
        P = controller.MS[0].levels[0].prob
        uinit = P.u_exact(t0)
        # call main function to get things done...
        uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
        # filter statistics by type (number of iterations)
        filtered_stats = filter_stats(stats, type='niter')
        # convert filtered statistics to list of iterations count, sorted by process
        iter_counts = sort_stats(filtered_stats, sortby='time')
        niters = np.array([item[1] for item in iter_counts])
        out = ' Mean number of iterations: %4.2f' % np.mean(niters)
        print(out)
        # append this step count's result to the CSV
        with open(fname, 'a') as file:
            csv.writer(file).writerow((num_proc, np.mean(niters)))
    assert os.path.isfile(fname), 'ERROR: pickle did not create file'
def _read_results(fname):
    """Read the (num_proc, niter) columns from one results CSV file.

    Args:
        fname: path to a results file written by the convergence run.

    Returns:
        (xvalues, niters): lists of int process counts and float mean
        iteration counts, in file order.
    """
    xvalues = []
    niters = []
    # Use a context manager so the file is closed even on parse errors
    # (the original opened/closed the files manually).
    with open(fname, 'r') as f:
        reader = csv.DictReader(f, delimiter=',')
        for row in reader:
            xvalues.append(int(row['num_proc']))
            niters.append(float(row['niter']))
    return xvalues, niters


def plot_results(cwd=''):
    """
    Plotting routine for iteration counts

    Args:
        cwd: current working directory
    """
    setups = [('diffusion', 'LU', 'LU2'), ('advection', 'LU', 'LU2')]
    # `prob_type` instead of `type` to avoid shadowing the builtin.
    for prob_type, QI1, QI2 in setups:
        # Load the iteration counts for both preconditioner variants.
        xvalues_1, niter_1 = _read_results(
            cwd + 'data/results_conv_' + prob_type + '_Linf_QI' + QI1 + '.txt')
        xvalues_2, niter_2 = _read_results(
            cwd + 'data/results_conv_' + prob_type + '_Linf_QI' + QI2 + '.txt')

        # set up plotting parameters
        params = {'legend.fontsize': 20,
                  'figure.figsize': (12, 8),
                  'axes.labelsize': 20,
                  'axes.titlesize': 20,
                  'xtick.labelsize': 16,
                  'ytick.labelsize': 16,
                  'lines.linewidth': 3
                  }
        plt.rcParams.update(params)

        # set up figure
        plt.figure()
        plt.xlabel('number of time-steps (L)')
        plt.ylabel('no. of iterations')
        plt.xlim(min(xvalues_1 + xvalues_2) / 2.0, max(xvalues_1 + xvalues_2) * 2.0)
        plt.ylim(min(niter_1 + niter_2) - 1, max(niter_1 + niter_2) + 1)
        plt.grid()

        # plot on a log x-axis (num_proc doubles between runs)
        plt.semilogx(xvalues_1, niter_1, 'r-', marker='s', markersize=10, label=QI1)
        plt.semilogx(xvalues_2, niter_2, 'b-', marker='o', markersize=10, label=QI2)
        plt.legend(loc=2, ncol=1, numpoints=1)

        # save plot, beautify
        fname = 'data/conv_test_niter_Linf_' + prob_type + '.png'
        plt.savefig(fname, bbox_inches='tight')
        assert os.path.isfile(fname), 'ERROR: plotting did not create file'
# Entry point: run the convergence study when executed as a script.
if __name__ == "__main__":
    main()
| 34.013029 | 120 | 0.650642 |
d198467dcbf804d949e206c8253cd2f90edecbd9 | 7,213 | py | Python | compiler/testdata/expected/python.asyncio/service_extension_same_file/f_BasePinger.py | BunnyLINE/frugal | 1f82fb86b9aace8b78fcc0fedb1d60a9d60ffc31 | [
"Apache-2.0"
] | 144 | 2017-08-17T15:51:58.000Z | 2022-01-14T21:36:55.000Z | compiler/testdata/expected/python.asyncio/service_extension_same_file/f_BasePinger.py | BunnyLINE/frugal | 1f82fb86b9aace8b78fcc0fedb1d60a9d60ffc31 | [
"Apache-2.0"
] | 930 | 2017-08-17T17:53:30.000Z | 2022-03-28T14:04:49.000Z | compiler/testdata/expected/python.asyncio/service_extension_same_file/f_BasePinger.py | BunnyLINE/frugal | 1f82fb86b9aace8b78fcc0fedb1d60a9d60ffc31 | [
"Apache-2.0"
] | 77 | 2017-08-17T15:54:31.000Z | 2021-12-25T15:18:34.000Z | #
# Autogenerated by Frugal Compiler (3.14.10)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from .ttypes import *
class Iface(object):
    """Generated service interface; concrete handlers implement basePing."""

    async def basePing(self, ctx):
        """
        Args:
            ctx: FContext
        """
        pass
class Client(Iface):

    def __init__(self, provider, middleware=None):
        """
        Create a new Client with an FServiceProvider containing a transport
        and protocol factory.

        Args:
            provider: FServiceProvider
            middleware: ServiceMiddleware or list of ServiceMiddleware
        """
        middleware = middleware or []
        if middleware and not isinstance(middleware, list):
            middleware = [middleware]
        self._transport = provider.get_transport()
        self._protocol_factory = provider.get_protocol_factory()
        # Provider middleware is appended after any middleware passed in directly.
        middleware += provider.get_middleware()
        self._methods = {
            'basePing': Method(self._basePing, middleware),
        }

    async def basePing(self, ctx):
        """
        Args:
            ctx: FContext
        """
        # Dispatch through the middleware-wrapped Method object.
        return await self._methods['basePing']([ctx])

    async def _basePing(self, ctx):
        # Serialize the call (headers, message envelope, empty args struct)
        # into an in-memory buffer bounded by the transport's request limit.
        memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
        oprot = self._protocol_factory.get_protocol(memory_buffer)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('basePing', TMessageType.CALL, 0)
        args = basePing_args()
        args.write(oprot)
        oprot.writeMessageEnd()
        # Send the request and wait for the framed response transport.
        response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
        iprot = self._protocol_factory.get_protocol(response_transport)
        iprot.read_response_headers(ctx)
        _, mtype, _ = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            # Map an oversized-response error onto a transport exception so
            # callers can distinguish it from application-level failures.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = basePing_result()
        result.read(iprot)
        iprot.readMessageEnd()
class Processor(FBaseProcessor):
    """Server-side processor mapping incoming method names to handler wrappers."""

    def __init__(self, handler, middleware=None):
        """
        Create a new Processor.

        Args:
            handler: Iface
        """
        if middleware and not isinstance(middleware, list):
            middleware = [middleware]
        super(Processor, self).__init__()
        # Register the basePing processor function; it shares the processor's
        # write lock so responses on the output protocol are not interleaved.
        self.add_to_processor_map('basePing', _basePing(Method(handler.basePing, middleware), self.get_write_lock()))
class _basePing(FProcessorFunction):
    """Processor function: deserialize, invoke, and reply to a basePing call."""

    def __init__(self, handler, lock):
        super(_basePing, self).__init__(handler, lock)

    async def process(self, ctx, iprot, oprot):
        # Read (and discard the contents of) the empty args struct.
        args = basePing_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = basePing_result()
        try:
            # The handler may be sync or async; await only if it returned a coroutine.
            ret = self._handler([ctx])
            if inspect.iscoroutine(ret):
                ret = await ret
        except TApplicationException as ex:
            # Application exceptions are serialized back to the client as-is.
            async with self._lock:
                _write_application_exception(ctx, oprot, "basePing", exception=ex)
                return
        except Exception as e:
            # Unexpected failure: report INTERNAL_ERROR to the client, then
            # re-raise so the server side can still see the error.
            async with self._lock:
                _write_application_exception(ctx, oprot, "basePing", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
            raise
        async with self._lock:
            try:
                oprot.write_response_headers(ctx)
                oprot.writeMessageBegin('basePing', TMessageType.REPLY, 0)
                result.write(oprot)
                oprot.writeMessageEnd()
                oprot.get_transport().flush()
            except TTransportException as e:
                # catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
                if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
                    raise _write_application_exception(ctx, oprot, "basePing", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
                else:
                    raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
    """Serialize a TApplicationException reply for *method* and return it.

    Either pass an existing *exception*, or an *ex_code*/*message* pair from
    which a new TApplicationException is built.
    """
    if exception is not None:
        x = exception
    else:
        x = TApplicationException(type=ex_code, message=message)
    oprot.write_response_headers(ctx)
    oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
    x.write(oprot)
    oprot.writeMessageEnd()
    oprot.get_transport().flush()
    return x
class basePing_args(object):
    """Argument struct for basePing; the method declares no Thrift fields."""

    def read(self, iprot):
        # Consume fields until STOP; there are no declared fields to decode.
        iprot.readStructBegin()
        while True:
            fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        self.validate()
        oprot.writeStructBegin('basePing_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No fields, so nothing to check.
        return

    def __hash__(self):
        return 17

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (type(self).__name__, attrs)

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class basePing_result(object):
    """Result struct for basePing; the method returns void, so no fields."""

    def read(self, iprot):
        # Consume fields until STOP; there are no declared fields to decode.
        iprot.readStructBegin()
        while True:
            fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        self.validate()
        oprot.writeStructBegin('basePing_result')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No fields, so nothing to check.
        return

    def __hash__(self):
        return 17

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (type(self).__name__, attrs)

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 31.77533 | 151 | 0.628587 |
ace57cf6494e8d34b4c048263eddbfb763434eaa | 8,768 | py | Python | phy/apps/__init__.py | PaulMAnderson/phy | 134264e6c1ec586f797459633fa4e71352fafb4e | [
"BSD-3-Clause"
] | null | null | null | phy/apps/__init__.py | PaulMAnderson/phy | 134264e6c1ec586f797459633fa4e71352fafb4e | [
"BSD-3-Clause"
] | null | null | null | phy/apps/__init__.py | PaulMAnderson/phy | 134264e6c1ec586f797459633fa4e71352fafb4e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""CLI tool."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from contextlib import contextmanager
import logging
from pathlib import Path
import sys
from traceback import format_exception
import click
from phylib import add_default_handler, _Formatter # noqa
from phylib import _logger_date_fmt, _logger_fmt # noqa
from phy import __version_git__
from phy.gui.qt import QtDialogLogger
from phy.utils.profiling import _enable_profiler, _enable_pdb
from .base import ( # noqa
BaseController, WaveformMixin, FeatureMixin, TemplateMixin, TraceMixin)
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# CLI utils
#------------------------------------------------------------------------------
# Global debug flag. The custom flags below are stripped from sys.argv
# *before* click parses the command line, so click never sees them.
DEBUG = False
if '--debug' in sys.argv:  # pragma: no cover
    DEBUG = True
    sys.argv.remove('--debug')

# --pdb: enable post-mortem debugging for the process.
if '--pdb' in sys.argv:  # pragma: no cover
    sys.argv.remove('--pdb')
    _enable_pdb()

# Add `profile` in the builtins.
# --prof / --lprof select the regular or line profiler; the flag is removed
# only after _enable_profiler has been configured from it.
if '--lprof' in sys.argv or '--prof' in sys.argv:  # pragma: no cover
    _enable_profiler('--lprof' in sys.argv)
    if '--prof' in sys.argv:
        sys.argv.remove('--prof')
    if '--lprof' in sys.argv:
        sys.argv.remove('--lprof')
#------------------------------------------------------------------------------
# Set up logging with the CLI tool
#------------------------------------------------------------------------------
def exceptionHandler(exception_type, exception, traceback):  # pragma: no cover
    """sys.excepthook replacement that logs uncaught exceptions with a traceback."""
    details = ''.join(format_exception(exception_type, exception, traceback))
    logger.error(
        "An error has occurred (%s): %s\n%s",
        exception_type.__name__, exception, details)
@contextmanager
def capture_exceptions():  # pragma: no cover
    """Log exceptions instead of crashing the GUI, and display an error dialog on errors.

    Installs a custom sys.excepthook plus a Qt dialog logging handler for the
    duration of the `with` block, and restores both afterwards.
    """
    logger.debug("Start capturing exceptions.")
    # Add a custom exception hook.
    excepthook = sys.excepthook
    sys.excepthook = exceptionHandler
    # Add a dialog exception handler.
    handler = QtDialogLogger()
    handler.setLevel(logging.ERROR)
    logging.getLogger('phy').addHandler(handler)
    try:
        yield
    finally:
        # BUGFIX: the original did not wrap the yield, so an exception raised
        # in the with-body leaked the hook and handler. Restore them always.
        sys.excepthook = excepthook
        logging.getLogger('phy').removeHandler(handler)
        logger.debug("Stop capturing exceptions.")
#------------------------------------------------------------------------------
# Root CLI tool
#------------------------------------------------------------------------------
@click.group()
@click.version_option(version=__version_git__)
@click.help_option('-h', '--help')
@click.pass_context
def phycli(ctx):
    """Interactive visualization and manual spike sorting of large-scale ephys data."""
    # Install the default logging handler on each package logger at the
    # level selected by the --debug flag.
    level = 'DEBUG' if DEBUG else 'INFO'
    for name in ('phy', 'phylib', 'mtscomp'):
        add_default_handler(level=level, logger=logging.getLogger(name))
#------------------------------------------------------------------------------
# GUI command wrapper
#------------------------------------------------------------------------------
def _gui_command(f):
    """Command options for GUI commands."""
    # Shared cache/state options; application order matches the original
    # (clear-cache applied first, clear-state second).
    cache_opt = click.option(
        '--clear-cache/--no-clear-cache', default=False,
        help="Clear the .phy cache in the data directory.")
    state_opt = click.option(
        '--clear-state/--no-clear-state', default=False,
        help="Clear the GUI state in `~/.phy/` and in `.phy`.")
    return state_opt(cache_opt(f))
#------------------------------------------------------------------------------
# Raw data GUI
#------------------------------------------------------------------------------
@phycli.command('trace-gui')  # pragma: no cover
@click.argument('dat-path', type=click.Path(exists=True))
@click.option('-s', '--sample-rate', type=float)
@click.option('-d', '--dtype', type=str)
@click.option('-n', '--n-channels', type=int)
@click.option('-h', '--offset', type=int)
@click.option('-f', '--fortran', type=bool, is_flag=True)
@_gui_command
@click.pass_context
def cli_trace_gui(ctx, dat_path, **kwargs):
    """Launch the trace GUI on a raw data file."""
    from .trace.gui import trace_gui
    with capture_exceptions():
        # Rename/convert the CLI options into the keyword arguments
        # trace_gui() expects (n_channels -> n_channels_dat, fortran -> order).
        kwargs['n_channels_dat'] = kwargs.pop('n_channels')
        kwargs['order'] = 'F' if kwargs.pop('fortran', None) else None
        trace_gui(dat_path, **kwargs)
#------------------------------------------------------------------------------
# Template GUI
#------------------------------------------------------------------------------
@phycli.command('template-gui')  # pragma: no cover
@click.argument('params-path', type=click.Path(exists=True))
@_gui_command
@click.pass_context
def cli_template_gui(ctx, params_path, **kwargs):
    """Launch the template GUI on a params.py file."""
    from .template.gui import template_gui
    # `profile` is put into builtins when --prof/--lprof was passed (see the
    # argv flag handling at the top of the module); None otherwise.
    prof = __builtins__.get('profile', None)
    with capture_exceptions():
        if prof:
            # Run the GUI under the profiler instead of directly.
            from phy.utils.profiling import _profile
            return _profile(prof, 'template_gui(params_path)', globals(), locals())
        template_gui(params_path, **kwargs)
@phycli.command('template-describe')
@click.argument('params-path', type=click.Path(exists=True))
@click.pass_context
def cli_template_describe(ctx, params_path):
    """Describe a template file."""
    # Local import defers loading the template GUI module until the
    # command actually runs.
    from .template.gui import template_describe
    template_describe(params_path)
#------------------------------------------------------------------------------
# Kwik GUI
#------------------------------------------------------------------------------
# Create the `phy cluster-manual file.kwik` command.
@phycli.command('kwik-gui')  # pragma: no cover
@click.argument('path', type=click.Path(exists=True))
@click.option('--channel-group', type=int)
@click.option('--clustering', type=str)
@_gui_command
@click.pass_context
def cli_kwik_gui(ctx, path, channel_group=None, clustering=None, **kwargs):
    """Launch the Kwik GUI on a Kwik file."""
    from .kwik.gui import kwik_gui
    with capture_exceptions():
        assert path
        kwik_gui(path, channel_group=channel_group, clustering=clustering, **kwargs)
@phycli.command('kwik-describe')
@click.argument('path', type=click.Path(exists=True))
@click.option('--channel-group', type=int, help='channel group')
@click.option('--clustering', type=str, help='clustering')
@click.pass_context
def cli_kwik_describe(ctx, path, channel_group=0, clustering='main'):
    """Describe a Kwik file."""
    # Local import defers loading the Kwik GUI module until needed.
    from .kwik.gui import kwik_describe
    assert path
    kwik_describe(path, channel_group=channel_group, clustering=clustering)
#------------------------------------------------------------------------------
# Conversion
#------------------------------------------------------------------------------
@phycli.command('alf-convert')
@click.argument('subdirs', nargs=-1, type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument('out_dir', type=click.Path())
@click.pass_context
def cli_alf_convert(ctx, subdirs, out_dir):
    """Convert an ephys dataset into ALF. If several directories are specified, it is assumed
    that each directory contains the data for one probe of the same recording."""
    from phylib.io.alf import EphysAlfCreator
    from phylib.io.merge import Merger
    from phylib.io.model import load_model

    out_dir = Path(out_dir)
    if len(subdirs) >= 2:
        # Merge in the `merged` subdirectory inside the output directory.
        m = Merger(subdirs, out_dir / '_tmp_merged')
        model = m.merge()
    else:
        # Single probe: load the dataset directly from its params.py.
        model = load_model(Path(subdirs[0]) / 'params.py')

    c = EphysAlfCreator(model)
    c.convert(out_dir)
#------------------------------------------------------------------------------
# Waveform extraction
#------------------------------------------------------------------------------
@phycli.command('extract-waveforms')
@click.argument('params-path', type=click.Path(exists=True))
@click.argument('n_spikes_per_cluster', type=int, default=500)
# BUGFIX: `--max-n-channels` was declared with click.argument(), but click
# arguments cannot use option-style (`--`) names; declare it as an option.
@click.option('--max-n-channels', type=int, default=16)
@click.pass_context
def template_extract_waveforms(
        ctx, params_path, n_spikes_per_cluster, max_n_channels=None):  # pragma: no cover
    """Extract a subset of spike waveforms and save them with the dataset.

    Args:
        params_path: path to the dataset's params.py file.
        n_spikes_per_cluster: max number of spikes to extract per template.
        max_n_channels: max number of channels to keep per waveform.
    """
    from phylib.io.model import load_model
    model = load_model(params_path)
    model.save_spikes_subset_waveforms(
        max_n_spikes_per_template=n_spikes_per_cluster, max_n_channels=max_n_channels)
    model.close()
| 35.354839 | 98 | 0.577669 |
c42239ea333ce610904b70c839f1bc4813381294 | 414 | py | Python | demo/classification/criteo_ctr/run_demo_ctr.py | bluesjjw/xlearn | 6da54dbef3edb6edaa3c0cd189704d72ef1b5e21 | [
"Apache-2.0"
] | 1 | 2021-01-06T08:27:27.000Z | 2021-01-06T08:27:27.000Z | demo/classification/criteo_ctr/run_demo_ctr.py | bluesjjw/xlearn | 6da54dbef3edb6edaa3c0cd189704d72ef1b5e21 | [
"Apache-2.0"
] | null | null | null | demo/classification/criteo_ctr/run_demo_ctr.py | bluesjjw/xlearn | 6da54dbef3edb6edaa3c0cd189704d72ef1b5e21 | [
"Apache-2.0"
] | null | null | null | import xlearn as xl
# Training task
ffm_model = xl.create_ffm()                 # field-aware factorization machine
ffm_model.setTrain("./small_train.txt")     # training data file
ffm_model.setValidate("./small_test.txt")   # validation data, scored with 'acc'
param = {'task':'binary', 'lr':0.2,
         'lambda':0.002, 'metric':'acc'}    # binary classification, L2 reg
ffm_model.fit(param, './model.out')         # train and save the model
# Prediction task
ffm_model.setTest("./small_test.txt")
# Convert output to 0-1
ffm_model.setSigmoid()
ffm_model.predict("./model.out", "./output.txt") | 25.875 | 48 | 0.695652 |
62c56984b854c8c4157cedc75759097cf79bfacd | 11,283 | py | Python | zazu.py | PatiMohit/ZazuML | c5247859353cacf0e4a58f9c530a07038d9e12cf | [
"MIT"
] | 1 | 2020-12-31T18:02:41.000Z | 2020-12-31T18:02:41.000Z | zazu.py | PatiMohit/ZazuML | c5247859353cacf0e4a58f9c530a07038d9e12cf | [
"MIT"
] | null | null | null | zazu.py | PatiMohit/ZazuML | c5247859353cacf0e4a58f9c530a07038d9e12cf | [
"MIT"
] | null | null | null | from model_selector import find_model
from launch_pad import Launcher
from hyperparameter_tuner import Tuner, OngoingTrials
from spec import ConfigSpec, OptModel
from spec import ModelsSpec
from logging_utils import init_logging, logginger
from dataloop_services import deploy_model, deploy_zazu, push_package, update_service, get_dataset_obj, deploy_predict, \
deploy_zazu_timer
from augmentations_tuner.fastautoaugment import AugSearch
import argparse
import os
import torch
import json
import dtlpy as dl
import sys
logger = logginger(__name__)
class ZaZu:
    """Pipeline driver: model selection, HP search, training and inference."""

    def __init__(self, opt_model, remote=False):
        self.remote = remote
        self.opt_model = opt_model
        # Artifact paths written/read by the pipeline stages below.
        self.path_to_most_suitable_model = 'model.txt'
        self.path_to_best_trial = 'best_trial.json'
        self.path_to_trials = 'trials.json'
        self.path_to_best_checkpoint = 'checkpoint.pt'
        models_spec_path = 'models.json'
        self.models = ModelsSpec(models_spec_path)

    def find_best_model(self):
        """Pick the closest model for opt_model and persist its name to model.txt."""
        closest_model = find_model(self.opt_model, self.models)
        logger.info(str(closest_model))
        if os.path.exists(self.path_to_most_suitable_model):
            logger.info('overwriting model.txt . . .')
            os.remove(self.path_to_most_suitable_model)
        with open(self.path_to_most_suitable_model, "w") as f:
            f.write(closest_model)
        self.update_optimal_model()

    def hp_search(self):
        """Run the hyper-parameter search and save the best trial and checkpoint."""
        if not self.remote:
            if self.opt_model.max_instances_at_once > torch.cuda.device_count():
                print(torch.cuda.is_available())
                raise Exception(''' 'max_instances_at_once' must be smaller or equal to the number of available gpus''')
        if not hasattr(self.opt_model, 'name'):
            logger.info("no 'update_optimal_model' method, checking for model.txt file . . . ")
            self.update_optimal_model()
        # initialize hyperparameter_tuner and gun i.e.
        ongoing_trials = OngoingTrials()
        tuner = Tuner(self.opt_model, ongoing_trials)
        gun = Launcher(self.opt_model, ongoing_trials, remote=self.remote)
        logger.info('commencing hyper-parameter search . . . ')
        tuner.search_hp()
        gun.launch_trials()
        tuner.end_trial()
        # starting second set of trials
        tuner.search_hp()
        # BUGFIX: the original used `is not 'STOPPED'`, an identity test on a
        # string literal; compare with equality instead.
        while ongoing_trials.status != 'STOPPED':
            gun.launch_trials()
            tuner.end_trial()
            # starting next set of trials
            tuner.search_hp()
        trials = tuner.get_trials()
        # BUGFIX: `save_checkpoint_location` was only assigned inside the
        # fastautoaugment branch, raising NameError for other search methods.
        # Default it to the configured best-checkpoint path.
        # TODO(review): confirm the intended default for the non-FAA flow.
        save_checkpoint_location = self.path_to_best_checkpoint
        if self.opt_model.augmentation_search_method == 'fastautoaugment':
            sorted_trial_ids = tuner.get_sorted_trial_ids()
            checkpoint_stem = self.path_to_best_checkpoint.split('.')[0]
            paths_ls = []
            # Save the top-5 trial checkpoints for the augmentation search.
            for i in range(len(sorted_trial_ids[:5])):
                save_checkpoint_location = checkpoint_stem + str(i) + '.pt'
                logger.info('trial ' + sorted_trial_ids[i] + '\tval: ' + str(trials[sorted_trial_ids[i]]['metrics']))
                save_checkpoint_location = os.path.join(
                    'augmentations_tuner', 'fastautoaugment', 'FastAutoAugment', 'models', save_checkpoint_location)
                if os.path.exists(save_checkpoint_location):
                    logger.info('overwriting checkpoint . . .')
                    os.remove(save_checkpoint_location)
                torch.save(trials[sorted_trial_ids[i]]['checkpoint'], save_checkpoint_location)
                paths_ls.append(save_checkpoint_location)
            augsearch = AugSearch(paths_ls=paths_ls)  # TODO: calibrate between the model dictionaries
            checkpointwithaugspath = 'final' + checkpoint_stem + '.pt'
            augsearch.retrain(save_path=checkpointwithaugspath)
            tuner.add_to_oracle_trials(checkpointwithaugspath)
        sorted_trial_ids = tuner.get_sorted_trial_ids()
        logger.info('the best trial, trial ' + sorted_trial_ids[0] + '\tval: ' + str(trials[sorted_trial_ids[0]]['metrics']))
        if os.path.exists(save_checkpoint_location):
            logger.info('overwriting checkpoint . . .')
            os.remove(save_checkpoint_location)
        torch.save(trials[sorted_trial_ids[0]]['checkpoint'], save_checkpoint_location)
        logger.info('best trial: ' + str(trials[sorted_trial_ids[0]]['hp_values']) + '\nbest value: ' + str(
            trials[sorted_trial_ids[0]]['metrics']))
        best_trial = trials[sorted_trial_ids[0]]['hp_values']
        if os.path.exists(self.path_to_best_trial):
            logger.info('overwriting best_trial.json . . .')
            os.remove(self.path_to_best_trial)
        with open(self.path_to_best_trial, 'w') as fp:
            json.dump(best_trial, fp)
        logger.info('results saved to best_trial.json')

    def augmentations_search(self):
        # Not implemented as a standalone stage; augmentation search currently
        # runs inside hp_search when configured.
        pass

    def train_new_model(self):
        """Train a model using the saved best trial, writing the best checkpoint."""
        # to train a new model you must have updated the found model and the best trial
        if not hasattr(self.opt_model, 'name'):
            logger.info("no 'update_optimal_model' method, checking for model.txt file . . . ")
            self.update_optimal_model()
        if not os.path.exists(self.path_to_best_trial):
            raise Exception('''best_trial.json doesn't exist, you can run "hp_search" to get it''')
        with open(self.path_to_best_trial, 'r') as fp:
            best_trial = json.load(fp)
        gun = Launcher(self.opt_model, remote=self.remote)
        gun.train_and_save_best_trial(best_trial, self.path_to_best_checkpoint)

    def update_optimal_model(self):
        """Load the chosen model's name and spec from model.txt into opt_model."""
        # this will update opt_model with chosen model
        if not os.path.exists(self.path_to_most_suitable_model):
            raise Exception('''model.txt file doesn't exist, you can run "find_best_model" method to get it''')
        with open(self.path_to_most_suitable_model, "r") as f:
            closest_model = f.read().strip()
        self.opt_model.add_attr(closest_model, 'name')
        self.opt_model.add_attr(self.models.spec_data[closest_model]['hp_search_space'], 'hp_space')
        self.opt_model.add_attr(self.models.spec_data[closest_model]['training_configs'], 'training_configs')

    def run_inference(self):
        """Run prediction with the first saved checkpoint (e.g. checkpoint0.pt)."""
        if not hasattr(self.opt_model, 'name'):
            logger.info("no 'update_optimal_model' method, checking for model.txt file . . . ")
            self.update_optimal_model()
        gun = Launcher(self.opt_model, remote=self.remote)
        path_to_first_checkpoint = self.path_to_best_checkpoint.split('.')[0] + str(0) + '.pt'
        gun.predict(path_to_first_checkpoint)

    def one_time_inference(self, image_path, checkpoint_path):
        """Predict a single image with the given checkpoint and return the result."""
        from ObjectDetNet.retinanet import AdapterModel
        model = AdapterModel()
        return model.predict_single_image(checkpoint_path, image_path)
def maybe_login(env):
    """Switch the Dataloop SDK to *env*, logging in first if required."""
    try:
        dl.setenv(env)
    # BUGFIX: narrowed from a bare `except:` which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        # setenv fails without a valid session: log in and retry.
        dl.login()
        dl.setenv(env)
def maybe_do_deployment_stuff():
    """Handle the --deploy / --zazu_timer / --update CLI flags."""
    if args.deploy:
        logger.info('about to launch 2 deployments, zazu and trial')
        with open('global_configs.json', 'r') as fp:
            global_project_name = json.load(fp)['project']
        global_project = dl.projects.get(project_name=global_project_name)
        global_package_obj = push_package(global_project)
        # Track what was actually created so rollback never touches an
        # undefined name (the original could raise NameError in its except).
        created_services = []
        try:
            # predict_service = deploy_predict(package=global_package_obj)
            trial_service = deploy_model(package=global_package_obj)
            created_services.append(trial_service)
            zazu_service = deploy_zazu(package=global_package_obj)
            created_services.append(zazu_service)
            logger.info('deployments launched successfully')
        except Exception:
            # Keep the original swallow-and-rollback behavior, but log it.
            logger.exception('deployment failed, rolling back created services')
            for service in created_services:
                service.delete()
    if args.zazu_timer:
        logger.info('about to launch timer deployment')
        with open('global_configs.json', 'r') as fp:
            global_project_name = json.load(fp)['project']
        global_project = dl.projects.get(project_name=global_project_name)
        global_package_obj = push_package(global_project)
        with open('configs.json', 'r') as fp:
            configs = json.load(fp)
        configs_input = dl.FunctionIO(type='Json', name='configs', value=json.dumps(configs))
        time_input = dl.FunctionIO(type='Json', name='time', value=3600 * 0.25)
        test_dataset_input = dl.FunctionIO(type='Json', name='test_dataset_id', value='5eb7e0bdd4eb9434c77d80b5')
        query_input = dl.FunctionIO(type='Json', name='query', value=json.dumps(
            {"resource": "items", "sort": {}, "page": 0, "pageSize": 1000,
             "filter": {"$and": [{"dir": "/items/val*"}, {"hidden": False}, {"type": "file"}]}}))
        init_inputs = [configs_input, time_input, test_dataset_input, query_input]
        deploy_zazu_timer(package=global_package_obj,
                          init_inputs=init_inputs)
        logger.info('timer deployment launched successfully')
    if args.update:
        # BUGFIX: the original passed the whole JSON dict as the project name
        # (missing the ['project'] lookup) and called maybe_login() without
        # its required `env` argument.
        with open('global_configs.json', 'r') as fp:
            global_project_name = json.load(fp)['project']
        with open('configs.json', 'r') as fp:
            # Same environment the __main__ entry point logs into.
            env = json.load(fp)['dataloop']['setenv']
        maybe_login(env)
        global_project = dl.projects.get(project_name=global_project_name)
        update_service(global_project, 'trial')
        update_service(global_project, 'zazu')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--remote", action='store_true', default=False)
    parser.add_argument("--deploy", action='store_true', default=False)
    parser.add_argument("--update", action='store_true', default=False)
    parser.add_argument("--search", action='store_true', default=False)
    parser.add_argument("--train", action='store_true', default=False)
    parser.add_argument("--predict", action='store_true', default=False)
    parser.add_argument("--predict_once", action='store_true', default=False)
    parser.add_argument("--zazu_timer", action='store_true', default=False)
    args = parser.parse_args()

    with open('configs.json', 'r') as fp:
        configs = json.load(fp)
    try:
        maybe_login(configs['dataloop']['setenv'])
    # Narrowed from a bare `except: pass`; login stays best-effort so local
    # runs can continue offline, but interrupts are no longer swallowed.
    except Exception:
        pass
    maybe_do_deployment_stuff()
    if args.remote:
        # Remote mode: execute search/predict on the deployed zazu service.
        configs_input = dl.FunctionIO(type='Json', name='configs', value=configs)
        inputs = [configs_input]
        zazu_service = dl.services.get('zazu')
        # get project id for billing bla bla bla
        dataset_obj = get_dataset_obj(configs['dataloop'])
        project_id = dataset_obj.project.id
        if args.search:
            zazu_service.execute(function_name='search', execution_input=inputs, project_id=project_id)
        if args.predict:
            zazu_service.execute(function_name='predict', execution_input=inputs, project_id=project_id)
    else:
        # Local mode: build the spec objects and drive the pipeline directly.
        logger = init_logging(__name__)
        configs_path = os.path.join(os.getcwd(), 'configs.json')
        configs = ConfigSpec(configs_path)
        opt_model = OptModel()
        opt_model.add_child_spec(configs, 'configs')
        zazu = ZaZu(opt_model, remote=args.remote)
        if args.search:
            zazu.find_best_model()
            zazu.hp_search()
        if args.train:
            zazu.train_new_model()
        if args.predict:
            zazu.run_inference()
        if args.predict_once:
            # TODO: hard-coded demo image/checkpoint paths.
            zazu.one_time_inference('/home/noam/0120122798.jpg', 'checkpoint0.pt')
02d560a1ea200edb9fdf8f9c412bf90076e75cd6 | 573 | py | Python | ProDy/distance.py | wojdyr/pdb-benchmarks | 6f99ef1ad7151c0c637f5f2a2c35a5b4ceb042fe | [
"MIT"
] | 30 | 2016-05-21T18:07:36.000Z | 2022-02-25T15:10:49.000Z | ProDy/distance.py | wojdyr/pdb-benchmarks | 6f99ef1ad7151c0c637f5f2a2c35a5b4ceb042fe | [
"MIT"
] | 7 | 2016-06-20T22:40:50.000Z | 2020-06-17T17:06:10.000Z | ProDy/distance.py | wojdyr/pdb-benchmarks | 6f99ef1ad7151c0c637f5f2a2c35a5b4ceb042fe | [
"MIT"
] | 6 | 2016-05-26T14:44:28.000Z | 2020-05-26T09:50:47.000Z | # Benchmark the calculation of a distance in a PDB file
# The distance is the closest distance between any atoms of residues 50 and 60
# of chain A in 1AKE

import time
from prody import *

pdb_filepath = "data/1AKE.pdb"
struc = parsePDB(pdb_filepath)


def distance():
    """Return the minimum inter-atomic distance between residues A/50 and A/60."""
    min_dist = float("inf")
    for atom_a in struc['A', 50]:
        for atom_b in struc['A', 60]:
            # Compute each pairwise distance once; the original called
            # calcDistance twice per atom pair.
            dist = calcDistance(atom_a, atom_b)
            if dist < min_dist:
                min_dist = dist
    return min_dist


# Time a single call and print the elapsed seconds.
start = time.time()
distance()
end = time.time()
print(end - start)
| 23.875 | 78 | 0.670157 |
ac901e5eb1a71c25b62d71b016f86ba8580c259a | 4,710 | py | Python | _site/lib/entry.py | niklasbt/notebook | a1144028f57ef735371c5795b708527ef5beca01 | [
"MIT"
] | null | null | null | _site/lib/entry.py | niklasbt/notebook | a1144028f57ef735371c5795b708527ef5beca01 | [
"MIT"
] | null | null | null | _site/lib/entry.py | niklasbt/notebook | a1144028f57ef735371c5795b708527ef5beca01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import importlib
from datetime import datetime
### Functions ##################################################################
def getFiles(path, type):
    """Return the menu entries for a directory of projects or templates.

    Args:
        path: directory to scan.
        type: 'project' lists ``*.md`` files (prepending 'New project');
              'template' lists ``*.py`` files, skipping dunder modules such
              as ``__init__.py`` (prepending 'Blank template').

    Returns:
        list of str: the placeholder entry followed by the sorted file stems.
    """
    if type == 'project':
        files = sorted(os.path.splitext(f)[0]
                       for f in os.listdir(path) if f.endswith('.md'))
        files.insert(0, 'New project')
        print(files)
    elif type == 'template':
        # BUGFIX: the original removed items from `files` while iterating it,
        # which skips the element following each removal; filter instead.
        files = sorted(os.path.splitext(f)[0]
                       for f in os.listdir(path)
                       if f.endswith('.py') and '__' not in f)
        files.insert(0, 'Blank template')
    return files
def listFiles(list, type):
    """Clear the screen and print *list* as a numbered menu.

    Args:
        list: entries to display (index 0 is the 'new/blank' placeholder).
        type: 'project' or 'template'; selects the banner and label text.
    """
    os.system('clear')
    # One table instead of two duplicated branches; the banner is padded
    # with '#' to the full terminal width.
    banners = {'project': ('CHOOSE A PROJECT ', 'projects'),
               'template': ('CHOOSE A TEMPLATE ', 'templates')}
    if type in banners:
        banner, label = banners[type]
        print(banner + '#' * (os.get_terminal_size()[0] - len(banner)))
        print('\nAvailable ' + label + ':')
        for k in range(len(list)):
            print(' ' + str(k) + ': ' + list[k])
def save_entry(title, short_title, project, body):
    """Write a post file with YAML front matter into ../_posts/.

    The filename is ``YYYY-MM-DD-<short_title>.md``; refuses to overwrite an
    existing entry (prints a message and exits).
    """
    save_path = '../_posts/'
    header = ([
        '---\n',
        'title: ' + title + '\n',
        'project: ' + project + '\n',
        '---\n',
    ])
    # Ensure every line is newline-terminated before writing.
    lines = [line if line.endswith('\n') else line + '\n'
             for line in header + body]
    filename = datetime.today().strftime('%Y-%m-%d') + '-' + short_title + '.md'
    fullpath = os.path.join(save_path, filename)
    if os.path.isfile(fullpath):
        print('\n This entry exists! \n')
        exit()
    with open(fullpath, 'w') as file:
        file.writelines(lines)
### Exceptions #################################################################
class negativeList(Exception):
    """Raised when the user enters a negative menu index."""
    pass

class exceedsList(Exception):
    """Raised when the user enters an index past the end of the menu."""
    pass

class badTemplate(Exception):
    """Raised when a template module returns a malformed (non-string) body."""
    pass
### Code here ##################################################################
projects = getFiles('../_projects', 'project')  # Get list of projects
templates = getFiles('templates', 'template')   # Get list of templates
body = []


def _prompt_index(options, file_type, prompt):
    """Show the menu for *options* and re-prompt until a valid index is given.

    Args:
        options: menu entries (index 0 is the placeholder).
        file_type: 'project' or 'template' (used in the menu and messages).
        prompt: input() prompt string.

    Returns:
        int: the validated index entered by the user.
    """
    listFiles(options, file_type)
    while True:
        try:
            choice = int(input(prompt))
            if not 0 <= choice < len(options):
                raise IndexError
            return choice
        except (ValueError, IndexError):
            # Non-numeric or out-of-range input: re-show the menu and retry.
            # (Replaces three near-identical except blocks in the original.)
            input('\nInvalid ' + file_type + ' #...')
            listFiles(options, file_type)


# Print projects and query user
projectNo = _prompt_index(projects, 'project', '\nEntry project #: ')

# If a new project is to be created
if projectNo == 0:
    print('New project!')
    # project.py is expected to define `short_name` in this scope.
    exec(open('project.py').read())
    projName = short_name
else:
    projName = projects[projectNo]

# Query entry info
title = input(" Entry title: ")
short_title = input(" Short title: ")

# List templates and query user
templateNo = _prompt_index(templates, 'template', '\nTemplate #: ')

# If a blank template is to be used
if templateNo == 0:
    save_entry(title, short_title, projName, body)
    exit()

# Otherwise, use the chosen template
while True:
    try:
        importedTemplate = importlib.import_module('templates.' + templates[templateNo])
        body = importedTemplate.get_body()
        if not all(isinstance(line, str) for line in body):
            body = []
            raise badTemplate
        save_entry(title, short_title, projName, body)
        exit()
    except (AttributeError, badTemplate):
        # Template missing get_body() or returning non-strings: let the user
        # pick again; index 0 falls back to a blank entry.
        input('\nBad template!')
        listFiles(templates, 'template')
        templateNo = int(input('\nTemplate #: '))
        if templateNo == 0:
            save_entry(title, short_title, projName, body)
            exit()
| 30.387097 | 86 | 0.563694 |
8ddc861a1b6c0b098ec7cc32abf72e3744d1640a | 228 | py | Python | boxdb/table_checkup.py | kshitij1235/boxdb | 4aa121f856c148c5136368041a610a584fb1dbc6 | [
"MIT"
] | 1 | 2022-01-31T17:21:02.000Z | 2022-01-31T17:21:02.000Z | boxdb/table_checkup.py | kshitij1235/boxdb | 4aa121f856c148c5136368041a610a584fb1dbc6 | [
"MIT"
] | null | null | null | boxdb/table_checkup.py | kshitij1235/boxdb | 4aa121f856c148c5136368041a610a584fb1dbc6 | [
"MIT"
] | null | null | null | """
boxdb/table_checkup.py -> v0.3
This file contain code for
1)to check table
"""
from os import path
def check_table(table_name):
    """Return True when a table named *table_name* exists under the
    current working directory, False otherwise."""
    table_path = "./{}".format(table_name)
    return path.exists(table_path)
| 15.2 | 41 | 0.653509 |
469acc25fd3f1c2d6a3bdf40d6ec95ebe12e4a7f | 5,356 | py | Python | friendly/source_cache.py | MrGreenTea/friendly | 091f6af1d3c2be8fee078e52db6e16074d5518e5 | [
"MIT"
] | null | null | null | friendly/source_cache.py | MrGreenTea/friendly | 091f6af1d3c2be8fee078e52db6e16074d5518e5 | [
"MIT"
] | null | null | null | friendly/source_cache.py | MrGreenTea/friendly | 091f6af1d3c2be8fee078e52db6e16074d5518e5 | [
"MIT"
] | null | null | null | """source_cache.py
Used to cache and retrieve source code.
This is especially useful when a custom REPL is used.
Note that we monkeypatch Python's linecache.getlines.
"""
import linecache
import time
old_getlines = linecache.getlines
idle_get_lines = None
class Cache:
    """Class used to store source of files and similar objects"""

    def __init__(self):
        # Maps filename -> list of source lines, each ending with "\n".
        self.cache = {}
        # Number of context lines shown around a flagged line.
        self.context = 4

    def add(self, filename, source):
        """Adds a source (received as a string) corresponding to a filename
        in the cache.

        The filename can be a true file name, or a fake one, like
        <friendly-console:42>, used for saving an REPL entry.

        These fake filenames might not be retrieved by Python's linecache
        which is why we keep a duplicate of anything we add to linecache.cache
        """
        # filename could be a Path object,
        # which does not have a startswith() method used below
        filename = str(filename)
        lines = [line + "\n" for line in source.splitlines()]
        entry = (len(source), time.time(), lines, filename)
        if not filename.startswith("<"):
            # Linecache never allows retrieving of such values,
            # so it is pointless to attempt to store them there.
            linecache.cache[filename] = entry
        self.cache[filename] = lines

    def remove(self, filename):
        """Removes an entry from the cache if it can be found."""
        if filename in self.cache:
            del self.cache[filename]
        if filename in linecache.cache:
            del linecache.cache[filename]

    def get_source_lines(self, filename, module_globals=None):
        """Given a filename, returns the corresponding source, either
        from the cache or from actually opening the file.

        If the filename corresponds to a true file, and the last time
        it was modified differs from the recorded value, a fresh copy
        is retrieved.

        The contents is stored as a string and returned as a list of lines,
        each line ending with a newline character.
        """
        if idle_get_lines is not None:
            lines = idle_get_lines(filename, None)  # noqa
        else:
            lines = old_getlines(filename, module_globals=module_globals)
            if not lines and filename in self.cache:
                lines = self.cache[filename]
        # Fix: work on a copy.  Appending directly to `lines` mutated the
        # list stored in self.cache / linecache.cache, so every retrieval
        # permanently grew the cached entry by one "\n" line.
        lines = list(lines)
        lines.append("\n")  # required when dealing with EOF errors
        return lines

    def get_formatted_partial_source(
        self, filename, linenumber, offset=None, text_range=None
    ):
        """Formats a few lines around a 'bad line', and returns
        the formatted source as well as the content of the 'bad line'.
        """
        lines = self.get_source_lines(filename)
        if not lines or not "".join(lines).strip():
            return "", ""
        begin = max(0, linenumber - self.context)
        partial_source, bad_line = highlight_source(
            linenumber,
            linenumber - begin - 1,
            # it is useful to show at least one more line when a statement
            # continues beyond the current line.
            lines[begin : linenumber + 1],
            offset=offset,
            text_range=text_range,
        )
        return partial_source, bad_line
cache = Cache()  # module-level singleton shared by the rest of the package

# Monkeypatch linecache to make our own cached content available to Python.
linecache.getlines = cache.get_source_lines
def highlight_source(linenumber, index, lines, offset=None, text_range=None):
    """Render *lines* with line numbers, marking one line with an arrow.

    ``linenumber`` is the absolute number of the line to flag and ``index``
    its position within *lines*, so numbering starts at
    ``linenumber - index``.  When ``offset`` is given, a ``^`` caret row is
    inserted under the flagged line (SyntaxError style); otherwise, when
    ``text_range`` is given, a run of carets underlines that span.  Output
    stops at the first blank line after the flagged one.

    Returns a tuple ``(formatted_source, flagged_line)``.
    """
    width = len(str(linenumber + index))
    plain_fmt = " {:" + str(width) + "}: "
    arrow_fmt = " -->{:" + str(width) + "}: "

    # At most one marker row is emitted; an explicit offset takes
    # precedence over a text range.
    marker_row = None
    if offset is not None:
        marker_row = " " * (8 + width + offset) + "^"
    elif text_range is not None:
        begin, end = text_range
        marker_row = " " * (8 + width + begin + 1) + "^" * (end - begin)

    rendered = []
    flagged_line = ""
    seen_flagged = False
    for current, text in enumerate(lines, linenumber - index):
        if current == linenumber:
            flagged_line = text
            rendered.append(arrow_fmt.format(current) + text.rstrip())
            if marker_row is not None:
                rendered.append(marker_row)
            seen_flagged = True
        else:
            if seen_flagged and not text.strip():
                # Do not print empty trailing lines after the flagged one.
                break
            rendered.append(plain_fmt.format(current) + text.rstrip())
    return "\n".join(rendered), flagged_line
| 34.779221 | 81 | 0.622293 |
051f9e21a1f420101115302ad07e0edae7742bb6 | 607 | py | Python | lib/sqlalchemy/ext/asyncio/base.py | Dreamsorcerer/sqlalchemy | 153671df9d4cd7f2cdb3e14e6221f529269885d9 | [
"MIT"
] | 1 | 2021-04-04T10:13:08.000Z | 2021-04-04T10:13:08.000Z | lib/sqlalchemy/ext/asyncio/base.py | Dreamsorcerer/sqlalchemy | 153671df9d4cd7f2cdb3e14e6221f529269885d9 | [
"MIT"
] | null | null | null | lib/sqlalchemy/ext/asyncio/base.py | Dreamsorcerer/sqlalchemy | 153671df9d4cd7f2cdb3e14e6221f529269885d9 | [
"MIT"
] | 1 | 2020-12-04T14:51:39.000Z | 2020-12-04T14:51:39.000Z | import abc
from . import exc as async_exc
class StartableContext(abc.ABC):
    """Awaitable async context manager whose setup is deferred to start().

    Subclasses implement :meth:`start`; both awaiting the object and
    entering it with ``async with`` delegate to that method.
    """

    @abc.abstractmethod
    async def start(self) -> "StartableContext":
        """Perform the asynchronous setup and return the started context."""

    def __await__(self):
        # Awaiting the context is the same as awaiting its start() step.
        return self.start().__await__()

    async def __aenter__(self):
        started = await self.start()
        return started

    @abc.abstractmethod
    async def __aexit__(self, type_, value, traceback):
        """Release whatever start() acquired."""

    def _raise_for_not_started(self):
        raise async_exc.AsyncContextNotStarted(
            "%s context has not been started and object has not been awaited."
            % (self.__class__.__name__)
        )
| 23.346154 | 78 | 0.649094 |
deebf591cd10f3db91717d3d24d0b370936955eb | 818 | py | Python | setup.py | vuhcl/cs110_final_project | fe72f9a17799dd770786f3e7279f140c30284f63 | [
"MIT"
] | null | null | null | setup.py | vuhcl/cs110_final_project | fe72f9a17799dd770786f3e7279f140c30284f63 | [
"MIT"
] | null | null | null | setup.py | vuhcl/cs110_final_project | fe72f9a17799dd770786f3e7279f140c30284f63 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='cs110_finalproject_vu_quotientfilter',
    version='1.0a1',
    description='Final Project - CS110 Fall 2017',
    # Fix: long_description was read above but never handed to setup().
    long_description=long_description,
    url='https://github.com/vuhcl/cs110_final_project',
    author='Vu H. Chu-Le',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    # py_modules entries are module names, not file names (no ".py").
    py_modules=["__init__"],
    # Fix: 'math' removed — it is part of the standard library and cannot
    # be installed from PyPI, which made this package uninstallable.
    install_requires=['mmh3', 'numpy'],
)
| 27.266667 | 64 | 0.665037 |
7dacc7015e98af041ec2d9972b1e2a749989d4b7 | 596 | py | Python | src/event/api/urls.py | brijeshgzp05/yantragene2k19 | 139f5a8e084b03eec58bd6da5878dc3c16b2a5cb | [
"MIT"
] | 3 | 2019-10-27T10:26:01.000Z | 2020-02-28T16:33:42.000Z | src/event/api/urls.py | brijeshgzp05/yantragene2k19 | 139f5a8e084b03eec58bd6da5878dc3c16b2a5cb | [
"MIT"
] | null | null | null | src/event/api/urls.py | brijeshgzp05/yantragene2k19 | 139f5a8e084b03eec58bd6da5878dc3c16b2a5cb | [
"MIT"
] | null | null | null | from django.urls import path,re_path
from .views import (
EventDetailAPIView,
ParticipantCreateAPIView,
ParticipantListAPIView,
ParticipantDeleteAPIView,
)
app_name = 'event-api'  # URL namespace, e.g. reverse('event-api:participant-list')
urlpatterns = [
    # NOTE: order matters — 'participatedin/' must come before the slug
    # pattern below, which would otherwise also match that single segment.
    path('participatedin/', ParticipantListAPIView.as_view(), name='participant-list'),
    # Catch-all single-segment slug for event detail.
    re_path(r'^(?P<slug>[\w-]+)/$', EventDetailAPIView.as_view(), name="event-detail"),
    re_path(r'^participatedin/delete/(?P<pk>\d+)/$', ParticipantDeleteAPIView.as_view(), name="participant-delete"),
    path('participant/create/', ParticipantCreateAPIView.as_view(), name='participant-create'),
]
de85e696b501875e637d70451cc7f5b17c076d61 | 3,361 | py | Python | Widen/LC464_can_I_win.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | [
"MIT"
] | null | null | null | Widen/LC464_can_I_win.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | [
"MIT"
] | null | null | null | Widen/LC464_can_I_win.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | [
"MIT"
] | null | null | null | """
LC464 -- can I win
In the "100 game," two players take turns adding, to a running total, any integer from 1..10. The player who first causes the running total to reach or exceed 100 wins.
What if we change the game so that players cannot re-use integers?
For example, two players might take turns drawing from a common pool of numbers of 1..15 without replacement until they reach a total >= 100.
Given an integer maxChoosableInteger and another integer desiredTotal, determine if the first player to move can force a win, assuming both players play optimally.
You can always assume that maxChoosableInteger will not be larger than 20 and desiredTotal will not be larger than 300.
Example
Input:
maxChoosableInteger = 10
desiredTotal = 11
Output:
false
Explanation:
No matter which integer the first player choose, the first player will lose.
The first player can choose an integer from 1 up to 10.
If the first player choose 1, the second player can only choose integers from 2 up to 10.
The second player will win by choosing 10 and get a total = 11, which is >= desiredTotal.
Same with other integers chosen by the first player, the second player will always win.
"""
# Runtime: 868 ms, faster than 61.70% of Python online submissions for Can I Win.
# Memory Usage: 18.8 MB, less than 94.68% of Python online submissions for Can I Win.
class Solution(object):
def canIWin(self, maxChoosableInteger, desiredTotal):
"""
:type maxChoosableInteger: int
:type desiredTotal: int
:rtype: bool
"""
if (1 + maxChoosableInteger) * maxChoosableInteger/2 < desiredTotal:
return False
self.memo = {}
return self.helper(range(1, maxChoosableInteger + 1), desiredTotal)
def helper(self, nums, desiredTotal):
key = str(nums)
if key in self.memo:
return self.memo[key]
if nums[-1] >= desiredTotal:
return True
for i in range(len(nums)):
if not self.helper(nums[:i] + nums[i+1:], desiredTotal - nums[i]):
self.memo[key]= True
return True
self.memo[key] = False
return False
# this problem is not very good...
# sort of brutal force with memorization
# time complexity -- O(2^M)
# space complexity -- O(2^M)
class Solution(object):
    def canIWin(self, maxChoosableInteger, desiredTotal):
        """Bitmask + memo variant: bit (x-1) of *state* marks x as used.

        :type maxChoosableInteger: int
        :type desiredTotal: int
        :rtype: bool
        """
        dp = dict()

        def search(state, total):
            # If the largest unused number already reaches the total, the
            # player to move wins.  Only the largest candidate needs
            # checking here, hence the break after the first unused x.
            for x in range(maxChoosableInteger, 0, -1):
                if not state & (1 << (x - 1)):
                    if total + x >= desiredTotal:
                        dp[state] = True
                        return True
                    break
            for x in range(1, maxChoosableInteger + 1):
                if not state & (1 << (x - 1)):
                    nstate = state | (1 << (x - 1))
                    if nstate not in dp:
                        dp[nstate] = search(nstate, total + x)
                    if not dp[nstate]:
                        # Opponent loses from nstate, so this move wins.
                        dp[state] = True
                        return True
            dp[state] = False
            return False

        if maxChoosableInteger >= desiredTotal: return True
        if (1 + maxChoosableInteger) * maxChoosableInteger < 2 * desiredTotal: return False
        # NOTE(review): the final line of this block was corrupted by fused
        # table metadata in the source; reconstructed as the entry call.
        return search(0, 0)
81e6c0f11a050e3a59bf1ed662b22850fa1ef3d2 | 1,639 | py | Python | src/userChatLog.py | liusoon/wechat-alfred-workflow | 449995275dd700bcb3686abcfe2ed9c63ea826a3 | [
"MIT"
] | 929 | 2018-03-18T07:36:28.000Z | 2022-03-25T07:33:46.000Z | src/userChatLog.py | liusoon/wechat-alfred-workflow | 449995275dd700bcb3686abcfe2ed9c63ea826a3 | [
"MIT"
] | 57 | 2018-03-18T14:03:51.000Z | 2021-05-28T01:14:01.000Z | src/userChatLog.py | liusoon/wechat-alfred-workflow | 449995275dd700bcb3686abcfe2ed9c63ea826a3 | [
"MIT"
] | 141 | 2018-03-21T07:53:39.000Z | 2022-03-28T07:51:15.000Z | # -*- coding:utf-8 -*-
import json,sys,os
from workflow import Workflow, web
reload(sys)
sys.setdefaultencoding('utf-8')
def main(wf):
userId = os.getenv('userId')
baseUrl = os.getenv('baseUrl')
url = baseUrl + 'chatlog?userId=' + userId + '&count=45'
try:
result = web.get(url=url)
result.raise_for_status()
resp = result.text
userList = json.loads(resp)
if len(userList) > 0:
wf.store_data('wechat_send_content',sys.argv[1])
for item in userList:
title = item['title']
subtitle = item['subTitle']
icon = item['icon']
userId = item['userId']
copyText = item['copyText']
qlurl = item['url']
srvId = str(item['srvId'])
titleLen = len(title)
lineNun = 70
if titleLen < lineNun:
largetext = title
else:
titleArray = []
for n in range(titleLen):
if n % lineNun == 0:
titleArray.append(title[n:n+lineNun])
largetext='\n'.join(titleArray)
wf.add_item(title=title, subtitle=subtitle, icon=icon, valid=True, largetext=largetext, quicklookurl=qlurl, copytext=copyText, arg=srvId)
else:
wf.add_item(title='找不到联系人…',subtitle='请重新输入')
except IOError:
wf.add_item(title='请先启动微信 & 登录…',subtitle='并确保安装微信小助手')
wf.send_feedback()
if __name__ == '__main__':
    # Entry point invoked by the Alfred workflow runner.
    wf = Workflow()
    sys.exit(wf.run(main))
| 34.87234 | 153 | 0.519219 |
0e708407f81a1c877e0f5ffc15d3c334cb691899 | 9,410 | py | Python | meltingpot/python/utils/scenarios/scenario_test.py | Rohan138/meltingpot | d4e3839225b78babcedbbbf95cf747ff9e0a87b5 | [
"Apache-2.0"
] | null | null | null | meltingpot/python/utils/scenarios/scenario_test.py | Rohan138/meltingpot | d4e3839225b78babcedbbbf95cf747ff9e0a87b5 | [
"Apache-2.0"
] | null | null | null | meltingpot/python/utils/scenarios/scenario_test.py | Rohan138/meltingpot | d4e3839225b78babcedbbbf95cf747ff9e0a87b5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of scenarios."""
import random
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import immutabledict
from meltingpot.python import substrate as substrate_factory
from meltingpot.python.utils.bots import policy
from meltingpot.python.utils.scenarios import population
from meltingpot.python.utils.scenarios import scenario as scenario_utils
def _track(source, fields):
destination = []
for field in fields:
getattr(source, field).subscribe(
on_next=destination.append,
on_error=lambda e: destination.append(type(e)),
on_completed=lambda: destination.append('DONE'),
)
return destination
@parameterized.parameters(
    ((), (), (), ()),
    (('a',), (True,), ('a',), ()),
    (('a',), (False,), (), ('a',)),
    (('a', 'b', 'c'), (True, True, False), ('a', 'b'), ('c',)),
    (('a', 'b', 'c'), (False, True, False), ('b',), ('a', 'c')),
)
class PartitionMergeTest(parameterized.TestCase):
    """Parameterised round-trip tests for _partition/_merge.

    Each parameter row is (merged, is_focal, focal_part, background_part).
    """

    def test_partition(self, merged, is_focal, *expected):
        # Splitting `merged` by the is_focal mask yields the two parts.
        actual = scenario_utils._partition(merged, is_focal)
        self.assertEqual(actual, expected)

    def test_merge(self, expected, is_focal, *partions):
        # Merging the parts back under the same mask restores `merged`.
        actual = scenario_utils._merge(*partions, is_focal)
        self.assertEqual(actual, expected)
class ScenarioWrapperTest(absltest.TestCase):
    """End-to-end check of Scenario over a fully mocked substrate.

    Four-player substrate; players 0 and 2 are focal (driven by the test),
    players 1 and 3 are background bots.  Only the 'ok' observation key is
    permitted through to focal players.
    """

    def test_scenario(self):
        # Mocked substrate returning fixed reset/step timesteps for 4 players.
        substrate = mock.Mock(spec_set=substrate_factory.Substrate)
        substrate.reset.return_value = dm_env.TimeStep(
            step_type=dm_env.StepType.FIRST,
            discount=0,
            reward=(10, 20, 30, 40),
            observation=(
                immutabledict.immutabledict(ok=10, not_ok=100),
                immutabledict.immutabledict(ok=20, not_ok=200),
                immutabledict.immutabledict(ok=30, not_ok=300),
                immutabledict.immutabledict(ok=40, not_ok=400),
            ),
        )
        substrate.step.return_value = dm_env.transition(
            reward=(11, 21, 31, 41),
            observation=(
                immutabledict.immutabledict(ok=11, not_ok=101),
                immutabledict.immutabledict(ok=21, not_ok=201),
                immutabledict.immutabledict(ok=31, not_ok=301),
                immutabledict.immutabledict(ok=41, not_ok=401),
            ),
        )
        substrate.events.return_value = (
            mock.sentinel.event_0, mock.sentinel.event_1)
        substrate.action_spec.return_value = tuple(
            f'action_spec_{n}' for n in range(4)
        )
        substrate.observation_spec.return_value = tuple(
            immutabledict.immutabledict(
                ok=f'ok_spec_{n}', not_ok=f'not_ok_spec_{n}')
            for n in range(4)
        )
        substrate.reward_spec.return_value = tuple(
            f'reward_spec_{n}' for n in range(4)
        )

        # Two mocked background bots; bot n always plays action n+10.
        bots = {}
        for n in range(2):
            bot = mock.Mock(spec_set=policy.Policy)
            bot.initial_state.return_value = f'bot_state_{n}'
            bot.step.return_value = (n + 10, f'bot_state_{n}')
            bots[f'bot_{n}'] = bot
        background_population = population.Population(
            policies=bots, population_size=2)

        with scenario_utils.Scenario(
                substrate=substrate_factory.Substrate(substrate),
                background_population=background_population,
                is_focal=[True, False, True, False],
                permitted_observations={'ok'}) as scenario:
            observables = scenario.observables()
            # Record everything each observable stream emits ('DONE' is
            # appended when the scenario closes below).
            received = {
                'base': _track(observables, ['events', 'action', 'timestep']),
                'background': _track(observables.background, ['action', 'timestep']),
                'substrate': _track(
                    observables.substrate, ['events', 'action', 'timestep']),
            }
            action_spec = scenario.action_spec()
            observation_spec = scenario.observation_spec()
            reward_spec = scenario.reward_spec()
            # Pin the bot sampling so the episode is deterministic.
            with mock.patch.object(
                    random, 'choices', return_value=['bot_0', 'bot_1']):
                initial_timestep = scenario.reset()
            step_timestep = scenario.step([0, 1])

        # Focal players are indices 0 and 2; only their specs are exposed.
        with self.subTest(name='action_spec'):
            self.assertEqual(action_spec, ('action_spec_0', 'action_spec_2'))
        with self.subTest(name='observation_spec'):
            self.assertEqual(observation_spec,
                             (immutabledict.immutabledict(ok='ok_spec_0'),
                              immutabledict.immutabledict(ok='ok_spec_2')))
        with self.subTest(name='reward_spec'):
            self.assertEqual(reward_spec, ('reward_spec_0', 'reward_spec_2'))
        with self.subTest(name='events'):
            self.assertEmpty(scenario.events())
        with self.subTest(name='initial_timestep'):
            expected = dm_env.TimeStep(
                step_type=dm_env.StepType.FIRST,
                discount=0,
                reward=(10, 30),
                observation=(
                    immutabledict.immutabledict(ok=10),
                    immutabledict.immutabledict(ok=30),
                ),
            )
            self.assertEqual(initial_timestep, expected)
        with self.subTest(name='step_timestep'):
            expected = dm_env.transition(
                reward=(11, 31),
                observation=(
                    immutabledict.immutabledict(ok=11),
                    immutabledict.immutabledict(ok=31),
                ),
            )
            self.assertEqual(step_timestep, expected)
        with self.subTest(name='substrate_step'):
            # Focal actions (0, 1) interleaved with bot actions (10, 11).
            substrate.step.assert_called_once_with((0, 10, 1, 11))
        with self.subTest(name='bot_0_step'):
            actual = bots['bot_0'].step.call_args_list[0]
            expected = mock.call(
                timestep=dm_env.TimeStep(
                    step_type=dm_env.StepType.FIRST,
                    discount=0,
                    reward=20,
                    observation=immutabledict.immutabledict(ok=20, not_ok=200),
                ),
                prev_state='bot_state_0')
            self.assertEqual(actual, expected)
        with self.subTest(name='bot_1_step'):
            actual = bots['bot_1'].step.call_args_list[0]
            expected = mock.call(
                timestep=dm_env.TimeStep(
                    step_type=dm_env.StepType.FIRST,
                    discount=0,
                    reward=40,
                    observation=immutabledict.immutabledict(ok=40, not_ok=400),
                ),
                prev_state='bot_state_1')
            self.assertEqual(actual, expected)
        with self.subTest(name='base_observables'):
            expected = [
                dm_env.TimeStep(
                    step_type=dm_env.StepType.FIRST,
                    discount=0,
                    reward=(10, 30),
                    observation=(
                        immutabledict.immutabledict(ok=10),
                        immutabledict.immutabledict(ok=30),
                    ),
                ),
                [0, 1],
                dm_env.transition(
                    reward=(11, 31),
                    observation=(
                        immutabledict.immutabledict(ok=11),
                        immutabledict.immutabledict(ok=31),
                    ),
                ),
                'DONE',
                'DONE',
                'DONE',
            ]
            self.assertEqual(received['base'], expected)
        with self.subTest(name='substrate_observables'):
            expected = [
                dm_env.TimeStep(
                    step_type=dm_env.StepType.FIRST,
                    discount=0,
                    reward=(10, 20, 30, 40),
                    observation=(
                        immutabledict.immutabledict(ok=10, not_ok=100),
                        immutabledict.immutabledict(ok=20, not_ok=200),
                        immutabledict.immutabledict(ok=30, not_ok=300),
                        immutabledict.immutabledict(ok=40, not_ok=400),
                    ),
                ),
                mock.sentinel.event_0,
                mock.sentinel.event_1,
                (0, 10, 1, 11),
                dm_env.transition(
                    reward=(11, 21, 31, 41),
                    observation=(
                        immutabledict.immutabledict(ok=11, not_ok=101),
                        immutabledict.immutabledict(ok=21, not_ok=201),
                        immutabledict.immutabledict(ok=31, not_ok=301),
                        immutabledict.immutabledict(ok=41, not_ok=401),
                    ),
                ),
                mock.sentinel.event_0,
                mock.sentinel.event_1,
                'DONE',
                'DONE',
                'DONE',
            ]
            self.assertEqual(received['substrate'], expected)
        with self.subTest(name='background_observables'):
            expected = [
                dm_env.TimeStep(
                    step_type=dm_env.StepType.FIRST,
                    discount=0,
                    reward=(20, 40),
                    observation=(
                        immutabledict.immutabledict(ok=20, not_ok=200),
                        immutabledict.immutabledict(ok=40, not_ok=400),
                    ),
                ),
                (10, 11),
                dm_env.transition(
                    reward=(21, 41),
                    observation=(
                        immutabledict.immutabledict(ok=21, not_ok=201),
                        immutabledict.immutabledict(ok=41, not_ok=401),
                    ),
                ),
                'DONE',
                'DONE',
            ]
            self.assertEqual(received['background'], expected)
if __name__ == '__main__':
    # Run all absl test cases in this module.
    absltest.main()
| 34.723247 | 79 | 0.60085 |
278c8ca41bc2707dde6327c5c71d395b042a9074 | 2,244 | py | Python | freezer-dr-7.1.0/freezer_dr/main.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 15 | 2016-05-25T08:14:59.000Z | 2022-03-10T11:36:00.000Z | freezer-dr-7.1.0/freezer_dr/main.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | freezer-dr-7.1.0/freezer_dr/main.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 3 | 2019-01-10T10:40:36.000Z | 2019-03-19T06:33:01.000Z | # (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from freezer_dr.common import config
from freezer_dr.evacuators.common.manager import EvacuationManager
from freezer_dr.monitors.common.manager import MonitorManager
from freezer_dr.notifiers.common.manager import NotificationManager
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF  # global oslo.config registry
LOG = log.getLogger(__name__)  # module-level oslo.log logger
def main():
    """Run one Freezer-DR cycle: monitor compute nodes and, when any are
    reported down, evacuate them and notify on every outcome."""
    config.configure()
    config.setup_logging()
    LOG.info('Starting Freezer DR ... ')
    # The notification driver is shared by monitoring and evacuation.
    notification_mgr = NotificationManager()
    # Load and initialize the monitoring driver.
    monitoring_mgr = MonitorManager(notifier=notification_mgr.get_driver())
    # Monitor, analyse, nodes down ?, wait, double check ? evacuate ..
    failed_hosts = monitoring_mgr.monitor()
    if not failed_hosts:
        print("No nodes reported to be down")
        return
    # @todo put node in maintenance mode :) Not working with virtual
    # deployments.  Fencing / shutting down the node would also go here.
    evacuation_mgr = EvacuationManager()
    notification_mgr.notify(
        evacuation_mgr.get_nodes_details(failed_hosts), 'original')
    evacuated, not_evacuated = evacuation_mgr.evacuate(failed_hosts)
    LOG.debug("Successfully evacuated nodes {0}".format(evacuated))
    LOG.debug("Failed to evacuate nodes {0}".format(not_evacuated))
    notification_mgr.notify(
        evacuation_mgr.get_nodes_details(evacuated), 'success')
    notification_mgr.notify(
        evacuation_mgr.get_nodes_details(not_evacuated), 'error')
| 38.689655 | 77 | 0.731729 |
07c7ea3fac4902afa878b1c347349553ca96d18d | 163 | py | Python | moto/organizations/urls.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 5,460 | 2015-01-01T01:11:17.000Z | 2022-03-31T23:45:38.000Z | moto/organizations/urls.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 4,475 | 2015-01-05T19:37:30.000Z | 2022-03-31T13:55:12.000Z | moto/organizations/urls.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 1,831 | 2015-01-14T00:00:44.000Z | 2022-03-31T20:30:04.000Z | from .responses import OrganizationsResponse
url_bases = [r"https?://organizations\.(.+)\.amazonaws\.com"]
url_paths = {"{0}/$": OrganizationsResponse.dispatch}
| 27.166667 | 61 | 0.730061 |
13a38022977e55fa9f6f4f12695adf1aa2234607 | 273 | py | Python | src/downward/experiments/issue919/translator_additional_parser.py | ScarfZapdos/conan-bge-questgen | 4d184c5bf0ae4b768b8043cec586395df9ce1451 | [
"MIT"
] | 1 | 2021-09-09T13:03:02.000Z | 2021-09-09T13:03:02.000Z | src/downward/experiments/issue919/translator_additional_parser.py | ScarfZapdos/conan-bge-questgen | 4d184c5bf0ae4b768b8043cec586395df9ce1451 | [
"MIT"
] | null | null | null | src/downward/experiments/issue919/translator_additional_parser.py | ScarfZapdos/conan-bge-questgen | 4d184c5bf0ae4b768b8043cec586395df9ce1451 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import hashlib
from lab.parser import Parser
def add_hash_value(content, props):
    """Store the SHA-512 hex digest of *content* (raw output.sas bytes)
    under 'translator_output_sas_hash' in the lab *props* dict."""
    digest = hashlib.sha512(content)
    props['translator_output_sas_hash'] = digest.hexdigest()
# Stand-alone lab parser: record a hash of the translator's SAS output so
# identical outputs can be detected across experiment runs.
parser = Parser()
parser.add_function(add_hash_value, file="output.sas")
parser.parse()
| 21 | 77 | 0.769231 |
f6f224b0bb884c9d83b61218dd536aecccb1d161 | 1,549 | py | Python | main.py | camilos-ufm/OSPoolThreads | fc8e182a0d2d37a90b71dd90ec0518a2f79029ac | [
"CNRI-Python",
"CECILL-B"
] | 1 | 2021-04-17T07:05:03.000Z | 2021-04-17T07:05:03.000Z | main.py | camilos-ufm/OSPoolThreads | fc8e182a0d2d37a90b71dd90ec0518a2f79029ac | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | main.py | camilos-ufm/OSPoolThreads | fc8e182a0d2d37a90b71dd90ec0518a2f79029ac | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | from mat_mult import mat_mult
from csv_manager import csv_manager
import sys, time
import sys
# python main file1 file2 pool_size fileoutput
def show_help():
    """Print the command-line usage text (argument descriptions)."""
    sections = (
        "Help Menu\n\n",
        "fileA:\n Archivo (matriz) de entrada 1 de 2.\n",
        "\nfileB:\n Archivo (matriz) de entrada 2 de 2.\n",
        "\npool_size:\n El tamaño del pool a utilizar.\n",
        "\noutput_filename:\n Nombre del archivo de salida.\n",
    )
    print("".join(sections))
if __name__ == "__main__":
    # Usage: python main.py <fileA> <fileB> <pool_size> <output_filename>
    arguments = sys.argv
    if (len(arguments) == 5):
        file_a = arguments[1]
        file_b = arguments[2]
        pool_size = arguments[3]
        output_filename = arguments[4]
        # Load both input matrices from CSV.
        file_a_df = csv_manager.read(file_a)
        file_b_df = csv_manager.read(file_b)
        mult = mat_mult(file_a_df, file_b_df, pool_size)
        start = 0
        end = 0
        if(mult.validate_dimentions()):
            # Time only the multiplication itself.
            start = time.perf_counter()
            mult.run()
            end = time.perf_counter()
        else:
            print("Not able to operate, mismatched dimentions")
        print(f"Time taken to complete mat_mult(): {round(end - start, 5)} seconds(s)")
        # Persist the result plus timing/pool metadata to the output file.
        csv_manager.write_final_response(mult.get_final_response(), output_filename)
        csv_manager.write(output_filename, f"\n")
        csv_manager.write(output_filename, f"Time taken to complete mat_mult(): {round(end - start, 5)} seconds(s)\n")
        csv_manager.write(output_filename, f"Pool size used to complete mat_mult(): {pool_size}\n")
    else:
        show_help()
25133b3f5335050737c10c820cf76c838eea2ae6 | 323 | py | Python | tests/test_cli.py | fakegit/pytube | fae2e46c300ec8c41ddc60f5a4eb8ce7cdd47cf5 | [
"MIT-0"
] | null | null | null | tests/test_cli.py | fakegit/pytube | fae2e46c300ec8c41ddc60f5a4eb8ce7cdd47cf5 | [
"MIT-0"
] | null | null | null | tests/test_cli.py | fakegit/pytube | fae2e46c300ec8c41ddc60f5a4eb8ce7cdd47cf5 | [
"MIT-0"
] | null | null | null | # -*- coding: utf-8 -*-
import mock
from pytube import cli
@mock.patch("pytube.cli.YouTube")
@mock.patch("pytube.cli.sys")
def test_download(MockYouTube, mock_sys):
instance = MockYouTube.return_value
instance.prefetch_init.return_value = None
instance.streams = mock.Mock()
cli.download("asdf", "asdf")
| 23.071429 | 46 | 0.712074 |
3a8382b37ed6e3cc7aa4af996a753d5b54dac9b9 | 9,105 | py | Python | demos/kernelmethod/server_kernelmethod.py | monadyn/fedlearn-algo | c4459d421139b0bb765527d636fff123bf17bda4 | [
"Apache-2.0"
] | 86 | 2021-07-20T01:54:21.000Z | 2021-10-06T04:02:40.000Z | demos/kernelmethod/server_kernelmethod.py | fedlearnAI/fedlearnalgo | 63d9ceb64d331ff2b5103ae49e54229cad7e2095 | [
"Apache-2.0"
] | 5 | 2021-07-23T21:22:16.000Z | 2021-09-12T15:48:35.000Z | demos/kernelmethod/server_kernelmethod.py | fedlearnAI/fedlearnalgo | 63d9ceb64d331ff2b5103ae49e54229cad7e2095 | [
"Apache-2.0"
] | 28 | 2021-07-20T07:15:33.000Z | 2021-08-22T20:04:57.000Z | # Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file is the server implementation of kernel method."""
from core.entity.common.message import RequestMessage, ResponseMessage
from core.entity.common.machineinfo import MachineInfo
from core.server.server import Server
import numpy
from typing import Dict, Set
class KernelMethodServerError(ValueError):
pass
def check_message(msg: str, target: Set):
if msg in target:
return True
else:
return False
class KernelMethodsServer(Server):
def __init__(self, machine_info: MachineInfo):
super().__init__()
self.machine_info = machine_info
self.clients_info = None
self.max_iter = 1000
self.iter = 0
self.sample_num = 0
self.clients_token = []
self.prediction = []
self.dict_functions = {}
self.inference_communication_count = 0
self.function_registration()
self.machine_ind = 0
def metric(self) -> None:
return
def get_next_phase(self, phase: str) -> str:
"""
Transfer old phase of client to new phase of server
"""
# train
if phase == "train_init":
next_phase = "train_loop_start"
elif (phase == "train_loop_start") or (phase == "param_update"):
next_phase = "meta_comp"
elif phase == "meta_comp":
next_phase = "param_update"
# inference
elif phase == "inference_init":
next_phase = "inference_comp"
elif phase == "inference_comp":
next_phase = "inference_end"
# raise error
else:
raise ValueError("Cannot find phase %s in both train and inference!"%phase)
return next_phase
def function_registration(self):
"""
Define the correspondence between response message type and server processing function.
"""
# train
self.dict_functions["train_loop_start"] = self.train_loop_start
self.dict_functions["meta_comp"] = self.req_tr_meta_comp
self.dict_functions["param_update"] = self.param_update
# inference
self.dict_functions["inference_comp"] = self.req_infer_comp
self.dict_functions["inference_end"] = self.predict
# Training related code implementation.
def init_training_control(self) -> Dict[MachineInfo, RequestMessage]:
"""
Send training initialization request to clients.
"""
requests = {}
for client_info in self.clients_info:
requests[client_info] = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=None,
phase_id="train_init")
return requests
def is_training_continue(self) -> bool:
if self.iter <= self.max_iter:
return True
else:
return False
def is_inference_continue(self) -> bool:
return self.inference_communication_count < 3
def train_loop_start(self, responses: Dict[MachineInfo, ResponseMessage]) -> Dict[MachineInfo, RequestMessage]:
"""
Training loop starts.
"""
requests = {}
for client_info, response in responses.items():
self.sample_num = response.body['sample_num']
request = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=None,
phase_id="train_loop_start")
requests[client_info] = request
return requests
def param_update(self, responses: Dict[MachineInfo, ResponseMessage]) -> Dict[MachineInfo, RequestMessage]:
"""
Aggregate the update from clients, then send request to clients for local parameter update.
The input response message contains all clients' meta computation update.
"""
vec_sum = numpy.zeros((self.sample_num, 1), dtype=numpy.float)
for client_info, response in responses.items():
body = response.body
vec_sum += body['meta_result']
loss = numpy.dot(vec_sum.T, vec_sum)/self.sample_num
print('training loss at iteration ' + str(self.iter) + ' is ' +str(loss))
client_num = len(self.clients_token)
self.machine_ind += 1
if self.machine_ind >= client_num:
self.machine_ind = 0
requests = {}
body = {'aggregation_result': vec_sum, 'chosen_machine': self.clients_token[self.machine_ind]}
for client_info, response in responses.items():
request = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=body,
phase_id="param_update")
requests[client_info] = request
self.iter += 1
return requests
def req_tr_meta_comp(self, responses: Dict[MachineInfo, ResponseMessage]) -> Dict[MachineInfo, RequestMessage]:
"""
Send request to clients to compute the meta results.
"""
requests = {}
for client_info, response in responses.items():
request = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=None,
phase_id="meta_comp")
requests[client_info] = request
return requests
def post_training_session(self) -> Dict[MachineInfo, RequestMessage]:
"""
Send finish signal to clients.
"""
requests = {}
for client_info in self.clients_info:
body = {'message': 'finish_training'}
requests[client_info] = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=body,
phase_id="train_finish")
return requests
# Inference related function code.
def init_inference_control(self) -> Dict[MachineInfo, RequestMessage]:
"""
Send request to clients for inference initialization.
"""
requests = {}
for client_info in self.clients_info:
requests[client_info] = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=None,
phase_id="inference_init")
self.inference_communication_count += 1
return requests
def req_infer_comp(self, responses: Dict[MachineInfo, ResponseMessage]) -> Dict[MachineInfo, RequestMessage]:
"""
Send request to clients for meta result compute.
"""
requests = {}
for client_info, response in responses.items():
if check_message(response.body['message'], {'initialization_ready'}):
self.sample_num = response.body['sample_num']
else:
raise KernelMethodServerError('inference on client %s fails to be initialized', client_info.token)
request = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=None,
phase_id="inference_comp")
requests[client_info] = request
self.inference_communication_count += 1
return requests
def predict(self, responses: Dict[MachineInfo, ResponseMessage]) -> Dict[MachineInfo, RequestMessage]:
"""
Finish the inference session.
"""
requests = {}
self.prediction = numpy.zeros((self.sample_num, 1), dtype=numpy.float)
for client_info, response in responses.items():
self.prediction += response.body['inner_product']
request = RequestMessage(sender=self.machine_info,
receiver=client_info,
body=None,
phase_id=None)
requests[client_info] = request
self.inference_communication_count += 1
return requests
def post_inference_session(self) -> None:
print("Predictions: ")
print(self.prediction)
return None | 39.586957 | 115 | 0.582757 |
685eb6df68d793b9c7baf533b2cdda8f79f95ba2 | 2,623 | py | Python | demo-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py | zouzhberk/ambaridemo | 90068c5ee7adf83ba9dde9a8ad8d396b2fbf87c8 | [
"Apache-2.0"
] | null | null | null | demo-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py | zouzhberk/ambaridemo | 90068c5ee7adf83ba9dde9a8ad8d396b2fbf87c8 | [
"Apache-2.0"
] | null | null | null | demo-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py | zouzhberk/ambaridemo | 90068c5ee7adf83ba9dde9a8ad8d396b2fbf87c8 | [
"Apache-2.0"
] | null | null | null | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
from resource_management import *
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def zookeeper_service(action='start', rolling_restart=False):
    """Start or stop the ZooKeeper server daemon on Linux-family hosts.

    :param action: 'start' or 'stop'; any other value is a no-op.
    :param rolling_restart: True when called during a rolling upgrade, in
        which case the stack-symlink repair below is skipped.
    """
    import params

    # This path may be missing after Ambari upgrade. We need to create it.
    if not rolling_restart and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version:
        conf_select.select(params.stack_name, "zookeeper", params.current_version)
        hdp_select.select("zookeeper-server", params.version)

    # Base zkServer.sh invocation; {config_dir}/{zk_bin} are interpolated
    # from params by resource_management's format().
    cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

    if action == 'start':
        daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
        # Skip the start when a live process already owns the pid file.
        no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
        Execute(daemon_cmd,
                not_if=no_op_test,
                user=params.zk_user
                )

        if params.security_enabled:
            # Obtain a Kerberos ticket for the smoke user on secured clusters.
            kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")

            Execute(kinit_cmd,
                    user=params.smokeuser
                    )
    elif action == 'stop':
        daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
        rm_pid = format("rm -f {zk_pid_file}")
        Execute(daemon_cmd,
                user=params.zk_user
                )
        # Remove the stale pid file so the start-side no_op_test passes next time.
        Execute(rm_pid)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def zookeeper_service(action='start', rolling_restart=False):
    """Start or stop the ZooKeeper Windows service via the service manager."""
    import params

    # Both supported actions map straight onto the Windows service control;
    # anything else is a silent no-op, matching the POSIX implementation.
    if action in ('start', 'stop'):
        Service(params.zookeeper_win_service_name, action=action)
e641d0b209cde5111b660437d9adc2a929c44b40 | 2,323 | py | Python | src/ingest/seasonal_team_gamelogs.py | Langutang/Marauder.AI | fd25af74d3c9a99ed6e0cd7092cd5846041605a9 | [
"MIT"
] | null | null | null | src/ingest/seasonal_team_gamelogs.py | Langutang/Marauder.AI | fd25af74d3c9a99ed6e0cd7092cd5846041605a9 | [
"MIT"
] | null | null | null | src/ingest/seasonal_team_gamelogs.py | Langutang/Marauder.AI | fd25af74d3c9a99ed6e0cd7092cd5846041605a9 | [
"MIT"
] | null | null | null | import requests
from requests.auth import HTTPBasicAuth
import time
url = "https://api.mysportsfeeds.com/v2.1/pull/nhl/2021-2022-regular/games.csv"
token = "4e92f126-d598-4577-98e3-bb0674"
password = "MYSPORTSFEEDS"
top_directory = r"C:\Users\John Lang\Documents\Marauder\NHL\core"
years = ["2017","2018","2019","2020","2021"]
season_playoff = ["regular","playoff"]
############## seasonal team gamelogs
print("________________________________")
print("GRABBING SEASON GAMELOGS BY TEAM")
print("________________________________")
counter = 0
teams = ["PIT","MTL","CHI","VAN","STL","WSH","BOS","NYI","CAR","CBJ","CGY","SJS",
"ANA","MIN","PIT","TOR","WPJ","BUF","ARI","NJD","COL","EDM","PHI","TBL",
"NYR","DET","NSH","VGK","LAK","OTT","FLO","DAL"]
for team in teams:
for condition in season_playoff:
for year in years:
try:
print("----------------------------------------")
print("Currently ingesting season gamelog:")
print(f"TEAM: {team}")
print(f"YEAR: {year}")
print(f"IN SEASON TYPE: {condition}")
team.lower()
gamelog_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/team_gamelogs.csv?team={team}"
gamelog_r = requests.get(gamelog_urls, auth=HTTPBasicAuth(token, password))
print("status code success: " + gamelog_r.status_code)
file = open(top_directory + f"/{year}/" + f"/{team}_{condition}_teamgamelog.csv", "w+")
file.write(gamelog_r.text)
counter = counter + 1
print(f"COUNTER VALUE: {counter}")
print(f"success for the year: {year} and season type: {condition}")
print("------------FINALIZING SAVE-------------")
print("------------------------------------------")
while (counter > 30):
print("Cooling down request sends... Go get money:")
time.sleep(120)
counter = 0
print("COUNTER RESET, loop restarting")
except:
print("something fucked up")
print("status code on failure: " + gamelog_r.status_code)
| 40.754386 | 127 | 0.529918 |
c0ad2600cc8fe5da9310b85afbd1059684147120 | 12,684 | py | Python | image_split_tools/sheet_music_detailed.py | ftshijt/Music-Project | db73f92cfd8ebd34ae9623d26e131c2094961bf3 | [
"MIT"
] | 3 | 2019-02-26T13:36:20.000Z | 2019-08-16T13:40:21.000Z | image_split_tools/sheet_music_detailed.py | ftshijt/Music-Project | db73f92cfd8ebd34ae9623d26e131c2094961bf3 | [
"MIT"
] | null | null | null | image_split_tools/sheet_music_detailed.py | ftshijt/Music-Project | db73f92cfd8ebd34ae9623d26e131c2094961bf3 | [
"MIT"
] | 1 | 2019-03-08T15:42:35.000Z | 2019-03-08T15:42:35.000Z | # -*- coding: utf-8 -*-
"""
Created on Jan 29 14:48:16 2019
@author: GS_qbr
"""
from PIL import Image
from PIL import ImageDraw
import cv2 as cv
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import os
import pandas as pd
import csv
def Traversing_dir(folder_name):  # delete files by extension under a folder
    """Recursively delete every ``.png`` file under ``folder_name``.

    ``os.walk`` already descends into every sub-directory, so a single walk
    covers the whole tree. The original version additionally re-invoked
    itself with a bare sub-directory *name* (not a path), which walked a
    path relative to the CWD — a wrong or nonexistent location — so that
    redundant recursion has been removed.

    :param folder_name: root directory (relative or absolute) to clean.
    """
    for root, dirs, files in os.walk(folder_name):
        for file in files:
            # file extension, including the leading dot
            if os.path.splitext(file)[1] == ".png":
                os.remove(os.path.join(root, file))  # delete the file
def Folder_check(folder_name):  # create the folder if absent, otherwise purge it
    """Ensure ``folder_name`` exists under the CWD.

    Creates the directory when it is missing; when it already exists, its
    ``.png`` files are deleted via ``Traversing_dir`` instead.
    """
    target = os.path.join(os.getcwd(), folder_name)
    if os.path.exists(target):
        Traversing_dir(folder_name)
    else:
        os.makedirs(target)
def Overall_split(pic_name):
    '''
    Split a whole sheet-music page into its horizontal systems (staff rows).

    Builds a horizontal projection of the binarized page: rows containing
    ink get a non-zero black-pixel count, and gaps between consecutive runs
    of inked rows mark the boundaries between systems. While running it
    writes img_projection.png (projection visualization), img_split.png
    (the page with cut lines drawn on it) and line_cut<i>.png (one file per
    extracted system) into the folder named after the picture.

    parameters:
        pic_name: base name (no extension) of the page image under image/
    return:
        (number of systems extracted, list of their (l, t, r, b) crop boxes)
    '''
    pic_path = 'image/' + pic_name + '.png'
    img_arr = cv.imread(pic_path, 0)
    # Inverted copy used for cropping, so saved regions keep ink polarity.
    cut_img = Image.fromarray((255 - img_arr).astype("uint8"))
    # 1. Binarize with Otsu's automatic threshold.
    threshold, img_twovalues = cv.threshold(img_arr, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    # 2. Horizontal projection: count black pixels per row, then draw the
    #    counts as left-aligned black bars (visualization only).
    img_projection = img_twovalues.copy()  # deep copy of the array
    (height, width) = img_projection.shape
    count = [0 for z in range(0, height)]
    for j in range(0, height):
        for i in range(0, width):
            if img_projection[j, i] == 0:
                count[j] += 1
    for j in range(0, height):
        for i in range(0, count[j]):
            img_projection[j, i] = 0
    count_nonzero_index = np.array(count).nonzero()[0]  # rows that contain ink
    # key_index marks the positions where a run of inked rows is interrupted,
    # i.e. where a new system starts.
    key_index = []
    for i in range(1, np.shape(count_nonzero_index)[0]):
        if (count_nonzero_index[i] != count_nonzero_index[i - 1] + 1):
            key_index.append(i)
            print(i, count_nonzero_index[i])
    if (len(key_index) == 0):  # page containing a single system only
        key_index.append(count_nonzero_index[0])
    index_continue = []  # (start, end) index pairs of each continuous inked run
    for i in range(np.shape(key_index)[0]):
        if i == 0:
            tmp = (0, key_index[0] - 1)
            index_continue.append(tmp)
        else:
            tmp = (key_index[i - 1], key_index[i] - 1)
            index_continue.append(tmp)
    # Close off the trailing run after the last interruption.
    tmp = (key_index[i], np.shape(count_nonzero_index)[0] - 1)
    index_continue.append(tmp)
    # Drop runs shorter than 1/8 of the page height (titles, stray marks).
    index_continue_delete = []
    height_min = int(height / 8)
    for i in range(len(index_continue)):
        if count_nonzero_index[index_continue[i][1]] - count_nonzero_index[index_continue[i][0]] < height_min:
            index_continue_delete.append(i)
    count_delete = 0
    for i in range(len(index_continue_delete)):
        index_continue.remove(index_continue[index_continue_delete[i] - count_delete])  # indices shift as elements are removed
        count_delete = count_delete + 1
    img_split_array = []
    box_coordinate = []
    for i in range(len(index_continue)):
        box = (0, count_nonzero_index[index_continue[i][0]], width, count_nonzero_index[index_continue[i][1]])
        region = cut_img.crop(box)  # region is a new image object here
        region_arr = 255 - np.array(region)
        img_split_array.append(region_arr)
        region = Image.fromarray(region_arr.astype("uint8"))
        name = pic_name + "/line_cut" + str(i) + ".png"
        region.save(name)
        box_coordinate.append(box)
        # Draw the cut lines on the original image for visual inspection.
        # NOTE(review): the x endpoint 1669 is hard-coded — presumably the
        # expected page width; confirm it matches the input images.
        cv.line(img_arr, (0, count_nonzero_index[index_continue[i][0]]),
                (1669, count_nonzero_index[index_continue[i][0]]), (0, 0, 255), 1)
        cv.line(img_arr, (0, count_nonzero_index[index_continue[i][1]]),
                (1669, count_nonzero_index[index_continue[i][1]]), (0, 0, 255), 1)
    cv.imwrite(pic_name + '/img_split.png', img_arr)
    plt.imsave(pic_name + '/img_projection.png', img_projection)
    return i + 1, box_coordinate
def Bar_line_cut(rootFolderName, pic_id):
    """Split one extracted system image into individual measures (bars).

    Reads <rootFolderName>/line_cut<pic_id>.png, locates the staff lines
    via a horizontal projection, then scans columns for bar lines (columns
    that are solid black between the topmost and bottommost staff line) and
    crops the image between consecutive bar lines. Writes one
    bar_cut_<pic_id>_<bar_id>.png per measure.

    Returns (number of measures, list of their (l, t, r, b) crop boxes),
    or None when staff-line detection fails.
    """
    pic_path = rootFolderName + '/line_cut' + str(pic_id) + '.png'
    img_arr = cv.imread(pic_path, 0)
    cut_img = Image.fromarray((255 - img_arr).astype("uint8"))
    # 1. Binarize with Otsu's automatic threshold.
    threshold, img_twovalues = cv.threshold(img_arr, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    # 2. Horizontal projection: black-pixel count per row.
    img_projection = img_twovalues.copy()  # deep copy of the array
    (height, width) = img_projection.shape
    count = [0 for z in range(0, height)]
    for j in range(0, height):
        for i in range(0, width):
            if img_projection[j, i] == 0:
                count[j] += 1
    for j in range(0, height):
        for i in range(0, count[j]):
            img_projection[j, i] = 0
    # 3. The ten rows with the highest ink counts are taken to be the staff
    #    lines (two staves of five lines each: bar lines span all of them).
    five_lines_index = np.array(count).argsort()[-10:][::-1]
    if (len(five_lines_index) != 10):
        print('There is sth. wrong with count array in voice_part_cut, please check!')
        return
    five_lines_begin = np.min(five_lines_index)  # row of the first staff line
    five_lines_end = np.max(five_lines_index)    # row of the last staff line
    height_five_lines = five_lines_end - five_lines_begin
    bar_line_index = []  # columns that are solid black across both staves
    for j in range(width):
        count_five_lines = 0
        for i in range(five_lines_begin, five_lines_end):
            if img_twovalues[i][j] == 0:
                count_five_lines = count_five_lines + 1
        if count_five_lines == height_five_lines:
            bar_line_index.append(j)
    # (A commented-out heuristic that detected and removed repeat/pause
    #  lines used to live here.)
    bar_id = 0
    box_coordinate = []
    for i in range(len(bar_line_index) - 1):
        # Adjacent detected columns belong to the same (thick) bar line;
        # only gaps of at least 15 pixels count as an actual measure.
        if bar_line_index[i + 1] - bar_line_index[i] >= 15:
            box = (bar_line_index[i], 0, bar_line_index[i + 1], height)
            region = cut_img.crop(box)  # region is a new image object here
            region_arr = 255 - np.array(region)
            region = Image.fromarray(region_arr.astype("uint8"))
            name = rootFolderName + "/bar_cut_" + str(pic_id) + "_" + str(bar_id) + ".png"
            region.save(name)
            bar_id = bar_id + 1
            box_coordinate.append(box)
    return bar_id, box_coordinate
def Voice_part_cut(rootFolderName, overrall_id, pic_id, height_total, width_total):
    """Split one measure image into its two voice parts (right/left hand).

    Reads <rootFolderName>/bar_cut_<overrall_id>_<pic_id>.png, finds the
    ten staff-line rows, and cuts at the first (near-)empty row below the
    fifth line — i.e. between the upper and lower staff. For each half it
    also computes an ink-density 'difficulty', both relative to this
    measure (height * width) and to the whole page
    (height_total * width_total).

    Returns (crop boxes, per-measure difficulties, per-page difficulties),
    each a two-element list ordered [upper, lower], or None on failure.
    """
    pic_path = rootFolderName + '/bar_cut_' + str(overrall_id) + '_' + str(pic_id) + '.png'
    print(pic_path)
    img_arr = cv.imread(pic_path, 0)
    cut_img = Image.fromarray((255 - img_arr).astype("uint8"))
    # 1. Binarize with Otsu's automatic threshold.
    threshold, img_twovalues = cv.threshold(img_arr, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    # 2. Horizontal projection: black-pixel count per row.
    img_projection = img_twovalues.copy()  # deep copy of the array
    (height, width) = img_projection.shape
    count = [0 for z in range(0, height)]
    for j in range(0, height):
        for i in range(0, width):
            if img_projection[j, i] == 0:
                count[j] += 1
    for j in range(0, height):
        for i in range(0, count[j]):
            img_projection[j, i] = 0
    # 3. Ten highest-count rows = the staff lines, sorted top to bottom.
    five_lines_index = np.array(count).argsort()[-10:][::-1]
    five_lines_index = np.sort(five_lines_index)
    if (len(five_lines_index) != 10):
        print('There is sth. wrong with count array in voice_part_cut, please check!')
        return
    # Last line of the upper staff; the split point is the first empty-ish
    # row below it.
    upper_section_last_line_index = five_lines_index[4]
    for i in range(upper_section_last_line_index, len(count)):
        if count[i] == 0 or count[i] == 1:
            voice_part_cut_index = i
            break;
    box_coordinate = []
    difficulty_local = []
    difficulty_overall = []
    # 4. Crop and score the upper (right-hand) part.
    box = (0, 0, width, voice_part_cut_index)
    region = cut_img.crop(box)  # region is a new image object here
    region_arr = 255 - np.array(region)
    region = Image.fromarray(region_arr.astype("uint8"))
    name = rootFolderName + "/result/voice_part_cut_" + str(overrall_id) + '_' + str(pic_id) + "_0.png"
    region.save(name)
    # Difficulty = number of black pixels in the cropped region.
    difficuly_upper = np.sum(((255 - np.array(region)) / 255).astype('int'))
    density_upper = round(difficuly_upper / (height * width), 4)
    density_upper_overall = round(difficuly_upper / (height_total * width_total), 4)
    box_coordinate.append(box)
    difficulty_local.append(density_upper)
    difficulty_overall.append(density_upper_overall)
    # Crop and score the lower (left-hand) part.
    box = (0, voice_part_cut_index, width, len(count))
    region = cut_img.crop(box)  # region is a new image object here
    region_arr = 255 - np.array(region)
    region = Image.fromarray(region_arr.astype("uint8"))
    name = rootFolderName + "/result/voice_part_cut_" + str(overrall_id) + '_' + str(pic_id) + "_1.png"
    region.save(name)
    difficuly_lower = np.sum(((255 - np.array(region)) / 255).astype('int'))
    # NOTE(review): rounded to 3 decimals here but 4 everywhere else —
    # confirm whether the asymmetry is intentional.
    density_lower = round(difficuly_lower / (height * width), 3)
    density_lower_overall = round(difficuly_lower / (height_total * width_total), 4)
    box_coordinate.append(box)
    difficulty_local.append(density_lower)
    difficulty_overall.append(density_lower_overall)
    return box_coordinate, difficulty_local, difficulty_overall
def Cut_into_voive_part(pic_name):
    """Run the full pipeline for one page: systems -> measures -> voice parts.

    (The 'voive' typo in the name is kept — callers reference it as-is.)

    Prepares the output folders, splits the page, and writes
    <pic_name>/result/coordinate_info.csv with one row per voice part:
    absolute page coordinates plus the two difficulty scores.

    Returns (number of systems, bar_num) — bar_num is whatever the final
    loop iteration left behind, i.e. the measure count of the LAST system.
    """
    Folder_check(pic_name)
    Folder_check(pic_name + '/result')
    overall_num, overall_coordinate = Overall_split(pic_name)  # how many system images feed the bar splitter
    (height, width) = cv.imread('image/' + pic_name + '.png', 0).shape
    # CSV header row
    fileHeader = ["overall_id", "bar_id", "part_id", "x_left", "y_left", "x_right", "y_right", "difficulty_local",
                  "difficulty_overall"]
    # Open the output CSV. NOTE(review): the handle is only closed on the
    # success path — an exception mid-pipeline leaks it; consider `with`.
    csvFile = open(pic_name + '/result/coordinate_info.csv', "w")
    writer = csv.writer(csvFile)
    writer.writerow(fileHeader)
    for overall_id in range(overall_num):
        bar_num, bar_coordinate = Bar_line_cut(pic_name, overall_id)
        for bar_id in range(bar_num):
            part_coordinate, difficulty_local, difficulty_overall = Voice_part_cut(pic_name, overall_id, bar_id, height,
                                                                                  width)
            # Origin of this measure in whole-page coordinates.
            x_absolute = overall_coordinate[overall_id][0] + bar_coordinate[bar_id][0]
            y_absolute = overall_coordinate[overall_id][1] + bar_coordinate[bar_id][1]
            # (x1,y1)/(x2,y2): upper voice part, top-left and bottom-right corners.
            x1 = x_absolute + part_coordinate[0][0]
            y1 = y_absolute + part_coordinate[0][1]
            x2 = x_absolute + part_coordinate[0][2]
            y2 = y_absolute + part_coordinate[0][3]
            # (x3,y3)/(x4,y4): lower voice part, top-left and bottom-right corners.
            x3 = x_absolute + part_coordinate[1][0]
            y3 = y_absolute + part_coordinate[1][1]
            x4 = x_absolute + part_coordinate[1][2]
            y4 = y_absolute + part_coordinate[1][3]
            # One CSV row per voice part.
            line1 = [overall_id, bar_id, '0', x1, y1, x2, y2, difficulty_local[0], difficulty_overall[0]]
            line2 = [overall_id, bar_id, '1', x3, y3, x4, y4, difficulty_local[1], difficulty_overall[1]]
            # Write both rows in one batch.
            writer.writerows([line1, line2])
    csvFile.close()
    return overall_num, bar_num  # consumed by later code that iterates the results
if __name__ == '__main__':
    # Batch mode: run the pipeline on every image in the hard-coded folder.
    # pic_name = ['EQYY1-3.png']
    pic_name = os.listdir('C:/Users/PKU/Desktop/eye_exp/image')
    for i in range(len(pic_name)):
        print('**********************' + pic_name[i] + '************************')
        # Strip the 4-character extension (".png") to get the base name.
        pic_name[i] = pic_name[i][:-4]
        Cut_into_voive_part(pic_name[i])
| 37.976048 | 155 | 0.621649 |
9159d24beac4f2c4e4ce1e8d3dfbfc90376a6dab | 361 | py | Python | SistemaWebAuditorias/Apps/Auditoria/migrations/0004_auto_20191120_2137.py | diegoquirozramirez/Sistema-Web-Auditorias | d070a7c85a9dad1740dea5bc397fd3cfb3735ba8 | [
"MIT"
] | null | null | null | SistemaWebAuditorias/Apps/Auditoria/migrations/0004_auto_20191120_2137.py | diegoquirozramirez/Sistema-Web-Auditorias | d070a7c85a9dad1740dea5bc397fd3cfb3735ba8 | [
"MIT"
] | null | null | null | SistemaWebAuditorias/Apps/Auditoria/migrations/0004_auto_20191120_2137.py | diegoquirozramirez/Sistema-Web-Auditorias | d070a7c85a9dad1740dea5bc397fd3cfb3735ba8 | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2019-11-21 02:37
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters the unique-together constraint
    # on the Convocatoria model.
    # NOTE(review): both entries of the tuple name the same field
    # ('auditoria', 'auditoria') — one of them was likely meant to be a
    # different field; confirm against the model before relying on it.

    dependencies = [
        ('Auditoria', '0003_convocatoria'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='convocatoria',
            unique_together={('auditoria', 'auditoria')},
        ),
    ]
| 20.055556 | 57 | 0.609418 |
2c288ee33822b47fa9dab04287b651b1bfe2513c | 2,175 | py | Python | main.py | rauner/DeepLearning-Lemon-Markets-Trading | 41412d6f7c0851739ed43befd2594a5e7a6f00f6 | [
"MIT"
] | null | null | null | main.py | rauner/DeepLearning-Lemon-Markets-Trading | 41412d6f7c0851739ed43befd2594a5e7a6f00f6 | [
"MIT"
] | null | null | null | main.py | rauner/DeepLearning-Lemon-Markets-Trading | 41412d6f7c0851739ed43befd2594a5e7a6f00f6 | [
"MIT"
] | null | null | null | #from dotenv import load_dotenv
from handlers.lemon import LemonMarketsAPI
#load_dotenv()
import pandas as pd
import numpy as np
import yfinance as yf
from datetime import datetime, timedelta, date
from handlers import model
from handlers import helpers
# show data for different tickers
#isin = os.environ.get("isin")
#start = os.environ.get("startDate")
#
# --- Configuration ---------------------------------------------------------
isin = ['ETH-USD']      # tickers to download (Yahoo Finance symbols)
testShare = 0.05        # NOTE(review): unused — the split below hard-codes 70/20/10
interval = 2 #time steps in minutes
# intraday data only available from yf for the last 60 days
data = yf.download(isin, start=date.today() - timedelta(days=59), end=date.today(), interval= str(interval) + 'm')
print(data.head())
# normalize the dataset
#scaler = MinMaxScaler(feature_range=(0, 1))
#data = scaler.fit_transform(data)
# include days and weeks as cos features
data = data.reset_index(level=0)
# Unix timestamps (seconds) of each bar, used to derive cyclic time features.
seconds = data['Datetime'].map(pd.Timestamp.timestamp)
# NOTE(review): `seconds` is in epoch seconds, so a daily period would be
# 24*60*60; dividing by `interval` shortens the cosine period — confirm
# whether that compression is intentional.
day = 24*60*60/interval
week = 7*day
data['day cos'] = np.cos(seconds * (2 * np.pi / day))
data['week cos'] = np.cos(seconds * (2 * np.pi / week))
data = data.drop('Datetime', axis = 1)
# split into train and test
n = len(data)
# Chronological 70/20/10 split (no shuffling, as befits a time series).
train_df = data[0:int(n*0.7)]
val_df = data[int(n*0.7):int(n*0.9)]
test_df = data[int(n*0.9):]
num_features = data.shape[1]
# normalization, simple mean and variance
# Statistics come from the training split only, to avoid look-ahead leakage.
train_mean = train_df.mean()
train_std = train_df.std()
train_df = (train_df - train_mean) / train_std
val_df = (val_df - train_mean) / train_std
test_df = (test_df - train_mean) / train_std
# implement covnet
# Window sizing: a conv kernel of width `conv_width` consumes
# (conv_width - 1) extra input steps per predicted label.
conv_width = 10
label_width = 1
input_width = label_width + (conv_width - 1)
shift = 1  # predict one step ahead
wide_conv_window = helpers.WindowGenerator(
    input_width=input_width,
    label_width=label_width,
    train_df = train_df,
    val_df = val_df,
    test_df = test_df,
    shift=shift,
    label_columns=['Adj Close'])
# Model hyperparameters.
# NOTE(review): filters/kernel_size/activation/patience/MAX_EPOCHS are
# defined here but never passed to model.conv_model()/compile_and_fit —
# presumably the handlers read their own defaults; confirm.
filters = 32
kernel_size = 10
activation = 'relu'
conv_model = model.conv_model()
patience=2
MAX_EPOCHS = 5
history = model.compile_and_fit(conv_model, wide_conv_window)
wide_conv_window.plot(conv_model)
'''
clean, make class and so on nice
implement that its forcasts the next h with ARNN
# implement XGBOOST
# implement random tree
# implement bagging
# boosting
'''
aacee9ccc520c1eac175132e0740c996dee2f046 | 1,237 | py | Python | pytglib/api/types/background_type_pattern.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/background_type_pattern.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/background_type_pattern.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class BackgroundTypePattern(Object):
    """Chat-background type: a tileable pattern layered over a user-chosen fill.

    The pattern itself is a PNG or TGV file (a gzipped subset of SVG with
    MIME type "application/x-tgwallpattern") combined with the background
    fill picked by the user.

    Args:
        fill (:class:`telegram.api.types.BackgroundFill`):
            Description of the background fill.
        intensity (:obj:`int`):
            Intensity of the pattern when shown above the fill, 0-100.
        is_moving (:obj:`bool`):
            True if the background shifts slightly when the device tilts.

    Returns:
        BackgroundType

    Raises:
        :class:`telegram.Error`
    """

    ID = "backgroundTypePattern"

    def __init__(self, fill, intensity, is_moving, **kwargs):
        # Extra keyword arguments from the wire format are accepted and ignored.
        self.fill = fill            # BackgroundFill
        self.intensity = intensity  # int
        self.is_moving = is_moving  # bool

    @staticmethod
    def read(q: dict, *args) -> "BackgroundTypePattern":
        # Deserialize from a raw TDLib dictionary; the nested fill object is
        # itself decoded through the generic Object reader.
        return BackgroundTypePattern(
            Object.read(q.get('fill')),
            q.get('intensity'),
            q.get('is_moving'),
        )
79ca4ddfca11b5041323c1fa25aece0f30a506f7 | 5,739 | py | Python | userbot/modules/system_stats.py | edomiredo/One4uBot | e427814b0a5fbc6b5a96602d7590d1b9e77f9e69 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/system_stats.py | edomiredo/One4uBot | e427814b0a5fbc6b5a96602d7590d1b9e77f9e69 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/system_stats.py | edomiredo/One4uBot | e427814b0a5fbc6b5a96602d7590d1b9e77f9e69 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-03-21T03:39:58.000Z | 2020-03-21T03:39:58.000Z | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for getting information about the server. """
from asyncio import create_subprocess_shell as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from platform import python_version, uname
from shutil import which
from os import remove
from telethon import version
from userbot import CMD_HELP, ALIVE_NAME
from userbot.events import register
# ================= CONSTANT =================
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
# ============================================
@register(outgoing=True, pattern="^.sysd$")
async def sysdetails(sysd):
    """ For .sysd command, get system info using neofetch. """
    try:
        # Run neofetch in plain-text mode, capturing both output streams.
        neo = "neofetch --stdout"
        fetch = await asyncrunapp(
            neo,
            stdout=asyncPIPE,
            stderr=asyncPIPE,
        )
        stdout, stderr = await fetch.communicate()
        # Concatenate stdout and stderr so any error text reaches the chat too.
        result = str(stdout.decode().strip()) \
            + str(stderr.decode().strip())
        # Backticks render the output as monospace in Telegram.
        await sysd.edit("`" + result + "`")
    except FileNotFoundError:
        # Presumably raised when the neofetch binary is absent — TODO confirm
        # (a shell invocation may instead exit non-zero rather than raise).
        await sysd.edit("`Install neofetch first !!`")
@register(outgoing=True, pattern="^.botver$")
async def bot_ver(event):
    """ For .botver command, get the bot version. """
    # Requires a git checkout: the version is derived from git metadata.
    if which("git") is not None:
        # Human-readable description of the current commit.
        invokever = "git describe --all --long"
        ver = await asyncrunapp(
            invokever,
            stdout=asyncPIPE,
            stderr=asyncPIPE,
        )
        stdout, stderr = await ver.communicate()
        verout = str(stdout.decode().strip()) \
            + str(stderr.decode().strip())
        # Total commit count across all refs, used as a revision number.
        invokerev = "git rev-list --all --count"
        rev = await asyncrunapp(
            invokerev,
            stdout=asyncPIPE,
            stderr=asyncPIPE,
        )
        stdout, stderr = await rev.communicate()
        revout = str(stdout.decode().strip()) \
            + str(stderr.decode().strip())
        await event.edit("`Userbot Version: "
                         f"{verout}"
                         "` \n"
                         "`Revision: "
                         f"{revout}"
                         "`")
    else:
        # No git binary available — fall back to a static message.
        await event.edit(
            "Shame that you don't have git, You're running 5.0 - 'Extended' anyway"
        )
@register(outgoing=True, pattern="^.pip(?: |$)(.*)")
async def pipcheck(pip):
    """ For .pip command, do a pip search. """
    # NOTE(review): `pip search` was disabled on PyPI's XML-RPC API; this
    # command most likely returns an error string nowadays — confirm.
    pipmodule = pip.pattern_match.group(1)
    if pipmodule:
        await pip.edit("`Searching . . .`")
        invokepip = f"pip3 search {pipmodule}"
        pipc = await asyncrunapp(
            invokepip,
            stdout=asyncPIPE,
            stderr=asyncPIPE,
        )
        stdout, stderr = await pipc.communicate()
        # Merge both streams so errors are shown as the result too.
        pipout = str(stdout.decode().strip()) \
            + str(stderr.decode().strip())
        if pipout:
            # Telegram messages cap at 4096 chars; ship long output as a file.
            if len(pipout) > 4096:
                await pip.edit("`Output too large, sending as file`")
                file = open("output.txt", "w+")
                file.write(pipout)
                file.close()
                await pip.client.send_file(
                    pip.chat_id,
                    "output.txt",
                    reply_to=pip.id,
                )
                # Clean up the temporary file after upload.
                remove("output.txt")
                return
            await pip.edit("**Query: **\n`"
                           f"{invokepip}"
                           "`\n**Result: **\n`"
                           f"{pipout}"
                           "`")
        else:
            await pip.edit("**Query: **\n`"
                           f"{invokepip}"
                           "`\n**Result: **\n`No Result Returned/False`")
    else:
        # Command issued without a module argument.
        await pip.edit("`Use .help pip to see an example`")
@register(outgoing=True, pattern="^.alive$")
async def amireallyalive(alive):
    """ For .alive command, check if the bot is running. """
    # Replies in-place with library/runtime versions and the display name
    # held in the module-level DEFAULTUSER.
    await alive.edit("`My Detail Ubot `\n"
                     f"> `Telethon : v{version.__version__} `\n"
                     f"> `Python : v{python_version()} `\n"
                     "===================== \n"
                     f"`User : `{DEFAULTUSER} \n"
                     "===================== \n")
@register(outgoing=True, pattern="^.aliveu")
async def amireallyaliveuser(username):
    """ For .aliveu command, change the username in the .alive command. """
    global DEFAULTUSER
    message = username.text
    # A valid invocation is ".aliveu <name>": the command word followed by
    # exactly one space and a non-empty new display name.
    if message != '.aliveu' and message[7:8] == ' ':
        DEFAULTUSER = message[8:]
        output = 'Successfully changed user to ' + DEFAULTUSER + '!'
    else:
        output = '.aliveu [new user without brackets] nor can it be empty'
    await username.edit("`" f"{output}" "`")
@register(outgoing=True, pattern="^.resetalive$")
async def amireallyalivereset(ureset):
    """ For .resetalive command, reset the username in the .alive command. """
    global DEFAULTUSER
    # Restore the configured name, falling back to the host's node name.
    if ALIVE_NAME:
        DEFAULTUSER = str(ALIVE_NAME)
    else:
        DEFAULTUSER = uname().node
    await ureset.edit("`" "Successfully reset user for alive!" "`")
# Register this module's command documentation in the shared help index
# (surfaced by the .help command).
CMD_HELP.update(
    {"sysd": ".sysd\
\nUsage: Shows system information using neofetch."})
CMD_HELP.update({"botver": ".botver\
\nUsage: Shows the userbot version."})
CMD_HELP.update(
    {"pip": ".pip <module(s)>\
\nUsage: Does a search of pip modules(s)."})
CMD_HELP.update({
    "alive":
    ".alive\
\nUsage: Type .alive to see wether your bot is working or not.\
\n\n.aliveu <text>\
\nUsage: Changes the 'user' in alive to the text you want.\
\n\n.resetalive\
\nUsage: Resets the user to default."
})
| 33.561404 | 83 | 0.541384 |
cbe62ff7020ba764b9c65657d2edba9988f5c168 | 2,480 | py | Python | src/tars/strategies/trend_following_strategy.py | fredmontet/tars | 922786e8c6456fc0cc1a9db07714f11dd78219d9 | [
"MIT"
] | 3 | 2022-02-06T14:41:07.000Z | 2022-03-25T16:27:45.000Z | src/tars/strategies/trend_following_strategy.py | fredmontet/tars | 922786e8c6456fc0cc1a9db07714f11dd78219d9 | [
"MIT"
] | 6 | 2021-09-20T03:33:31.000Z | 2022-03-24T09:00:48.000Z | src/tars/strategies/trend_following_strategy.py | fredmontet/tars | 922786e8c6456fc0cc1a9db07714f11dd78219d9 | [
"MIT"
] | null | null | null | import pandas as pd
from scipy.signal import savgol_filter
from ..evaluators import TraderEvaluator
from .abstract_strategy import AbstractStrategy
from ..markets import CryptoMarket
class TrendFollowing(AbstractStrategy):
    """
    Follow a quote's trend by taking a buy/sell decision based on the 2nd
    derivative of a Savitzky-Golay filtered signal. i.e. :
        sell if dx < negative limit
        buy if dx > positive limit
    :param trader: Trader
        The Trader handling a portfolio
    :param pair: str
        The pair e.g. XETHZUSD to buy and hold
    :param volume: float
        The volume of the pair's quote buy
    :param validate: boolean
        Safety Boolean to make sure not to trade real money by default
    :ivar evaluator: AbstractEvaluator
        Evaluator allows for the evaluation of a strategy
    :ivar market: AbstractMarket
        Market object to get information from
    """

    def __init__(self, trader, pair, volume, validate=True):
        self.name = 'Trend Following with Differential Filter'
        self.trader = trader
        self.pair = pair
        self.volume = volume
        self.validate = validate
        self.evaluator = TraderEvaluator(self.trader)
        self.market = CryptoMarket()

    def run(self):
        """Run one iteration of the strategy.

        Records a portfolio checkpoint, estimates the smoothed second
        derivative of the close-price series, then places a market sell/buy
        order when that derivative crosses the configured limits.
        """
        # Checkpoint: record the current USD-equivalent balance for evaluation
        balance = self.trader.portfolio.get_trade_balance().loc['eb'].ZUSD
        self.evaluator.add_checkpoint(pd.Timestamp.utcnow(), balance)
        # Filter parameters (previously the unnamed n / r / w / o constants)
        diff_lag = 10        # lag (in samples) of each discrete difference
        rolling_window = 60  # samples averaged to smooth the raw 2nd difference
        filter_window = 103  # Savitzky-Golay window length (must be odd)
        filter_order = 4     # Savitzky-Golay polynomial order
        # Process data. Bug fix: use the market created in __init__ instead of
        # instantiating a second, redundant CryptoMarket that shadowed self.market.
        close = self.market.get_ohlc_data(pair=self.pair)[0]['close'].iloc[::-1]
        second_diff = close.diff(diff_lag).diff(diff_lag).rolling(rolling_window).mean()
        smoothed = savgol_filter(second_diff.to_numpy(), filter_window, filter_order)
        trend = pd.DataFrame(smoothed).set_index(second_diff.index)
        # Decision thresholds on the latest filtered value
        dx = trend.iloc[-1][0]
        pos_lim = 0.9
        neg_lim = -0.9
        # Trading rules: sell on a strong downward bend, buy on a strong upward one;
        # otherwise do nothing.
        if dx <= neg_lim:
            self.trader.add_order(pair=self.pair, type='sell',
                                  ordertype='market', volume=self.volume,
                                  validate=self.validate)
        elif dx >= pos_lim:
            self.trader.add_order(pair=self.pair, type='buy',
                                  ordertype='market', volume=self.volume,
                                  validate=self.validate)
| 31.794872 | 77 | 0.591935 |
66dc6ee923cc072487dc6e236c5f01981ead183c | 1,473 | py | Python | tasker.py | Avishek-Paul/SlackAssistant | 4cb41fe62526dc26381c6ca6bc420b1104a8da2f | [
"MIT"
] | null | null | null | tasker.py | Avishek-Paul/SlackAssistant | 4cb41fe62526dc26381c6ca6bc420b1104a8da2f | [
"MIT"
] | null | null | null | tasker.py | Avishek-Paul/SlackAssistant | 4cb41fe62526dc26381c6ca6bc420b1104a8da2f | [
"MIT"
] | null | null | null |
import importlib
import glob
import config
import logging
import logging.config
from celery import Celery
# Module-level logger; handlers and levels come from the INI file loaded below.
logger = logging.getLogger(__name__)
logging.config.fileConfig('logging_config.ini')
# Celery application used to queue Slack events; broker URL from local config.
app = Celery('slack_assistant', broker=config.broker)
# Discover plugin modules under plugins/ (e.g. "plugins/foo.py" -> "plugins.foo",
# skipping dunder entries), import each one, and instantiate the class that
# shares its module's name.
files = [f.replace('/', '.')[:-3] for f in glob.glob("plugins/*.py") if '__' not in f]
plugins = []
for module_name in files:
    module = importlib.import_module(module_name)
    plugin_class = getattr(module, module_name.split('.')[1])
    plugins.append(plugin_class())
@app.task
def eventHandler(event):
    """Celery task: derive keyword(s) from one Slack event and dispatch the
    event to every loaded plugin whose keyword list matches."""
    eventType = event.get('type', "")
    # kw holds the derived keyword list; None means the event is ignored
    kw = None
    if eventType == "message":
        if event.get('text', "").startswith('!'): #for traditional plugins
            message = event.get('text')
            kw = [message.split()[0]]
        if event.get("channel") in config.channel_monitor.keys(): #for monitoring channels
            # NOTE(review): this overwrites a '!' command keyword when the message
            # was posted in a monitored channel -- confirm that is intended
            kw = ['!chatMonitor']
    elif eventType == "reaction_added" or eventType=="reaction_removed": #for reaction-based plugins
        kw = [event.get('reaction', None), 'reactionBased']
    elif eventType == "user_typing":
        kw = ["!trollbot"]
    if not kw:
        return
    # dispatch to every plugin declaring at least one matching keyword
    for plugin in plugins:
        if any(x in plugin.keywords for x in kw):
            logger.debug("Keywords matched: %s", plugin.keywords)
            plugin.execute(event)
| 30.6875 | 129 | 0.647658 |
caabcecf0a5251a3b7cea58013d39374a8c8f666 | 10,909 | py | Python | saticl/tasks/__init__.py | edornd/multimodal-icl | f79bfa73665db471c12ee9cb57bbee1bcabb0467 | [
"MIT"
] | 6 | 2021-12-08T05:58:18.000Z | 2021-12-29T09:55:32.000Z | saticl/tasks/__init__.py | edornd/multimodal-icl | f79bfa73665db471c12ee9cb57bbee1bcabb0467 | [
"MIT"
] | null | null | null | saticl/tasks/__init__.py | edornd/multimodal-icl | f79bfa73665db471c12ee9cb57bbee1bcabb0467 | [
"MIT"
] | null | null | null | import logging
import random
from collections import defaultdict
from pathlib import Path
from typing import Iterable, List
import numpy as np
from ordered_set import OrderedSet
from saticl.datasets.base import DatasetBase
from saticl.logging.console import DistributedLogger
from saticl.tasks.agrivision import ICL_AGRIVISION
from saticl.tasks.isaid import ICL_ISAID
from saticl.tasks.isprs import ICL_ISPRS
from saticl.utils.common import prepare_folder
from tqdm import tqdm
# Logger wrapped so that, in distributed runs, only the designated process emits.
LOG = DistributedLogger(logging.getLogger(__name__))
# Maps dataset name -> incremental-learning task definitions for that dataset.
AVAILABLE_TASKS = {"potsdam": ICL_ISPRS, "vaihingen": ICL_ISPRS, "agrivision": ICL_AGRIVISION, "isaid": ICL_ISAID}
def filter_with_overlap(image_labels: Iterable[int], new_labels: Iterable[int], *args, **kwargs) -> bool:
    """Decide whether an image is kept for the current step (overlap setting).

    The image survives the filter as soon as at least one of its labels is
    among the labels introduced at the current step; any other labels it may
    contain are ignored. Extra positional/keyword arguments are accepted so
    this function stays call-compatible with the stricter filters.

    Args:
        image_labels (Iterable[int]): indices of the labels present in the current image
        new_labels (Iterable[int]): indices of the labels needed at the current step

    Returns:
        bool: True if the image contains at least one current-step label
    """
    for label in image_labels:
        if label in new_labels:
            return True
    return False
def filter_without_overlap(image_labels: Iterable[int], new_labels: Iterable[int], curr_labels: Iterable[int]) -> bool:
    """Decide whether an image is kept for the current step (no-overlap setting).

    Stricter than the overlap variant: the image must contain at least one of
    the labels introduced at step T, and every label it contains must belong
    to the already-seen set (steps 1..T plus special indices such as 0/255).
    Images carrying labels from future steps are discarded.

    Args:
        image_labels (Iterable[int]): indices of unique labels for the current image
        new_labels (Iterable[int]): indices of labels for the step T
        curr_labels (Iterable[int]): labels from steps 1 .. T - 1 + labels from step T + [0, 255]

    Returns:
        bool: True when the image shows only already-seen labels, at least one
        of which is new at this step
    """
    if not any(label in new_labels for label in image_labels):
        return False
    # reject the image if it carries any label outside the already-seen set
    return all(label in curr_labels for label in image_labels)
def filter_with_split(dataset: DatasetBase, new_labels: set):
    """Subdivides the given dataset into equal partitions, one per category (excluding background, if present).
    If the dataset has N classes, this function divides into N splits, where each split i contributes with only
    the label i.
    Args:
        dataset (DatasetBase): dataset to be filtered
        new_labels (set): set of labels for current step
    Returns:
        List[bool]: list of values to be kept for the current step
    """
    # count samples and classes, we do not care about background for splits
    num_samples = len(dataset)
    shift = int(dataset.has_background())
    num_classes = len(dataset.categories()) - shift
    # create a dict of <index label: list of tiles> and a supporting count array
    label2tile = defaultdict(list)
    # NOTE(review): random.sample is not seeded here, so the split is
    # nondeterministic across runs unless the caller seeds the global RNG;
    # Task.filter_images caches the first result to disk, which hides this.
    shuffled = random.sample(list(range(num_samples)), k=num_samples)
    tile_counts = np.zeros(num_classes)
    for i in tqdm(shuffled):
        _, mask = dataset[i]
        # extract unique labels, remove background if it's included
        available_labels = np.unique(mask)
        if dataset.has_background():
            available_labels = available_labels[available_labels != 0]
        # it may happen when it only contains background
        if len(available_labels) == 0:
            continue
        # retrieve the currently less populated category (excluding background if present)
        # e.g. tile counts[ 34, 45, 12]  -> index = 2
        # then use this index to retrieve the corresponding label
        # last, store the tile for that label and increment the count
        # (greedy assignment: each tile goes to whichever of its labels currently
        # has the fewest tiles, keeping the per-class chunks roughly balanced)
        index = np.argmin(tile_counts[available_labels - shift])
        label = available_labels[index]
        label2tile[label].append(i)
        tile_counts[label - shift] += 1
    # create a list of booleans, one per sample, true when included, false otherwise
    filtered = [False] * num_samples
    for label, tiles in label2tile.items():
        for index in tiles:
            filtered[index] = label in new_labels
    return filtered
class Task:
    """One step of an incremental class-learning (ICL) task.

    A task is identified by dataset name, task name and step index. It keeps
    track of the label sets introduced at the current step (``new_labels``),
    at previous steps (``old_labels``) and their union (``seen_labels``), and
    can filter a dataset down to the images relevant for the current step,
    caching the resulting boolean mask under ``data_folder``.
    """
    def __init__(self,
                 dataset: str,
                 name: str,
                 step: int = 0,
                 add_background: bool = False,
                 data_folder: Path = Path("data/tasks")) -> None:
        # sanity checks:
        # - the data folder can be used to cache indices
        # - the dataset exists in the task list
        # - the task name appears in the entries associated with the dataset
        # - the step exists in the given task
        assert data_folder.exists() and data_folder.is_dir(), f"Wrong path: {str(data_folder)}"
        assert dataset in AVAILABLE_TASKS, f"No tasks for dataset: {dataset}"
        tasks = AVAILABLE_TASKS.get(dataset, {})
        assert name in tasks, f"Unknown task: {name}"
        task_dict = tasks[name]
        assert step in task_dict, f"Step {step} out of range for: {task_dict}"
        # we made sure dataset and task exist, and the step is within range
        self.task_dict = task_dict
        new_labels = OrderedSet([label for label in task_dict[step]])
        old_labels = OrderedSet([label for s in range(step) for label in task_dict[s]])
        # step 0 - sanity check: only new labels by definitions
        if step == 0:
            assert len(old_labels) == 0 and len(new_labels) > 0, "step 0: expected only new labels"
        # step N - sanity check: old and new are non-empty and disjoint sets
        else:
            assert len(new_labels) > 0 and len(old_labels) > 0, f"step {step}: Old and new must be non-empty sets"
            assert not new_labels.intersection(old_labels), "Old and new labels are not disjoint sets"
        self.seen_labels = new_labels.union(old_labels)
        self.new_labels = new_labels
        self.old_labels = old_labels
        self.data_root = data_folder
        # save information, save shift for sets without background class
        # useful for actual class counts (shift is 1 when a synthetic background
        # class is added, 0 otherwise)
        self.shift = int(add_background)
        self.dataset_name = dataset
        self.name = name
        self.step = step
    def task_name(self) -> str:
        # unique identifier for this (task, step) pair, used in cache file names
        return f"{self.name}_step-{self.step}"
    def num_classes_per_task(self) -> List[int]:
        """Counts the number of classes for each step, including the current one.
        Future steps are not involved yet. In case of dataset with missing background,
        a shift is applied (it does nothing when `add_background` is false, since shift=0)
        Returns:
            List[int]: list containing the class count at each step from 0 to t
        """
        counts = [len(self.task_dict[s]) for s in range(self.step + 1)]
        counts[0] += self.shift
        return counts
    def old_class_count(self) -> int:
        """Counts the total amount of classes seen until now, excluding the current step.
        Formally, returns sum(classes_0 ... classes_t-1).
        Shift accounts for datasets missing background.
        Returns:
            int: total number of classes from steps 0 .. t-1, plus the
            background shift; 0 when this is the first step
        """
        if self.step == 0:
            return 0
        return sum([len(self.task_dict[s]) for s in range(self.step)]) + self.shift
    def current_class_count(self) -> int:
        """Returns the number of classes at the current step.
        If we are at step 0 and the dataset doesn't have its own background class, add 1.
        Returns:
            int: class count at step t
        """
        shift = int(self.step == 0) * self.shift
        return len(self.task_dict[self.step]) + shift
    def filter_images(self, dataset: DatasetBase, mode: str = "overlap") -> List[bool]:
        """Iterates the given dataset, storing in a numpy array which image indices are fit
        for the current task. The fit criterion is defined with or without overlap.
        WITH OVERLAP (default): the image is kept when the mask contains one of the task labels,
        regardless of the other labels in the mask.
        WITHOUT OVERLAP: the image is kept when the mask contains one of the task labels AND
        the other labels are ∈ old labels (i.e. does not include labels from future tasks)
        WITH SPLIT: first, the dataset is divided into evenly sized chunks of N/num_classes samples each.
        Then, each chunk_i is assigned to class i, removing any tile that does not contain any pixel
        with label=i.
        The image is kept at step T if (and only if) it belongs to the chunk i and contains
        at least one pixel labeled as i, for each i in C_T, the set of current new classes.
        Args:
            dataset (DatasetBase): original dataset to be filtered
            mode (str, optional): filter with overlap ("overlap"), without overlap ("noov"),
                or splitting ("split"). Defaults to "overlap".
        Returns:
            List[bool]: a list of bool values, one per image, where true means to keep it.
        """
        assert mode in ("overlap", "noov", "split")
        postfix = "" if mode == "overlap" else f"_{mode}"
        cached_name = f"{self.task_name()}_{dataset.stage()}{postfix}.npy"
        cached_path = self.data_root / self.dataset_name / cached_name
        if cached_path.exists() and cached_path.is_file():
            filtered = np.load(cached_path)
        else:
            # quick hack just to avoid losing time transforming, we don't need it
            transform = dataset.transform
            dataset.transform = None
            filtered = list()
            if mode != "split":
                # first select the right function for the filtering
                # then iterate dataset to decide which image to keep and which not
                filter_fn = filter_with_overlap if mode == "overlap" else filter_without_overlap
                for _, mask in tqdm(dataset, desc=f"Creating {cached_name}"):
                    mask_indices = np.unique(mask)
                    filtered.append(filter_fn(mask_indices, self.new_labels, self.seen_labels))
            else:
                filtered = filter_with_split(dataset, new_labels=self.new_labels)
            # restore dataset transforms before it's too late
            dataset.transform = transform
            # create a numpy array of indices, then store it to file
            filtered = np.array(filtered)
            cached_path = prepare_folder(cached_path.parent)
            np.save(str(cached_path / cached_name), filtered)
        # NOTE(review): assert is stripped under `python -O`; consider raising instead
        assert any(filtered), "Current filter does not include any images"
        return filtered
| 47.637555 | 119 | 0.660464 |
f30eac5930a77fdd519ff99155f37e05cf917441 | 486 | py | Python | src/djshop/apps/offers/forms/bundle_offers.py | diegojromerolopez/djshop | 3fe623532228006e96269bd0f21c7b7d380c3a1a | [
"MIT"
] | null | null | null | src/djshop/apps/offers/forms/bundle_offers.py | diegojromerolopez/djshop | 3fe623532228006e96269bd0f21c7b7d380c3a1a | [
"MIT"
] | 1 | 2017-05-08T17:05:00.000Z | 2018-04-04T11:55:30.000Z | src/djshop/apps/offers/forms/bundle_offers.py | diegojromerolopez/djshop | 3fe623532228006e96269bd0f21c7b7d380c3a1a | [
"MIT"
] | 2 | 2017-04-25T10:32:57.000Z | 2020-05-04T16:12:08.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from djshop.apps.offers.models import BundleOffer
from django import forms
# Bundle offer form
class BundleOfferForm(forms.ModelForm):
    """ModelForm to create or edit a :class:`BundleOffer`."""
    class Meta:
        model = BundleOffer
        fields = ["name", "description", "product", "bundle_product_units", "paid_product_units"]
# Delete bundle offer confirmation form
class DeleteBundleOfferForm(forms.Form):
    """Confirmation form shown before deleting a bundle offer."""
    confirmed = forms.BooleanField(label=u"Confirm you want to delete this offer")
| 25.578947 | 97 | 0.738683 |
126faae346a32f2c6d533c08d0a9fdb11f73862b | 25,198 | py | Python | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py | letmaik/azure-sdk-for-python | 4ed6294caef4699534c56c9d840f379bced1ab6f | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py | letmaik/azure-sdk-for-python | 4ed6294caef4699534c56c9d840f379bced1ab6f | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py | letmaik/azure-sdk-for-python | 4ed6294caef4699534c56c9d840f379bced1ab6f | [
"MIT"
] | null | null | null | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import ( # pylint: disable=unused-import
Union,
Optional,
Any,
List,
Dict,
TYPE_CHECKING
)
from azure.core.tracing.decorator_async import distributed_trace_async
from .._generated.models import TextAnalyticsErrorException
from .._generated.aio._text_analytics_client_async import TextAnalyticsClient as TextAnalytics
from ._base_client_async import AsyncTextAnalyticsClientBase
from .._request_handlers import _validate_batch_input
from .._response_handlers import (
process_batch_error,
entities_result,
linked_entities_result,
key_phrases_result,
sentiment_result,
language_result,
pii_entities_result
)
from .._models import (
DetectLanguageInput,
TextDocumentInput,
DetectLanguageResult,
RecognizeEntitiesResult,
RecognizeLinkedEntitiesResult,
ExtractKeyPhrasesResult,
AnalyzeSentimentResult,
RecognizePiiEntitiesResult,
DocumentError,
)
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from .._credential import TextAnalyticsApiKeyCredential
class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
"""The Text Analytics API is a suite of text analytics web services built with best-in-class
Microsoft machine learning algorithms. The API can be used to analyze unstructured text for
tasks such as sentiment analysis, key phrase extraction, and language detection. No training data
is needed to use this API - just bring your text data. This API uses advanced natural language
processing techniques to deliver best in class predictions.
Further documentation can be found in
https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview
:param str endpoint: Supported Cognitive Services or Text Analytics resource
endpoints (protocol and hostname, for example: https://westus2.api.cognitive.microsoft.com).
:param credential: Credentials needed for the client to connect to Azure.
This can be the an instance of TextAnalyticsApiKeyCredential if using a
cognitive services/text analytics API key or a token credential
from :mod:`azure.identity`.
:type credential: :class:`~azure.ai.textanalytics.TextAnalyticsApiKeyCredential`
or :class:`~azure.core.credentials_async.AsyncTokenCredential`
:keyword str default_country_hint: Sets the default country_hint to use for all operations.
Defaults to "US". If you don't want to use a country hint, pass the string "none".
:keyword str default_language: Sets the default language to use for all operations.
Defaults to "en".
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_authentication_async.py
:start-after: [START create_ta_client_with_key_async]
:end-before: [END create_ta_client_with_key_async]
:language: python
:dedent: 8
:caption: Creating the TextAnalyticsClient with endpoint and API key.
.. literalinclude:: ../samples/async_samples/sample_authentication_async.py
:start-after: [START create_ta_client_with_aad_async]
:end-before: [END create_ta_client_with_aad_async]
:language: python
:dedent: 8
:caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
"""
def __init__( # type: ignore
self,
endpoint: str,
credential: Union["TextAnalyticsApiKeyCredential", "AsyncTokenCredential"],
**kwargs: Any
) -> None:
super(TextAnalyticsClient, self).__init__(credential=credential, **kwargs)
self._client = TextAnalytics(
endpoint=endpoint, credentials=credential, pipeline=self._pipeline
)
self._default_language = kwargs.pop("default_language", "en")
self._default_country_hint = kwargs.pop("default_country_hint", "US")
    @distributed_trace_async
    async def detect_language(  # type: ignore
        self,
        documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]],
        **kwargs: Any
    ) -> List[Union[DetectLanguageResult, DocumentError]]:
        """Detects Language for a batch of documents.
        Returns the detected language and a numeric score between zero and
        one. Scores close to one indicate 100% certainty that the identified
        language is true. See https://aka.ms/talangs for the list of enabled languages.
        See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
        for document length limits, maximum batch size, and supported text encoding.
        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and country_hint on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
            `{"id": "1", "country_hint": "us", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.DetectLanguageInput]
        :keyword str country_hint: A country hint for the entire batch. Accepts two
            letter country codes specified by ISO 3166-1 alpha-2. Per-document
            country hints will take precedence over whole batch hints. Defaults to
            "US". If you don't want to use a country hint, pass the string "none".
        :keyword str model_version: This value indicates which model will
            be used for scoring, e.g. "latest", "2019-10-01". If a model-version
            is not specified, the API will default to the latest, non-preview version.
        :keyword bool show_stats: If set to true, response will contain document
            level statistics.
        :return: The combined list of :class:`~azure.ai.textanalytics.DetectLanguageResult`
            and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
            were passed in.
        :rtype: list[~azure.ai.textanalytics.DetectLanguageResult,
            ~azure.ai.textanalytics.DocumentError]
        :raises ~azure.core.exceptions.HttpResponseError:
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_detect_language_async.py
                :start-after: [START batch_detect_language_async]
                :end-before: [END batch_detect_language_async]
                :language: python
                :dedent: 8
                :caption: Detecting language in a batch of documents.
        """
        # Pop so the hint is not also forwarded inside **kwargs; an explicit
        # ``country_hint=None`` falls back to the client-wide default.
        country_hint_arg = kwargs.pop("country_hint", None)
        country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint
        docs = _validate_batch_input(documents, "country_hint", country_hint)
        model_version = kwargs.pop("model_version", None)
        show_stats = kwargs.pop("show_stats", False)
        try:
            return await self._client.languages(
                documents=docs,
                model_version=model_version,
                show_stats=show_stats,
                cls=language_result,
                **kwargs
            )
        except TextAnalyticsErrorException as error:
            # translate the generated-layer error into the public exception types
            process_batch_error(error)
    @distributed_trace_async
    async def recognize_entities(  # type: ignore
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        **kwargs: Any
    ) -> List[Union[RecognizeEntitiesResult, DocumentError]]:
        """Entity Recognition for a batch of documents.
        Identifies and categorizes entities in your text as people, places,
        organizations, date/time, quantities, percentages, currencies, and more.
        For the list of supported entity types, check: https://aka.ms/taner
        See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
        for document length limits, maximum batch size, and supported text encoding.
        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
            `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            If not set, uses "en" for English as default. Per-document language will
            take precedence over whole batch language. See https://aka.ms/talangs for
            supported languages in Text Analytics API.
        :keyword str model_version: This value indicates which model will
            be used for scoring, e.g. "latest", "2019-10-01". If a model-version
            is not specified, the API will default to the latest, non-preview version.
        :keyword bool show_stats: If set to true, response will contain document level statistics.
        :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
            :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
            passed in.
        :rtype: list[~azure.ai.textanalytics.RecognizeEntitiesResult,
            ~azure.ai.textanalytics.DocumentError]
        :raises ~azure.core.exceptions.HttpResponseError:
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_recognize_entities_async.py
                :start-after: [START batch_recognize_entities_async]
                :end-before: [END batch_recognize_entities_async]
                :language: python
                :dedent: 8
                :caption: Recognize entities in a batch of documents.
        """
        # Pop so the hint is not also forwarded inside **kwargs; an explicit
        # ``language=None`` falls back to the client-wide default.
        language_arg = kwargs.pop("language", None)
        language = language_arg if language_arg is not None else self._default_language
        docs = _validate_batch_input(documents, "language", language)
        model_version = kwargs.pop("model_version", None)
        show_stats = kwargs.pop("show_stats", False)
        try:
            return await self._client.entities_recognition_general(
                documents=docs,
                model_version=model_version,
                show_stats=show_stats,
                cls=entities_result,
                **kwargs
            )
        except TextAnalyticsErrorException as error:
            # translate the generated-layer error into the public exception types
            process_batch_error(error)
    @distributed_trace_async
    async def recognize_pii_entities(  # type: ignore
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        **kwargs: Any
    ) -> List[Union[RecognizePiiEntitiesResult, DocumentError]]:
        """Recognize entities containing personal information for a batch of documents.
        Returns a list of personal information entities ("SSN",
        "Bank Account", etc) in the document.  For the list of supported entity types,
        check https://aka.ms/tanerpii.
        See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
        for document length limits, maximum batch size, and supported text encoding.
        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
            `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            If not set, uses "en" for English as default. Per-document language will
            take precedence over whole batch language. See https://aka.ms/talangs for
            supported languages in Text Analytics API.
        :keyword str model_version: This value indicates which model will
            be used for scoring, e.g. "latest", "2019-10-01". If a model-version
            is not specified, the API will default to the latest, non-preview version.
        :keyword bool show_stats: If set to true, response will contain document level statistics.
        :return: The combined list of :class:`~azure.ai.textanalytics.RecognizePiiEntitiesResult`
            and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
            were passed in.
        :rtype: list[~azure.ai.textanalytics.RecognizePiiEntitiesResult,
            ~azure.ai.textanalytics.DocumentError]
        :raises ~azure.core.exceptions.HttpResponseError:
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_recognize_pii_entities_async.py
                :start-after: [START batch_recognize_pii_entities_async]
                :end-before: [END batch_recognize_pii_entities_async]
                :language: python
                :dedent: 8
                :caption: Recognize personally identifiable information entities in a batch of documents.
        """
        # Pop so the hint is not also forwarded inside **kwargs; an explicit
        # ``language=None`` falls back to the client-wide default.
        language_arg = kwargs.pop("language", None)
        language = language_arg if language_arg is not None else self._default_language
        docs = _validate_batch_input(documents, "language", language)
        model_version = kwargs.pop("model_version", None)
        show_stats = kwargs.pop("show_stats", False)
        try:
            return await self._client.entities_recognition_pii(
                documents=docs,
                model_version=model_version,
                show_stats=show_stats,
                cls=pii_entities_result,
                **kwargs
            )
        except TextAnalyticsErrorException as error:
            # translate the generated-layer error into the public exception types
            process_batch_error(error)
    @distributed_trace_async
    async def recognize_linked_entities(  # type: ignore
        self,
        documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
        **kwargs: Any
    ) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]:
        """Recognize linked entities from a well-known knowledge base for a batch of documents.
        Identifies and disambiguates the identity of each entity found in text (for example,
        determining whether an occurrence of the word Mars refers to the planet, or to the
        Roman god of war). Recognized entities are associated with URLs to a well-known
        knowledge base, like Wikipedia.
        See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
        for document length limits, maximum batch size, and supported text encoding.
        :param documents: The set of documents to process as part of this batch.
            If you wish to specify the ID and language on a per-item basis you must
            use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
            dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
            `{"id": "1", "language": "en", "text": "hello world"}`.
        :type documents:
            list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
        :keyword str language: The 2 letter ISO 639-1 representation of language for the
            entire batch. For example, use "en" for English; "es" for Spanish etc.
            If not set, uses "en" for English as default. Per-document language will
            take precedence over whole batch language. See https://aka.ms/talangs for
            supported languages in Text Analytics API.
        :keyword str model_version: This value indicates which model will
            be used for scoring, e.g. "latest", "2019-10-01". If a model-version
            is not specified, the API will default to the latest, non-preview version.
        :keyword bool show_stats: If set to true, response will contain document level statistics.
        :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeLinkedEntitiesResult`
            and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
            were passed in.
        :rtype: list[~azure.ai.textanalytics.RecognizeLinkedEntitiesResult,
            ~azure.ai.textanalytics.DocumentError]
        :raises ~azure.core.exceptions.HttpResponseError:
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_recognize_linked_entities_async.py
                :start-after: [START batch_recognize_linked_entities_async]
                :end-before: [END batch_recognize_linked_entities_async]
                :language: python
                :dedent: 8
                :caption: Recognize linked entities in a batch of documents.
        """
        # Pop so the hint is not also forwarded inside **kwargs; an explicit
        # ``language=None`` falls back to the client-wide default.
        language_arg = kwargs.pop("language", None)
        language = language_arg if language_arg is not None else self._default_language
        docs = _validate_batch_input(documents, "language", language)
        model_version = kwargs.pop("model_version", None)
        show_stats = kwargs.pop("show_stats", False)
        try:
            return await self._client.entities_linking(
                documents=docs,
                model_version=model_version,
                show_stats=show_stats,
                cls=linked_entities_result,
                **kwargs
            )
        except TextAnalyticsErrorException as error:
            # translate the generated-layer error into the public exception types
            process_batch_error(error)
@distributed_trace_async
async def extract_key_phrases( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]:
"""Extract Key Phrases from a batch of documents.
Returns a list of strings denoting the key phrases in the input
text. For example, for the input text "The food was delicious and there
were wonderful staff", the API returns the main talking points: "food"
and "wonderful staff"
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.ExtractKeyPhrasesResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.ExtractKeyPhrasesResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_extract_key_phrases_async.py
:start-after: [START batch_extract_key_phrases_async]
:end-before: [END batch_extract_key_phrases_async]
:language: python
:dedent: 8
:caption: Extract the key phrases in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.key_phrases(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=key_phrases_result,
**kwargs
)
except TextAnalyticsErrorException as error:
process_batch_error(error)
@distributed_trace_async
async def analyze_sentiment( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[AnalyzeSentimentResult, DocumentError]]:
"""Analyze sentiment for a batch of documents.
Returns a sentiment prediction, as well as sentiment scores for
each sentiment class (Positive, Negative, and Neutral) for the document
and each sentence within it.
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.AnalyzeSentimentResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_analyze_sentiment_async.py
:start-after: [START batch_analyze_sentiment_async]
:end-before: [END batch_analyze_sentiment_async]
:language: python
:dedent: 8
:caption: Analyze sentiment in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.sentiment(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=sentiment_result,
**kwargs
)
except TextAnalyticsErrorException as error:
process_batch_error(error)
| 52.715481 | 118 | 0.668863 |
b0fd1a4e359844c28297c4fb729f9ac90777de75 | 861 | py | Python | eco-server.py | leopedroso45/ClientServerPy | 7216838e445b3e90fefcfb740ad194a089c5115d | [
"MIT"
] | null | null | null | eco-server.py | leopedroso45/ClientServerPy | 7216838e445b3e90fefcfb740ad194a089c5115d | [
"MIT"
] | null | null | null | eco-server.py | leopedroso45/ClientServerPy | 7216838e445b3e90fefcfb740ad194a089c5115d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from threading import Timer
from _thread import *
import threading
import time
import socket
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 65432 # Port to listen on (non-privileged ports are > 1023)
# NOTE(review): print_lock is created but never acquired anywhere in this
# script -- apparently leftover from an abandoned threading experiment.
print_lock = threading.Lock()
def timeout():
    """Timer callback: report that no message arrived within the window."""
    # Runtime message kept verbatim (Portuguese for "No messages").
    print('Sem mensagens')
def timing():
    """Block the caller for 20 seconds, then run timeout() exactly once.

    The Timer runs timeout() on its own thread after 20s; join() makes this
    call synchronous, so timing() always takes ~20 seconds to return.
    """
    watchdog = Timer(20, timeout)
    watchdog.start()
    watchdog.join()
# Accept a single client connection and echo every received message back.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen()
    # BUG FIX: the original line read "threading.th conn, addr = s.accept()",
    # a syntax error (stray "threading.th" left from an abandoned threading
    # attempt). accept() never returns False, so the old "if conn != False"
    # guard was also redundant.
    conn, addr = s.accept()
    print("Alguem esta se conectando...")
    with conn:
        print('Conectado com ', addr)
        while True:
            data = conn.recv(1024)
            if not data:
                # Peer closed the connection. The original closed the
                # *listening* socket here and then still called sendall(b''),
                # looping on a private attribute (conn._closed); report the
                # idle timeout once and stop instead.
                timing()
                break
            # Blocks ~20s per message (timing() is synchronous), matching the
            # original pacing, then echoes the payload back.
            timing()
            conn.sendall(data)
| 22.657895 | 73 | 0.601626 |
e9c8330682cb9b43f65141d12547f02f50b9983d | 1,437 | py | Python | examples/braket_sampler_min_vertex.py | aws/amazon-braket-ocean-plugin-python | 41e9b115e2f547498b90ed62a0b0ef4f57dc6b6a | [
"Apache-2.0"
] | 10 | 2020-09-22T08:41:16.000Z | 2022-03-02T23:23:24.000Z | examples/braket_sampler_min_vertex.py | aws/amazon-braket-ocean-plugin-python | 41e9b115e2f547498b90ed62a0b0ef4f57dc6b6a | [
"Apache-2.0"
] | 38 | 2020-08-13T19:14:40.000Z | 2022-02-14T13:54:22.000Z | examples/braket_sampler_min_vertex.py | aws/amazon-braket-ocean-plugin-python | 41e9b115e2f547498b90ed62a0b0ef4f57dc6b6a | [
"Apache-2.0"
] | 5 | 2021-06-30T16:39:25.000Z | 2022-01-16T04:26:55.000Z | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import dwave_networkx as dnx
import networkx as nx
from braket.aws import AwsDevice
from dwave.system.composites import EmbeddingComposite
from braket.ocean_plugin import BraketSampler
# Placeholder S3 output location -- replace bucket/folder with values you own.
s3_destination_folder = ("your-s3-bucket", "your-folder")
# Get an online D-Wave device ARN
# NOTE(review): get_devices(...)[0] raises IndexError if no D-Wave device is
# currently ONLINE -- confirm whether a friendlier error message is wanted.
device_arn = AwsDevice.get_devices(provider_names=["D-Wave Systems"], statuses=["ONLINE"])[0].arn
print("Using device ARN", device_arn)
# Sampler that submits annealing jobs to the chosen device, writing to S3.
sampler = BraketSampler(s3_destination_folder, device_arn)
star_graph = nx.star_graph(4) # star graph where node 0 is connected to 4 other nodes
# EmbeddingComposite automatically maps the problem to the structure of the solver.
embedded_sampler = EmbeddingComposite(sampler)
# The below result should be 0 because node 0 is connected to the 4 other nodes in a star graph
print(dnx.min_vertex_cover(star_graph, embedded_sampler, resultFormat="HISTOGRAM"))
| 39.916667 | 97 | 0.783577 |
d559ec7107d9c6854dd5c871e72c9ad487961c3e | 387 | py | Python | basiclive/core/lims/migrations/0054_auto_20200519_1654.py | znarthur/basic-live | 79c194311de05af2e1bb21d1bc8c6c14dda356d0 | [
"BSD-3-Clause"
] | null | null | null | basiclive/core/lims/migrations/0054_auto_20200519_1654.py | znarthur/basic-live | 79c194311de05af2e1bb21d1bc8c6c14dda356d0 | [
"BSD-3-Clause"
] | 1 | 2020-12-03T15:27:09.000Z | 2020-12-03T15:27:09.000Z | basiclive/core/lims/migrations/0054_auto_20200519_1654.py | znarthur/basic-live | 79c194311de05af2e1bb21d1bc8c6c14dda356d0 | [
"BSD-3-Clause"
] | 1 | 2021-09-28T21:06:09.000Z | 2021-09-28T21:06:09.000Z | # Generated by Django 3.0.6 on 2020-05-19 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lims', '0053_auto_20200519_1641'),
]
operations = [
migrations.AlterField(
model_name='beamline',
name='active',
field=models.BooleanField(default=True),
),
]
| 20.368421 | 52 | 0.599483 |
8c0a8b2ddba0f170d02aa64ef23a6fd73c37accb | 1,400 | py | Python | flask_management_blueprint/management/health_check.py | stone-payments/flask-management-blueprint | eafec0e1560726705990ac8a37c120006011047c | [
"Apache-2.0"
] | 3 | 2018-03-23T21:55:38.000Z | 2020-03-11T10:22:29.000Z | flask_management_blueprint/management/health_check.py | stone-payments/flask-management-blueprint | eafec0e1560726705990ac8a37c120006011047c | [
"Apache-2.0"
] | 8 | 2018-03-16T17:40:28.000Z | 2021-06-11T17:45:37.000Z | flask_management_blueprint/management/health_check.py | stone-payments/flask-management-blueprint | eafec0e1560726705990ac8a37c120006011047c | [
"Apache-2.0"
] | 4 | 2018-06-06T20:00:34.000Z | 2020-03-10T14:51:08.000Z | """Module to define classes to retrieve app health"""
import asyncio
import async_timeout
import aiohttp
async def fetch(session, url):
"""Method to fetch data from a url asynchronously
"""
async with async_timeout.timeout(30):
async with session.get(url) as response:
return await response.json()
async def execute_request(url):
"""Method to execute a http request asynchronously
"""
async with aiohttp.ClientSession() as session:
json = await fetch(session, url)
return json
class HealthCheck(object):
"""Class to define methods to manage app health"""
RESOURCES = []
@classmethod
def register_resource(cls, name, url):
"""Method to register a resource necessary to the app
Args:
name (string): Name of system to check health
url (string): Url of system to check health
"""
cls.RESOURCES.append({name: url})
@classmethod
def check_resources_health(cls):
"""Method to check the health of all the registered resources
"""
url_list = [list(rec.values())[0]
for rec in cls.RESOURCES]
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
resp = loop.run_until_complete(asyncio.gather(
*[execute_request(url) for url in url_list]
))
return resp
| 25.454545 | 69 | 0.630714 |
d1de34205cc3ccc33b870a40d722cbba5fbfe0d2 | 705 | wsgi | Python | opm/opm.wsgi | Open-Prose-Metrics/open_prose_metrics_app-core | 9df65edfe9ee9af0a0731c3f2e21ea25bced250c | [
"MIT"
] | null | null | null | opm/opm.wsgi | Open-Prose-Metrics/open_prose_metrics_app-core | 9df65edfe9ee9af0a0731c3f2e21ea25bced250c | [
"MIT"
] | null | null | null | opm/opm.wsgi | Open-Prose-Metrics/open_prose_metrics_app-core | 9df65edfe9ee9af0a0731c3f2e21ea25bced250c | [
"MIT"
] | null | null | null | import sys, os
import logging
logging.basicConfig(stream=sys.stderr)
PROJECT_FOLDER = '/var/www/opm'
PROJECT_HOME = '/var/www/opm/app'
PYTHON_VERSION = 'python3.7'
activate_this = os.path.join(PROJECT_FOLDER, 'virtualenv', 'bin', 'activate_this.py')
exec(compile(open(activate_this, "rb").read(), activate_this, 'exec'), dict(__file__=activate_this))
# add your project directory to the sys.path
#if project_home not in sys.path:
#sys.path = [project_home] + sys.path
sys.path.append(PROJECT_HOME)
#sys.path.append(app.path.join(PROJECT_FOLDER, 'virtualenv', 'lib', PYTHON_VERSION, 'site-packages')
# import flask app but need to call it "application" for WSGI to work
from app import app as application
| 41.470588 | 100 | 0.763121 |
6e7acbb617cd37119c395acea3981ebe4477b7c7 | 7,233 | py | Python | prototypes/python_prototype/multiplicativeFormulation.py | dariothornhill/adaptive-engine | 209e7a552f8675cdd18f6c1352ede68f6fbf9e36 | [
"Apache-2.0"
] | 31 | 2017-12-08T08:03:27.000Z | 2021-09-15T07:37:38.000Z | prototypes/python_prototype/multiplicativeFormulation.py | dariothornhill/adaptive-engine | 209e7a552f8675cdd18f6c1352ede68f6fbf9e36 | [
"Apache-2.0"
] | 12 | 2018-09-27T13:55:15.000Z | 2021-06-10T19:15:17.000Z | prototypes/python_prototype/multiplicativeFormulation.py | Dhiliban24/Adaptive-engine | 48f1077e4af619701e13af2d7a738bdedaec37d2 | [
"Apache-2.0"
] | 19 | 2018-10-26T07:12:20.000Z | 2022-02-10T12:51:27.000Z | ##Author: Ilia Rushkin, VPAL Research, Harvard University, Cambridge, MA, USA
import numpy as np
import pandas as pd
from fakeInitials import initialize_variables
from derivedData import calculate_derived_data
from empiricalEstimation import estimate
class MultiplicativeFormulation(object):
def __init__(self, **kwargs):
initialize_variables(self, **kwargs)
calculate_derived_data(self)
def mapUser(self, user_id):
"""
This function maps the user_id to the user index used by other functions, and also adds new users
SYNCHRONIZATION IS IMPORTANT
"""
# global users
try:
u=np.where(self.users==user_id)[0][0]
except:
"""
Add a new user
"""
# global n_users, last_seen, m_L, m_exposure, m_unseen, m_correctness, m_timestamp
n_users = len(self.users)
u=n_users
# n_users+=1
self.users=np.append(self.users,user_id)
self.last_seen=np.append(self.last_seen,-1)
self.m_L=np.vstack((self.m_L,L_i))
self.m_exposure=np.vstack((self.m_exposure,row_exposure))
self.m_unseen=np.vstack((self.m_unseen,row_unseen))
# m_correctness=np.vstack((m_correctness,row_correctness))
# m_timestamp=np.vstack((m_timestamp,row_timestamp))
return u
def mapItem(self,item_id):
item=np.where(self.items==item_id)[0][0]
return item
def bayesUpdate(self, u, item, score=1.0,time=0, attempts='all'):
#This function updates the user mastery and record of interactions that will be needed for recommendation and estimation of the BKT
# global m_x0_mult, m_x1_0_mult, m_L, m_trans, last_seen, m_unseen, transactions, m_exposure, m_tagging, epsilon, inv_epsilon
self.last_seen[u]=item
# m_correctness[u,item]=score
# m_timestamp[u,item]=time
if self.m_unseen[u,item]:
self.m_unseen[u,item]=False
self.m_exposure[u,]+=self.m_tagging[item,]
self.m_confidence[u,]+=self.m_k[item,]
if attempts=='first':
##Record the transaction by appending a new row to the data frame "transactions":
self.transactions=self.transactions.append(pd.DataFrame([[u,item,time,score]], columns=['user_id','problem_id','time','score']),ignore_index=True)
##The increment of odds due to evidence of the problem, but before the transfer
x=self.m_x0_mult[item,]*np.power(self.m_x1_0_mult[item,],score)
L=self.m_L[u,]*x
##Add the transferred knowledge
L+=self.m_trans[item,]*(L+1)
if attempts!='first':
##Record the transaction by appending a new row to the data frame "transactions":
self.transactions=self.transactions.append(pd.DataFrame([[u,item,time,score]], columns=['user_id','problem_id','time','score']),ignore_index=True)
##The increment of odds due to evidence of the problem, but before the transfer
x=self.m_x0_mult[item,]*np.power(self.m_x1_0_mult[item,],score)
L=self.m_L[u,]*x
##Add the transferred knowledge
L+=self.m_trans[item,]*(L+1)
L[np.where(np.isposinf(L))]=self.inv_epsilon
L[np.where(L==0.0)]=self.epsilon
self.m_L[u,]=L
#m_L[u,]+=trans[item,]*(L+1)
#return{'L':L, 'x':x}
#This function calculates the probability of correctness on a problem, as a prediction based on student's current mastery.
def predictCorrectness(self, u, item):
# global m_L, m_p_slip, m_p_guess
L=self.m_L[u,]
p_slip=self.m_p_slip[item,];
p_guess=self.m_p_guess[item,];
x=(L*(1.0-p_slip)+p_guess)/(L*p_slip+1.0-p_guess); ##Odds by LO
x=np.prod(x) ##Total odds
p=x/(1+x) ##Convert odds to probability
if np.isnan(p) or np.isinf(p):
p=1.0
return(p)
##This function returns the id of the next recommended problem in an adaptive module. If none is recommended (list of problems exhausted or the user has reached mastery) it returns None.
def recommend(self, u, module=0, stopOnMastery=False, normalize=False):
# global m_L, L_star, m_w, m_unseen, m_k, r_star, last_seen, m_difficulty, W_r, W_d, W_p, W_c, scope
#Subset to the unseen problems from the relevant scope
#ind_unseen=np.where(m_unseen[u,] & ((scope==module)|(scope==0)))[0]
ind_unseen=np.where(self.m_unseen[u,] & (self.scope[:,module]))[0]
L=np.log(self.m_L[u,])
if stopOnMastery:
m_k_unseen=self.m_k[ind_unseen,]
R=np.dot(m_k_unseen, np.maximum((self.L_star-L),0))
ind_unseen=ind_unseen[R!=0.0]
N=len(ind_unseen)
if(N==0): ##This means we ran out of problems, so we stop
next_item = None
else:
#L=np.log(m_L[u,])
#Calculate the user readiness for LOs
m_r=np.dot(np.minimum(L-self.L_star,0), self.m_w);
m_k_unseen=self.m_k[ind_unseen,]
P=np.dot(m_k_unseen, np.minimum((m_r+self.r_star),0))
R=np.dot(m_k_unseen, np.maximum((self.L_star-L),0))
if self.last_seen[u]<0:
C=np.repeat(0.0,N)
else:
C=np.sqrt(np.dot(m_k_unseen, self.m_k[self.last_seen[u],]))
#A=0.0
d_temp=self.m_difficulty[:,ind_unseen]
L_temp=np.tile(L,(N,1)).transpose()
D=-np.diag(np.dot(m_k_unseen,np.abs(L_temp-d_temp)))
#if stopOnMastery and sum(D)==0: ##This means the user has reached threshold mastery in all LOs relevant to the problems in the homework, so we stop
next_item=None
#else:
if normalize:
temp=(D.max()-D.min());
if(temp!=0.0):
D=D/temp
temp=(R.max()-R.min());
if(temp!=0.0):
R=R/temp
temp=(P.max()-P.min());
if(temp!=0.0):
P=P/temp
temp=(C.max()-C.min());
if(temp!=0.0):
C=C/temp
next_item=ind_unseen[np.argmax(self.W_p*P+self.W_r*R+self.W_d*D+self.W_c*C)]
return(next_item)
def updateModel(self):
# global eta, M, L_i, m_exposure, m_L, m_L_i, m_trans, m_guess, m_slip
est=estimate(self, self.eta, self.M)
self.L_i=1.0*est['L_i']
self.m_L_i=np.tile(self.L_i,(self.m_L.shape[0],1))
ind_pristine=np.where(self.m_exposure==0.0)
self.m_L[ind_pristine]=self.m_L_i[ind_pristine]
m_trans=1.0*est['trans']
m_guess=1.0*est['guess']
m_slip=1.0*est['slip']
# execfile('derivedData.py')
calculate_derived_data(self)
| 36.165 | 190 | 0.56657 |
8235a5261305f298ea38ed3ebd94f7e4aa65a828 | 1,238 | py | Python | apps/leaflet_ts/leaflet_ts/lib/ldap.py | earthobservatory/displacement-ts-server | 185d9fab9af35c863de8a12d09d9f677f03ec6f9 | [
"Apache-2.0"
] | null | null | null | apps/leaflet_ts/leaflet_ts/lib/ldap.py | earthobservatory/displacement-ts-server | 185d9fab9af35c863de8a12d09d9f677f03ec6f9 | [
"Apache-2.0"
] | null | null | null | apps/leaflet_ts/leaflet_ts/lib/ldap.py | earthobservatory/displacement-ts-server | 185d9fab9af35c863de8a12d09d9f677f03ec6f9 | [
"Apache-2.0"
] | null | null | null | import ldap, traceback
from leaflet_ts import app
def ldap_user_verified(username, password):
"""Verify user via ldap."""
host = app.config['LDAP_HOST']
base_dn = app.config['LDAP_BASEDN']
groups = app.config['LDAP_GROUPS']
try:
l = ldap.Connection(
host,
dn='uid=%s,%s' % (username, base_dn),
encryption='ssl',
password=password)
except Exception, e:
app.logger.info("Got error trying to verify LDAP user %s:" % username)
app.logger.info("%s:\n\n%s" % (str(e), traceback.format_exc()))
return None
# validate user
r = l.search('uid=%s' % username, base_dn=base_dn)
if len(r) != 1:
app.logger.info(
"Got invalid number of entries for %s: %s" % (username, len(r)))
app.logger.info("r: %s" % str(r))
return None
# validate user is part of a group allowed
uid = 'uid=%s,%s' % (username, base_dn)
for group in groups:
g = l.search('cn=%s' % group, base_dn=base_dn)
for this_g in g:
if uid in this_g['uniqueMember']: return dict(r[0])
app.logger.info(
"User %s is not part of any approved LDAP groups." % username)
return None
| 29.47619 | 78 | 0.579968 |
5af1783a72898dd08522b140632ca6b71846c429 | 12,200 | py | Python | tests/unit/mdp/test_pressure.py | CubeSkyy/ILU-RL | 676d88587e1d8638487de1b5a3f8785a7375ad1a | [
"MIT"
] | null | null | null | tests/unit/mdp/test_pressure.py | CubeSkyy/ILU-RL | 676d88587e1d8638487de1b5a3f8785a7375ad1a | [
"MIT"
] | null | null | null | tests/unit/mdp/test_pressure.py | CubeSkyy/ILU-RL | 676d88587e1d8638487de1b5a3f8785a7375ad1a | [
"MIT"
] | 2 | 2021-03-13T15:38:05.000Z | 2021-05-08T22:25:36.000Z | import unittest
from ilurl.params import MDPParams
from ilurl.utils.properties import lazy_property
from tests.unit.mdp.test_mdp_base import (TestGridMDPSetUp,
INCOMING_247123161, OUTGOING_247123161,
INCOMING_247123464, OUTGOING_247123464,
INCOMING_247123468, OUTGOING_247123468,
MAX_VEHS, MAX_VEHS_OUT)
from tests.unit.utils import process_pressure
class TestGridPressure(TestGridMDPSetUp):
    """Tests for the 'pressure' feature and 'reward_min_pressure' reward.

    * Targets the implemented problem formulation, i.e. the state and
      reward function definitions, for three traffic lights.
    * Static assertions pin expected values hand-computed from the fixture
      data (test_mdp_base); dynamic assertions recompute the same quantity
      via process_pressure.
    * Uses lazy_property so fixtures are computed once and cached.
    """
    @property
    def mdp_params(self):
        # Pressure feature, un-normalized vehicle counts, continuous state.
        mdp_params = MDPParams(
            features=('pressure',),
            reward='reward_min_pressure',
            normalize_velocities=True,
            discretize_state_space=False,
            reward_rescale=0.01,
            time_period=None,
            velocity_threshold=0.1)
        return mdp_params
    def setUp(self):
        """Code here will run before every test"""
        super(TestGridPressure, self).setUp()
    def test_pressure_tl1ph0(self):
        """Tests pressure state
        * traffic light 1
        * ID = '247123161'
        * phase 0
        """
        ID = '247123161'
        outgoing = OUTGOING_247123161
        incoming = INCOMING_247123161[0]
        p0 = process_pressure(self.kernel_data_1, incoming, outgoing)
        # State.
        # 247123161 static assertion
        self.assertEqual(self.state[ID][0], 2.0, f'pressure:{ID}\tphase:0') # pressure, phase 0
        # 247123161 dynamic assertion
        self.assertEqual(self.state[ID][0], p0) # pressure, phase 0
    def test_pressure_tl1ph1(self):
        """Tests pressure state
        * traffic light 1
        * ID = '247123161'
        * phase 1
        """
        ID = '247123161'
        outgoing = OUTGOING_247123161
        incoming = INCOMING_247123161[1]
        p1 = process_pressure(self.kernel_data_1, incoming, outgoing)
        # State.
        # 247123161 static assertion
        self.assertEqual(self.state[ID][1], -1.0) # pressure, phase 1
        # 247123161 dynamic assertion
        self.assertEqual(self.state[ID][1], p1) # pressure, phase 1
    def test_min_pressure_tl1(self):
        """Tests pressure reward
        * traffic light 1
        * ID = '247123161'
        """
        ID = '247123161'
        reward = self.reward(self.observation_space)
        # reward = -rescale * (sum of phase pressures)
        self.assertEqual(reward[ID], round(-0.01*(2.0 -1.0), 4))
    def test_pressure_tl2ph0(self):
        """Tests pressure state
        * traffic light 2
        * ID = '247123464'
        * phase 0
        """
        ID = '247123464'
        outgoing = OUTGOING_247123464
        incoming = INCOMING_247123464[0]
        p0 = process_pressure(self.kernel_data_1, incoming, outgoing)
        # State.
        # 247123464 static assertion
        self.assertEqual(self.state[ID][0], 0.0) # pressure, phase 0
        # 247123464 dynamic assertion
        self.assertEqual(self.state[ID][0], p0) # pressure, phase 0
    def test_pressure_tl2ph1(self):
        """Tests pressure state
        * traffic light 2
        * ID = '247123464'
        * phase 1
        """
        ID = '247123464'
        outgoing = OUTGOING_247123464
        incoming = INCOMING_247123464[1]
        p1 = process_pressure(self.kernel_data_1, incoming, outgoing)
        # State.
        # 247123464 static assertion
        self.assertEqual(self.state[ID][1], 3.0) # pressure, phase 1
        # 247123464 dynamic assertion
        self.assertEqual(self.state[ID][1], p1) # pressure, phase 1
    def test_min_pressure_tl2(self):
        """Tests pressure reward
        * traffic light 2
        * ID = '247123464'
        """
        ID = '247123464'
        reward = self.reward(self.observation_space)
        self.assertEqual(reward[ID], round(-0.01*(0.0 + 3.0), 4))
    def test_pressure_tl3ph0(self):
        """Tests pressure state
        * traffic light 3
        * ID = '247123468'
        * phase 0
        """
        ID = '247123468'
        outgoing = OUTGOING_247123468
        incoming = INCOMING_247123468[0]
        p0 = process_pressure(self.kernel_data_1, incoming, outgoing)
        # State.
        # 247123468 static assertion
        self.assertEqual(self.state[ID][0], 1.0) # pressure, phase 0
        # 247123468 dynamic assertion
        self.assertEqual(self.state[ID][0], p0) # pressure, phase 0
    def test_pressure_tl3ph1(self):
        """Tests pressure state
        * traffic light 3
        * ID = '247123468'
        * phase 1
        """
        ID = '247123468'
        outgoing = OUTGOING_247123468
        incoming = INCOMING_247123468[1]
        p1 = process_pressure(self.kernel_data_1, incoming, outgoing)
        # State.
        # 247123468 static assertion
        self.assertEqual(self.state[ID][1], 1.0) # pressure, phase 1
        # 247123468 dynamic assertion
        self.assertEqual(self.state[ID][1], p1) # pressure, phase 1
    def test_min_pressure_tl3(self):
        """Tests pressure reward
        * traffic light 3
        * ID = '247123468'
        """
        ID = '247123468'
        reward = self.reward(self.observation_space)
        self.assertEqual(reward[ID], round(-0.01*(1.0 + 1.0), 4))
class TestGridPressureNorm(TestGridPressure):
    """Same pressure tests with the state normalized by vehicle capacity.

    * Inherits fixtures and setUp from TestGridPressure; overrides
      mdp_params to set normalize_vehicles and overrides every test with
      the normalized expected values (incoming scaled by MAX_VEHS,
      outgoing by MAX_VEHS_OUT).
    """
    @property
    def mdp_params(self):
        mdp_params = MDPParams(
            features=('pressure',),
            reward='reward_min_pressure',
            normalize_velocities=True,
            normalize_vehicles=self.norm_vehs,
            discretize_state_space=False,
            reward_rescale=0.01,
            time_period=None,
            velocity_threshold=0.1)
        return mdp_params
    @property
    def norm_vehs(self):
        # Toggle used by the fct1/fct2 scale factors in each test below.
        return True
    def test_pressure_tl1ph0(self):
        """Tests pressure state
        * traffic light 1
        * ID = '247123161'
        * phase 0
        """
        ID = '247123161'
        outgoing = OUTGOING_247123161
        incoming = INCOMING_247123161[0]
        fct1 = MAX_VEHS[(ID, 0)] if self.norm_vehs else 1
        fct2 = MAX_VEHS_OUT[(ID, 0)] if self.norm_vehs else 1
        p0 = process_pressure(self.kernel_data_1, incoming, outgoing,
                              fctin=fct1, fctout=fct2)
        # State.
        # 247123161 static assertion
        self.assertEqual(self.state[ID][0], 0.125, f'pressure:{ID}\tphase:0') # pressure, phase 0
        # 247123161 dynamic assertion
        self.assertEqual(self.state[ID][0], p0) # pressure, phase 0
    def test_pressure_tl1ph1(self):
        """Tests pressure state
        * traffic light 1
        * ID = '247123161'
        * phase 1
        """
        ID = '247123161'
        outgoing = OUTGOING_247123161
        incoming = INCOMING_247123161[1]
        fct1 = MAX_VEHS[(ID, 1)] if self.norm_vehs else 1
        fct2 = MAX_VEHS_OUT[(ID, 1)] if self.norm_vehs else 1
        p1 = process_pressure(self.kernel_data_1, incoming, outgoing,
                              fctin=fct1, fctout=fct2)
        # State.
        # 247123161 static assertion
        self.assertEqual(self.state[ID][1], -0.0972) # pressure, phase 1
        # 247123161 dynamic assertion
        self.assertEqual(self.state[ID][1], p1) # pressure, phase 1
    def test_min_pressure_tl1(self):
        """Tests pressure reward
        * traffic light 1
        * ID = '247123161'
        """
        ID = '247123161'
        reward = self.reward(self.observation_space)
        self.assertEqual(reward[ID], round(-0.01*(0.125 - 0.0972), 4))
    def test_pressure_tl2ph0(self):
        """Tests pressure state
        * traffic light 2
        * ID = '247123464'
        * phase 0
        """
        ID = '247123464'
        outgoing = OUTGOING_247123464
        incoming = INCOMING_247123464[0]
        fct1 = MAX_VEHS[(ID, 0)] if self.norm_vehs else 1
        fct2 = MAX_VEHS_OUT[(ID, 0)] if self.norm_vehs else 1
        p0 = process_pressure(self.kernel_data_1, incoming, outgoing,
                              fctin=fct1, fctout=fct2)
        # State.
        # 247123464 static assertion
        self.assertEqual(self.state[ID][0], 0.0) # pressure, phase 0
        # 247123464 dynamic assertion
        self.assertEqual(self.state[ID][0], p0) # pressure, phase 0
    def test_pressure_tl2ph1(self):
        """Tests pressure state
        * traffic light 2
        * ID = '247123464'
        * phase 1
        """
        ID = '247123464'
        outgoing = OUTGOING_247123464
        incoming = INCOMING_247123464[1]
        fct1 = MAX_VEHS[(ID, 1)] if self.norm_vehs else 1
        fct2 = MAX_VEHS_OUT[(ID, 1)] if self.norm_vehs else 1
        p1 = process_pressure(self.kernel_data_1, incoming, outgoing,
                              fctin=fct1, fctout=fct2)
        # State.
        # 247123464 static assertion
        self.assertEqual(self.state[ID][1], 0.0938) # pressure, phase 1
        # 247123464 dynamic assertion
        self.assertEqual(self.state[ID][1], p1) # pressure, phase 1
    def test_min_pressure_tl2(self):
        """Tests pressure reward
        * traffic light 2
        * ID = '247123464'
        """
        ID = '247123464'
        reward = self.reward(self.observation_space)
        # NOTE(review): assertAlmostEqual here while the sibling tests use
        # assertEqual -- presumably guarding a float rounding artifact at
        # this value; confirm whether the others need it too.
        self.assertAlmostEqual(reward[ID], round(-0.01*(0.0 + 0.0938), 4))
    def test_pressure_tl3ph0(self):
        """Tests pressure state
        * traffic light 3
        * ID = '247123468'
        * phase 0
        """
        ID = '247123468'
        outgoing = OUTGOING_247123468
        incoming = INCOMING_247123468[0]
        fct1 = MAX_VEHS[(ID, 0)] if self.norm_vehs else 1
        fct2 = MAX_VEHS_OUT[(ID, 0)] if self.norm_vehs else 1
        p0 = process_pressure(self.kernel_data_1, incoming, outgoing,
                              fctin=fct1, fctout=fct2)
        # State.
        # 247123468 static assertion
        self.assertEqual(self.state[ID][0], 0.1597) # pressure, phase 0
        # 247123468 dynamic assertion
        self.assertEqual(self.state[ID][0], p0) # pressure, phase 0
    def test_pressure_tl3ph1(self):
        """Tests pressure state
        * traffic light 3
        * ID = '247123468'
        * phase 1
        """
        ID = '247123468'
        outgoing = OUTGOING_247123468
        incoming = INCOMING_247123468[1]
        fct1 = MAX_VEHS[(ID, 1)] if self.norm_vehs else 1
        fct2 = MAX_VEHS_OUT[(ID, 1)] if self.norm_vehs else 1
        p1 = process_pressure(self.kernel_data_1, incoming, outgoing,
                              fctin=fct1, fctout=fct2)
        # State.
        # 247123468 static assertion
        self.assertEqual(self.state[ID][1], 0.0) # pressure, phase 1
        # 247123468 dynamic assertion
        self.assertEqual(self.state[ID][1], p1) # pressure, phase 1
    def test_min_pressure_tl3(self):
        """Tests pressure reward
        * traffic light 3
        * ID = '247123468'
        """
        ID = '247123468'
        reward = self.reward(self.observation_space)
        self.assertEqual(reward[ID], round(-0.01*(0.1597 + 0.0), 4))
# Allow running this test module directly (e.g. `python test_pressure.py`).
if __name__ == '__main__':
    unittest.main()
| 30.730479 | 97 | 0.565738 |
3cc7adf7a0a9cd51e1b8cca01b8280fc702d1186 | 257 | py | Python | osnim/0131/10952 A+B - 5.py | Kwak-JunYoung/154Algoritm-5weeks | fa18ae5f68a1ee722a30a05309214247f7fbfda4 | [
"MIT"
] | 3 | 2022-01-24T03:06:32.000Z | 2022-01-30T08:43:58.000Z | osnim/0131/10952 A+B - 5.py | Kwak-JunYoung/154Algoritm-5weeks | fa18ae5f68a1ee722a30a05309214247f7fbfda4 | [
"MIT"
] | null | null | null | osnim/0131/10952 A+B - 5.py | Kwak-JunYoung/154Algoritm-5weeks | fa18ae5f68a1ee722a30a05309214247f7fbfda4 | [
"MIT"
] | 2 | 2022-01-24T02:27:40.000Z | 2022-01-30T08:57:03.000Z | from sys import stdin
# Baekjoon 10952 (A+B - 5): echo A+B per line until "0 0" or end of input.
while True:
    try:
        A, B = map(int, stdin.readline().split())
        if A == 0 and B == 0:
            break
        print(A + B)
    # BUG FIX: the original wrote "except EOFError or ValueError:", which
    # evaluates the expression "EOFError or ValueError" to just EOFError,
    # so ValueError (blank/short line on EOF via readline, or non-numeric
    # input) was never caught. A tuple catches both.
    except (EOFError, ValueError):
        break
| 18.357143 | 49 | 0.490272 |
31a5c3f6fe8b7c4faaa99bb94df84b54edb08a34 | 926 | py | Python | mmdet/models/detectors/__init__.py | hukefei/chongqing_contest | c38ae3e6f25230282c65cdd568de93f28e88c6d6 | [
"Apache-2.0"
] | 1 | 2021-04-12T13:29:54.000Z | 2021-04-12T13:29:54.000Z | mmdet/models/detectors/__init__.py | hukefei/chongqing_contest | c38ae3e6f25230282c65cdd568de93f28e88c6d6 | [
"Apache-2.0"
] | 1 | 2021-04-12T13:31:27.000Z | 2021-04-12T13:33:10.000Z | mmdet/models/detectors/__init__.py | hukefei/chongqing_contest | c38ae3e6f25230282c65cdd568de93f28e88c6d6 | [
"Apache-2.0"
] | 1 | 2021-04-21T10:14:15.000Z | 2021-04-21T10:14:15.000Z | from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .double_head_rcnn import DoubleHeadRCNN
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
from .cascade_rcnn_pair import CascadeRCNN_pair
from .efficientdet import EfficientDet
# Names exported by `from mmdet.models.detectors import *`.
# Keep this list in sync with the imports above (all 17 detectors are listed).
__all__ = [
    'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
    'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
    'DoubleHeadRCNN', 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN',
    'RepPointsDetector', 'CascadeRCNN_pair', 'EfficientDet',
]
3b3762961e7607f5ead0466238fd30e52a66a36c | 14,996 | py | Python | app/main/views.py | ToonoW/flashy | 565f5348be8f7f7369346be06ed64e2c4f403a1d | [
"MIT"
] | null | null | null | app/main/views.py | ToonoW/flashy | 565f5348be8f7f7369346be06ed64e2c4f403a1d | [
"MIT"
] | null | null | null | app/main/views.py | ToonoW/flashy | 565f5348be8f7f7369346be06ed64e2c4f403a1d | [
"MIT"
] | null | null | null | from flask import render_template, redirect, url_for, abort, flash, request,\
current_app, make_response
from flask.ext.login import login_required, current_user
from flask.ext.sqlalchemy import get_debug_queries
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm,\
CommentForm, UploadTopicForm, UploadVedioForm
from .. import db
from ..models import Permission, Role, User, Post, Comment, Topic, Post
from ..decorators import admin_required, permission_required
import os, time, hashlib
@main.after_app_request
def after_request(response):
    """Log every database query slower than the configured threshold,
    then pass the response through unchanged."""
    threshold = current_app.config['FLASKY_SLOW_DB_QUERY_TIME']
    for q in get_debug_queries():
        if q.duration < threshold:
            continue
        current_app.logger.warning(
            'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
            % (q.statement, q.parameters, q.duration, q.context))
    return response
@main.route('/shutdown')
def server_shutdown():
    """Stop the Werkzeug development server; only allowed in testing mode."""
    if not current_app.testing:
        abort(404)
    stop_server = request.environ.get('werkzeug.server.shutdown')
    if not stop_server:
        abort(500)
    stop_server()
    return 'Shutting down...'
# Home page
@main.route('/', methods=['GET', 'POST'])
def index():
    """Render the landing page: the nine most-played videos plus the newest
    posts from each featured category."""
    recommands = Post.query.order_by(Post.play_times.desc()).limit(9).all()
    #gdmus = Post.query.filter(Post.category == 'gdmu').order_by(Post.timestamp.desc()).limit(8).all()
    #lifes = Post.query.filter(Post.category == 'life').order_by(Post.timestamp.desc()).limit(9).all()
    #technologys = Post.query.filter(Post.category == 'technology').order_by(Post.timestamp.desc()).limit(5).all()
    movies = Post.query.filter(Post.category == 'movie').order_by(Post.timestamp.desc()).limit(6).all()
    musics = Post.query.filter(Post.category == 'music').order_by(Post.timestamp.desc()).limit(4).all()
    animations = Post.query.filter(Post.category == 'animation').order_by(Post.timestamp.desc()).limit(5).all()
    tvs = Post.query.filter(Post.category == 'tv').order_by(Post.timestamp.desc()).limit(4).all()
    return render_template('index.html', recommands=recommands, movies= movies, musics=musics, animations=animations, tvs=tvs)
# GDMU
@main.route('/gdmu')
def gdmu_category():
    """Render the GDMU category page with its nine most recent posts."""
    recent = (Post.query.filter(Post.category == 'gdmu')
              .order_by(Post.timestamp.desc())
              .limit(9)
              .all())
    return render_template('GDMU.html', gdmus=recent)
def _videos_by_category(category):
    """Return all posts in *category*, newest first (shared by the four
    category routes below, which previously duplicated this query)."""
    return Post.query.filter(Post.category == category).order_by(Post.timestamp.desc()).all()


# tv
@main.route('/tv')
def tv_category():
    """Render every TV post, newest first."""
    return render_template('tv.html', tvs=_videos_by_category('tv'))


# movie
@main.route('/movie')
def movie_category():
    """Render every movie post, newest first (reuses the 'tv.html' template)."""
    return render_template('tv.html', tvs=_videos_by_category('movie'))


# music
@main.route('/music')
def music_category():
    """Render every music post, newest first (reuses the 'tv.html' template)."""
    return render_template('tv.html', tvs=_videos_by_category('music'))


# animation
@main.route('/animation')
def animations_category():
    """Render every animation post, newest first (reuses the 'tv.html' template)."""
    return render_template('tv.html', tvs=_videos_by_category('animation'))
@main.route('/user/<username>')
def user(username):
    """Show a user's profile page with their posts, newest first (paginated).

    404s when *username* does not exist.
    """
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    return render_template('user.html', user=user, posts=posts,
                           pagination=pagination)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their own profile (name, location, bio)."""
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.name = form.name.data
        current_user.location = form.location.data
        current_user.about_me = form.about_me.data
        db.session.add(current_user)
        flash('Your profile has been updated.')
        return redirect(url_for('.user', username=current_user.username))
    # GET (or invalid POST): pre-fill the form with the current values.
    form.name.data = current_user.name
    form.location.data = current_user.location
    form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Administrator view: edit any user's full profile, role and
    confirmation status.  404s when the user id does not exist."""
    user = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=user)
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.confirmed = form.confirmed.data
        user.role = Role.query.get(form.role.data)
        user.name = form.name.data
        user.location = form.location.data
        user.about_me = form.about_me.data
        db.session.add(user)
        flash('The profile has been updated.')
        return redirect(url_for('.user', username=user.username))
    # GET (or invalid POST): pre-fill the form with the stored values.
    form.email.data = user.email
    form.username.data = user.username
    form.confirmed.data = user.confirmed
    form.role.data = user.role_id
    form.name.data = user.name
    form.location.data = user.location
    form.about_me.data = user.about_me
    return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Show a single post with its paginated comments; handle new comments.

    ``page=-1`` in the query string jumps to the last comment page.
    """
    post = Post.query.get_or_404(id)
    form = CommentForm()
    if form.validate_on_submit():
        # NOTE(review): this route is not @login_required yet dereferences
        # current_user here — confirm anonymous posting is prevented upstream.
        comment = Comment(body=form.body.data,
                          post=post,
                          author=current_user._get_current_object())
        db.session.add(comment)
        flash('Your comment has been published.')
        return redirect(url_for('.post', id=post.id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # Translate the "last page" sentinel into a concrete page number.
        page = (post.comments.count() - 1) // \
            current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    comments = pagination.items
    return render_template('post.html', posts=[post], form=form,
                           comments=comments, pagination=pagination)
@main.route('/video/play/<int:id>', methods=['GET', 'POST'])
def play_video(id):
    """Play a video post: bump its play counter, accept new comments and
    render the player page with paginated comments plus related videos.

    :param id: primary key of the :class:`Post` to play (404 if missing).
    """
    post = Post.query.get_or_404(id)
    # Best-effort view counter: a failed commit must not break playback.
    # Bug fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt
    # and left the session in a failed state; catch Exception and roll back.
    try:
        post.play_times = post.play_times + 1
        db.session.commit()
    except Exception:
        db.session.rollback()
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(body=form.body.data,
                          post=post,
                          author=current_user._get_current_object())
        db.session.add(comment)
        flash('Your comment has been published.')
        # NOTE(review): redirects to the '.post' view rather than back to the
        # player — confirm this is intentional.
        return redirect(url_for('.post', id=post.id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # Translate the "last page" sentinel into a concrete page number.
        page = (post.comments.count() - 1) // \
            current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    comments = pagination.items
    # Up to four other videos from the same category for the sidebar.
    others = Post.query.filter(Post.category == post.category).order_by(Post.timestamp.desc()).limit(4).all()
    return render_template('play.html', video=post, form=form,
                           comments=comments, pagination=pagination, others=others)
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
    """Edit a post's body; only its author or an administrator may do so."""
    post = Post.query.get_or_404(id)
    if current_user != post.author and \
            not current_user.can(Permission.ADMINISTER):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        post.body = form.body.data
        db.session.add(post)
        flash('The post has been updated.')
        return redirect(url_for('.post', id=post.id))
    # GET (or invalid POST): pre-fill the editor with the current body.
    form.body.data = post.body
    return render_template('edit_post.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Make the current user follow *username*, flashing the outcome."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if current_user.is_following(target):
        flash('You are already following this user.')
        return redirect(url_for('.user', username=username))
    current_user.follow(target)
    flash('You are now following %s.' % username)
    return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Make the current user stop following *username*, flashing the outcome."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if not current_user.is_following(target):
        flash('You are not following this user.')
        return redirect(url_for('.user', username=username))
    current_user.unfollow(target)
    flash('You are not following %s anymore.' % username)
    return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
    """List the users who follow *username* (paginated)."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followers.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    # Flatten the association objects into template-friendly dicts.
    follows = [{'user': item.follower, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=user, title="Followers of",
                           endpoint='.followers', pagination=pagination,
                           follows=follows)
@main.route('/followed-by/<username>')
def followed_by(username):
    """List the users that *username* follows (paginated)."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followed.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    # Flatten the association objects into template-friendly dicts.
    follows = [{'user': item.followed, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=user, title="Followed by",
                           endpoint='.followed_by', pagination=pagination,
                           follows=follows)
@main.route('/all')
@login_required
def show_all():
    """Clear the 'show_followed' cookie so the index shows every post."""
    response = make_response(redirect(url_for('.index')))
    response.set_cookie('show_followed', '', max_age=30*24*60*60)
    return response
@main.route('/followed')
@login_required
def show_followed():
    """Set the 'show_followed' cookie so the index shows followed posts only."""
    response = make_response(redirect(url_for('.index')))
    response.set_cookie('show_followed', '1', max_age=30*24*60*60)
    return response
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate():
    """Comment-moderation dashboard: all comments, newest first (paginated)."""
    page = request.args.get('page', 1, type=int)
    pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    comments = pagination.items
    return render_template('moderate.html', comments=comments,
                           pagination=pagination, page=page)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_enable(id):
    """Un-hide a moderated comment, then return to the same moderation page."""
    target = Comment.query.get_or_404(id)
    target.disabled = False
    db.session.add(target)
    current_page = request.args.get('page', 1, type=int)
    return redirect(url_for('.moderate', page=current_page))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_disable(id):
    """Hide a comment from public view, then return to the same moderation page."""
    target = Comment.query.get_or_404(id)
    target.disabled = True
    db.session.add(target)
    current_page = request.args.get('page', 1, type=int)
    return redirect(url_for('.moderate', page=current_page))
# Upload a new discussion topic (title, body text and an image).
@main.route('/topic/upload', methods=['POST', 'GET'])
@login_required
def topic_upload():
    """Handle the topic upload form; stores the image under static/topic_image.

    The stored file name is reduced to its base name so an uploaded name
    containing path components cannot escape the target directory.
    """
    form = UploadTopicForm()
    if form.validate_on_submit():
        topic = Topic(title=form.title.data,
                      content=form.content.data)
        topic.author_id = current_user.id
        # Security: upload file names are untrusted; strip any directory parts
        # (the previous string concatenation allowed '../' traversal).
        filename = os.path.basename(form.image.data.filename)
        form.image.data.save(os.path.join(os.getcwd(), 'app/static/topic_image', filename))
        topic.image_url = '/static/topic_image/' + filename
        db.session.add(topic)
        db.session.commit()
        flash("上传新主题成功!")
        return redirect(url_for('.topic_upload'))
    return render_template('upload_topic.html', form=form)
# Upload a video post (title/category, thumbnail, cover image and video file).
@main.route('/video/upload', methods=['POST', 'GET'])
@login_required
def video_upload():
    """Handle the video upload form.

    Stores each upload in its own md5-named directory under static/video,
    resizes the thumbnail to 263x147 and, for non-mp4 uploads, starts a
    background ffmpeg transcode to mp4.
    """
    form = UploadVedioForm()
    if form.validate_on_submit():
        post = Post(
            title=form.title.data,
            category=form.category.data
        )
        post.author_id = current_user.id
        # Unique per-upload directory: md5 of the file name plus upload time.
        dirname = tran2md5(form.video.data.filename + (str)(time.time()))
        # (Removed the dead os.path.abspath-based dirpath that the old TODO
        # flagged — it was immediately overwritten by this assignment.)
        dirpath = os.path.join(os.getcwd() + '/app/static/video', dirname)
        os.mkdir(dirpath)
        # Thumbnail: save, then resize to the fixed card size.
        filename = 'picture' + get_extname(form.image.data.filename)
        form.image.data.save(os.path.join(dirpath, filename))
        from PIL import Image
        im = Image.open(os.path.join(dirpath, filename))
        im.resize((263, 147)).save(os.path.join(dirpath, filename))
        post.image_url = '/static/video/' + dirname + '/' + filename
        # Cover image is stored as-is.
        filename = 'cover_image' + get_extname(form.cover_image.data.filename)
        form.cover_image.data.save(os.path.join(dirpath, filename))
        post.cover_image_url = '/static/video/' + dirname + '/' + filename
        # The video itself.
        filename = 'video' + get_extname(form.video.data.filename)
        form.video.data.save(os.path.join(dirpath, filename))
        if not get_extname(form.video.data.filename) == '.mp4':
            # Transcode to mp4 in the background.  Security: the argument-list
            # form avoids the shell injection the previous os.popen(string)
            # call allowed (the extension comes from an untrusted upload name).
            import subprocess
            subprocess.Popen(['ffmpeg', '-i', os.path.join(dirpath, filename),
                              os.path.join(dirpath, 'video.mp4')])
        post.video_url = '/static/video/' + dirname + '/' + filename
        # For mp4 uploads this points at the originally saved file itself.
        post.video_url_mp4 = '/static/video/' + dirname + '/video.mp4'
        db.session.add(post)
        db.session.commit()
        flash("视频上传成功")
        return redirect(url_for('.video_upload'))
    return render_template('upload_video.html', form=form)
def tran2md5(src):
    """Return the hexadecimal MD5 digest of the UTF-8 encoding of *src*."""
    return hashlib.md5(src.encode('utf-8')).hexdigest()
def get_extname(filename):
    """Return *filename*'s extension including the leading dot ('' if none)."""
    return os.path.splitext(filename)[1]
| 36.845209 | 126 | 0.660976 |
7a3e13ff737d1ef7195ae3879573172b0736eddd | 2,896 | py | Python | Player.py | PubuduS/pygame | 7137942ea6f99745295c94a12802b738d41c0f00 | [
"MIT"
] | null | null | null | Player.py | PubuduS/pygame | 7137942ea6f99745295c94a12802b738d41c0f00 | [
"MIT"
] | null | null | null | Player.py | PubuduS/pygame | 7137942ea6f99745295c94a12802b738d41c0f00 | [
"MIT"
] | null | null | null | import pygame
from GameGenerics import *
class Player(GameGenerics):
    """The player's spaceship: drawing, movement, firing and boundary logic."""

    ## Constructor for Player class
    # Initialize the class member variables.
    def __init__(self):
        super(Player, self).__init__()
        ## holds a spaceship image.
        # space_invaders.png Icon made by Freepic from www.flaticon.com
        self.player_image = "images/space_invaders.png"
        ## holds a bullet image.
        self.bullet_image = "images/bullet.png"
        self.bullet_img = pygame.image.load(self.bullet_image)
        self.player_img = pygame.image.load(self.player_image)
        self.playerX_change = 0
        # [x-axis delta, bullet fire state] shared with the game loop.
        self.control_data = [0, "ready"]

    ## Player function will draw the image across screen.
    # param1 (screen): takes a reference to player screen.
    # param2 (x_axis): takes the x axis position which is used to draw the image.
    # param3 (y_axis): takes the y axis position which is used to draw the image.
    # return void
    def player(self, screen, x_axis=0, y_axis=0):
        screen.blit(self.player_img, (x_axis, y_axis))

    ## player_controls translates keyboard events into a horizontal speed and
    # a bullet fire state.
    # param1 (event): a pygame event.
    # param2 (fire_state): current bullet state ("ready" or "fire").
    # return list [playerX_change, fire_state]
    def player_controls(self, event, fire_state):
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                self.playerX_change = -5
            if event.key == pygame.K_RIGHT:
                self.playerX_change = 5
            # We only need to fire bullet when it is in ready state.
            if fire_state == "ready":
                if event.key == pygame.K_SPACE:
                    fire_state = "fire"
                    # Bug fix: `mixer` was referenced unqualified and was only
                    # available if the star-import from GameGenerics happened to
                    # provide it; use the explicit pygame.mixer module instead.
                    bullet_sound = pygame.mixer.Sound("sound/laser.wav")
                    bullet_sound.play()
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                self.playerX_change = 0
        self.control_data[0] = self.playerX_change
        self.control_data[1] = fire_state
        return self.control_data

    ## boundary_control clamps the player sprite to the visible screen.
    # param1 (x_axis): takes the position of x axis.
    # return int x_axis
    def boundary_control(self, x_axis):
        if x_axis <= 0:
            x_axis = 0
        elif x_axis >= 736:
            # 736 = screen width (800) minus the sprite width (64).
            # NOTE(review): assumed from the constant — confirm sprite size.
            x_axis = 736
        return x_axis

    ## fire_bullets draws the bullet slightly offset from the ship's position.
    # param1 (screen): takes a reference to screen.
    # param2 (x_axis): takes position of x axis.
    # param3 (y_axis): takes position of y axis.
    # return void
    def fire_bullets(self, screen, x_axis, y_axis):
        screen.blit(self.bullet_img, (x_axis + 16, y_axis + 10))
| 36.2 | 92 | 0.635014 |
a718c368754bec8f2fec75a0d06db98382aacf2b | 1,377 | py | Python | Tweeter.py | celif/Daily-Art-Twitter-Bot | 86bee44bed8ae7c958d3639c8b03bfa219392fae | [
"MIT"
] | null | null | null | Tweeter.py | celif/Daily-Art-Twitter-Bot | 86bee44bed8ae7c958d3639c8b03bfa219392fae | [
"MIT"
] | null | null | null | Tweeter.py | celif/Daily-Art-Twitter-Bot | 86bee44bed8ae7c958d3639c8b03bfa219392fae | [
"MIT"
] | null | null | null | #!/usr/bin/python
from ImportedLibs import *
from ArtFinder import *
class Tweeter(object):
    """Posts a daily artwork (image plus caption) to Twitter via Twython."""

    def setAuth(self):
        """Build and return an authenticated Twython client.

        NOTE(review): the four credential strings below are placeholders —
        load real keys from the environment or a config file, never commit them.
        """
        APP_KEY = 'Consumer Key'
        APP_SECRET = 'Consumer Secret'
        OAUTH_TOKEN = 'Access Token'
        OAUTH_TOKEN_SECRET = 'Access Token Secret'
        twitter = Twython(APP_KEY, APP_SECRET,OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        return twitter

    def tweet_artwork(self, twitter, dict):
        """Download the artwork described by *dict* and tweet it.

        :param twitter: authenticated Twython client (from :meth:`setAuth`).
        :param dict: mapping with 'artwork' (image URL), 'title', 'artist' and
            'date' keys.  NOTE(review): this parameter shadows the builtin
            ``dict``; consider renaming in a follow-up (kept for compatibility).
        :returns: ``False`` if Twitter reports the uploaded media exceeds the
            size limit, otherwise ``None`` after posting the status.
        """
        IMAGE_SIZE_LIMIT = 5242880  # 5 MB
        # Get the image from the dictionary
        artwork = requests.get(dict['artwork'])
        # Write artwork content to pictureFile
        pictureFile = BytesIO(artwork.content)
        tweetString = dict['title'] + "\nBy: " + dict['artist'] + " in " + dict['date']
        response = twitter.upload_media(media=pictureFile)
        # NOTE(review): the size check happens after the upload request; the
        # media has already been sent to Twitter at this point.
        if response['size'] > IMAGE_SIZE_LIMIT:
            return False
        else:
            twitter.update_status(status=tweetString, media_ids=[response['media_id']])
if __name__ == '__main__':
    # Instantiate Tweeter and ArtFinder classes
    tweeterObject = Tweeter()
    artFinderObject = ArtFinder()
    account = tweeterObject.setAuth()
    # Tweet at 24 hour intervals (runs forever).
    while True:
        tweetData = artFinderObject.get_image_data()
        tweeterObject.tweet_artwork(account, tweetData)
        time.sleep(86400)  # 24 * 60 * 60 seconds
| 25.5 | 87 | 0.636166 |
d4cd4596ad7f6e0187f91e645753c131d68a9a4a | 845 | py | Python | python/orthogonal_test.py | davxy/numeric | 1e8b44a72e1d570433a5ba81ae0795a750ce5921 | [
"Unlicense"
] | 2 | 2020-05-03T17:02:44.000Z | 2022-02-21T04:09:34.000Z | python/orthogonal_test.py | davxy/numeric | 1e8b44a72e1d570433a5ba81ae0795a750ce5921 | [
"Unlicense"
] | null | null | null | python/orthogonal_test.py | davxy/numeric | 1e8b44a72e1d570433a5ba81ae0795a750ce5921 | [
"Unlicense"
] | null | null | null | # Orthogonal linear system solver tests
from math import sqrt
import numpy as np
from orthogonal import orthogonal
################################################################################
# 2x2 orthogonal matrix
A = np.matrix('1 1;'
              '1 -1', float)
A = A*1.0/sqrt(2.0)
# Known terms vector
b = np.matrix('2; 3')
# Solve the system
x = orthogonal(A, b, 1)
# Check (idiomatic truthiness test instead of comparing `== False`)
if not np.allclose(b, A*x):
    raise Exception('Orthogonal test failure')

################################################################################
# 3x3 orthogonal matrix (the original comment mislabeled this as 2x2)
A = np.matrix('2 -2 1;'
              '1 2 2;'
              '2 1 -2', float)
A = A*1.0/3.0
# Known terms vector
b = np.matrix('2; 3; 4')
# Solve the system
x = orthogonal(A, b)
# Check
if not np.allclose(b, A*x):
    raise Exception('Orthogonal test failure')
76bcc4e3ca6f4c2e442b34bdc9eefa2ddf712e68 | 5,313 | py | Python | Python/maximum-sum-of-3-non-overlapping-subarrays.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2022-01-30T06:55:28.000Z | 2022-01-30T06:55:28.000Z | Python/maximum-sum-of-3-non-overlapping-subarrays.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | null | null | null | Python/maximum-sum-of-3-non-overlapping-subarrays.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2021-12-31T03:56:39.000Z | 2021-12-31T03:56:39.000Z | # Time: O(n)
# Space: O(n)
# 689
# In a given array nums of positive integers, find three non-overlapping subarrays with maximum sum.
#
# Each subarray will be of size k, and we want to maximize the sum of all 3*k entries.
#
# Return the result as a list of indices representing the starting position of each interval (0-indexed).
# If there are multiple answers, return the lexicographically smallest one.
#
# Example:
# Input: [1,2,1,2,6,7,5,1], 2
# Output: [0, 3, 5]
#
# Explanation: Subarrays [1, 2], [2, 6], [7, 5] correspond to the starting indices [0, 3, 5].
# We could have also taken [2, 1], but an answer of [1, 3, 5] would be lexicographically larger.
#
# Note:
# - nums.length will be between 1 and 20000.
# - nums[i] will be between 1 and 65535.
# - k will be between 1 and floor(nums.length / 3).
class Solution(object):
    # LeetCode 689: three non-overlapping k-size subarrays with maximum total sum.

    def maxSumOfThreeSubarrays(self, nums, k):  # USE THIS
        """
        :type nums: List[int]
        :type k: int
        :rtype: List[int]
        """
        # Prefix sums: accu[i] = sum(nums[:i]), so a window sum is accu[i+k]-accu[i].
        accu = [0]
        for num in nums:
            accu.append(accu[-1]+num)
        # left_pos[x] is start pos of largest k-size subarray in nums[:x] (included).
        n = len(nums)
        left_pos = [0] * n
        total = accu[k]-accu[0]
        for i in range(k, n):
            cur = accu[i+1] - accu[i+1-k]
            if cur > total:
                # Strict > keeps the earliest start on ties (lexicographic order).
                left_pos[i], total = i+1-k, cur
            else:
                left_pos[i] = left_pos[i-1]
        # right_pos[x] is start pos of largest k-size subarray in nums[x:] (included).
        right_pos = [n-k] * n
        total = accu[n]-accu[n-k]
        for i in reversed(range(n-k)):
            cur = accu[i+k]-accu[i]
            if cur >= total:  # !!!! THIS IS IMPORTANT need lexicographically smallest when sums are equal
                right_pos[i], total = i, cur
            else:
                right_pos[i] = right_pos[i+1]
        # Enumerate the middle window; combine with the best left/right windows.
        ans, max_sum = [], 0
        for i in range(k, n-2*k+1):
            left, right = left_pos[i-1], right_pos[i+k]
            total = (accu[i+k]-accu[i]) + \
                (accu[left+k]-accu[left]) + \
                (accu[right+k]-accu[right])
            if total > max_sum:
                max_sum = total
                ans = [left, i, right]
        return ans

    # Preprocessing in O(n) time.
    # Build the k-term window-sum array sums: sums[i] = sum(nums[i .. i + k - 1])
    # !!<- this sums is complex than accu in above solution.
    # Left-to-right, build maxa = (best window sum va, its start ia) over starts <= x.
    # Right-to-left, build maxb = (best window sum vb, its start ib) over starts >= x.
    #
    # Enumerate the middle window's start x over [k, nsize - k);
    # the three-window total is then sums[x] + va + vb.
    def maxSumOfThreeSubarrays_bookshadow(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: List[int]
        """
        size = len(nums)
        nsize = size - k + 1
        sums = [0] * nsize
        maxa = [0] * nsize
        maxb = [0] * nsize
        total = 0
        # Sliding-window sums of length k.
        for x in range(size):
            total += nums[x]
            if x >= k - 1:
                sums[x - k + 1] = total
                total -= nums[x - k + 1]
        maxn, maxi = 0, 0
        for x in range(nsize):
            if sums[x] > maxn:
                maxn, maxi = sums[x], x
            maxa[x] = (maxn, maxi)
        maxn, maxi = 0, nsize - 1
        for x in range(nsize - 1, -1, -1):
            # NOTE(review): strict > in this right-to-left pass keeps the larger
            # start index on ties; the primary method above uses >= to get the
            # lexicographically smallest answer — confirm if that matters here.
            if sums[x] > maxn:
                maxn, maxi = sums[x], x
            maxb[x] = (maxn, maxi)
        ansn, ansi = 0, None
        for x in range(k, nsize - k):
            va, ia = maxa[x - k]
            vb, ib = maxb[x + k]
            if sums[x] + va + vb > ansn:
                ansn = sums[x] + va + vb
                ansi = [ia, x, ib]
        return ansi

    # TLE O(n^2), filled upper triangle of 2D DP array, actually only need to fill 1 row dp[0][:] and 1 column dp[:][N-1]
    def maxSumOfThreeSubarrays_ming(self, nums, k):
        N = len(nums)
        dp = [[0] * N for _ in range(N)]  # store the sum of nums[i:j+1]
        idx = [[0] * N for _ in range(N)]  # store the starting idx of largest k-size subarray in nums[i:j+1]
        # prepare the data
        for i in range(N - k + 1):
            dp[i][i + k - 1] = sum(nums[i:i + k])
            idx[i][i + k - 1] = i
        # Grow interval size; >= prefers the earlier start on ties.
        for size in range(k + 1, N + 1):
            for i in range(N - size + 1):
                j = i + size - 1
                if dp[i][j - 1] >= dp[i + 1][j]:
                    dp[i][j] = dp[i][j - 1]
                    idx[i][j] = idx[i][j - 1]
                else:
                    dp[i][j] = dp[i + 1][j]
                    idx[i][j] = idx[i + 1][j]
        # divide the whole array into 3 parts
        max_sum, max_i, max_j, max_k = float('-inf'), None, None, None
        for i in range(k, N - 2 * k + 1):
            ssum = dp[0][i - 1] + dp[i][i+k-1] + dp[i+k][N - 1]
            if ssum > max_sum:
                max_sum, max_i, max_j, max_k = ssum, idx[0][i - 1], idx[i][i+k-1], idx[i+k][N - 1]
        return [max_i, max_j, max_k]
# Quick smoke tests; expected answers are in the trailing comments.
print(Solution().maxSumOfThreeSubarrays([9,8,7,6,2,2,2,2], 2)) # [0,2,4]
print(Solution().maxSumOfThreeSubarrays([1,2,1,2,6,7,5,1], 2)) # [0,3,5]
print(Solution().maxSumOfThreeSubarrays([1,2,1,1,2,6,2,2,1], 2)) # [0,4,6]
print(Solution().maxSumOfThreeSubarrays([1,2,1,1,2,6,2,2,2,1], 2)) # [0,4,6]
ba9739ccb22e5708458c453eeb366de77d0e4b4d | 30,759 | py | Python | humanfriendly/terminal/__init__.py | blaise-io/python-humanfriendly | 1abbe85eb281fca17ae1cb769d0a23acdd2e1bd8 | [
"MIT"
] | 249 | 2015-01-03T09:31:18.000Z | 2022-03-26T13:46:13.000Z | humanfriendly/terminal/__init__.py | blaise-io/python-humanfriendly | 1abbe85eb281fca17ae1cb769d0a23acdd2e1bd8 | [
"MIT"
] | 55 | 2015-06-24T18:48:10.000Z | 2022-03-21T12:14:14.000Z | virtual/lib/python3.6/site-packages/humanfriendly/terminal/__init__.py | Mercy-Njoroge/blog | 404336fb0fc8d172ddde8b744042cb3f37d89c65 | [
"MIT"
] | 47 | 2015-01-03T09:35:28.000Z | 2022-02-28T16:59:58.000Z | # Human friendly input/output in Python.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: March 1, 2020
# URL: https://humanfriendly.readthedocs.io
"""
Interaction with interactive text terminals.
The :mod:`~humanfriendly.terminal` module makes it easy to interact with
interactive text terminals and format text for rendering on such terminals. If
the terms used in the documentation of this module don't make sense to you then
please refer to the `Wikipedia article on ANSI escape sequences`_ for details
about how ANSI escape sequences work.
This module was originally developed for use on UNIX systems, but since then
Windows 10 gained native support for ANSI escape sequences and this module was
enhanced to recognize and support this. For details please refer to the
:func:`enable_ansi_support()` function.
.. _Wikipedia article on ANSI escape sequences: http://en.wikipedia.org/wiki/ANSI_escape_code#Sequence_elements
"""
# Standard library modules.
import codecs
import numbers
import os
import platform
import re
import subprocess
import sys
# The `fcntl' module is platform specific so importing it may give an error. We
# hide this implementation detail from callers by handling the import error and
# setting a flag instead.
try:
import fcntl
import termios
import struct
HAVE_IOCTL = True
except ImportError:
HAVE_IOCTL = False
# Modules included in our package.
from humanfriendly.compat import coerce_string, is_unicode, on_windows, which
from humanfriendly.decorators import cached
from humanfriendly.deprecation import define_aliases
from humanfriendly.text import concatenate, format
from humanfriendly.usage import format_usage
# Public identifiers that require documentation.
__all__ = (
'ANSI_COLOR_CODES',
'ANSI_CSI',
'ANSI_ERASE_LINE',
'ANSI_HIDE_CURSOR',
'ANSI_RESET',
'ANSI_SGR',
'ANSI_SHOW_CURSOR',
'ANSI_TEXT_STYLES',
'CLEAN_OUTPUT_PATTERN',
'DEFAULT_COLUMNS',
'DEFAULT_ENCODING',
'DEFAULT_LINES',
'HIGHLIGHT_COLOR',
'ansi_strip',
'ansi_style',
'ansi_width',
'ansi_wrap',
'auto_encode',
'clean_terminal_output',
'connected_to_terminal',
'enable_ansi_support',
'find_terminal_size',
'find_terminal_size_using_ioctl',
'find_terminal_size_using_stty',
'get_pager_command',
'have_windows_native_ansi_support',
'message',
'output',
'readline_strip',
'readline_wrap',
'show_pager',
'terminal_supports_colors',
'usage',
'warning',
)
ANSI_CSI = '\x1b['
"""The ANSI "Control Sequence Introducer" (a string)."""
ANSI_SGR = 'm'
"""The ANSI "Select Graphic Rendition" sequence (a string)."""
ANSI_ERASE_LINE = '%sK' % ANSI_CSI
"""The ANSI escape sequence to erase the current line (a string)."""
ANSI_RESET = '%s0%s' % (ANSI_CSI, ANSI_SGR)
"""The ANSI escape sequence to reset styling (a string)."""
ANSI_HIDE_CURSOR = '%s?25l' % ANSI_CSI
"""The ANSI escape sequence to hide the text cursor (a string)."""
ANSI_SHOW_CURSOR = '%s?25h' % ANSI_CSI
"""The ANSI escape sequence to show the text cursor (a string)."""
ANSI_COLOR_CODES = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7)
"""
A dictionary with (name, number) pairs of `portable color codes`_. Used by
:func:`ansi_style()` to generate ANSI escape sequences that change font color.
.. _portable color codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
"""
ANSI_TEXT_STYLES = dict(bold=1, faint=2, italic=3, underline=4, inverse=7, strike_through=9)
"""
A dictionary with (name, number) pairs of text styles (effects). Used by
:func:`ansi_style()` to generate ANSI escape sequences that change text
styles. Only widely supported text styles are included here.
"""
CLEAN_OUTPUT_PATTERN = re.compile(u'(\r|\n|\b|%s)' % re.escape(ANSI_ERASE_LINE))
"""
A compiled regular expression used to separate significant characters from other text.
This pattern is used by :func:`clean_terminal_output()` to split terminal
output into regular text versus backspace, carriage return and line feed
characters and ANSI 'erase line' escape sequences.
"""
DEFAULT_LINES = 25
"""The default number of lines in a terminal (an integer)."""
DEFAULT_COLUMNS = 80
"""The default number of columns in a terminal (an integer)."""
DEFAULT_ENCODING = 'UTF-8'
"""The output encoding for Unicode strings."""
HIGHLIGHT_COLOR = os.environ.get('HUMANFRIENDLY_HIGHLIGHT_COLOR', 'green')
"""
The color used to highlight important tokens in formatted text (e.g. the usage
message of the ``humanfriendly`` program). If the environment variable
``$HUMANFRIENDLY_HIGHLIGHT_COLOR`` is set it determines the value of
:data:`HIGHLIGHT_COLOR`.
"""
def ansi_strip(text, readline_hints=True):
    """
    Strip ANSI escape sequences from the given string.

    :param text: The text from which ANSI escape sequences should be removed (a
                 string).
    :param readline_hints: If :data:`True` then :func:`readline_strip()` is
                           used to remove `readline hints`_ from the string.
    :returns: The text without ANSI escape sequences (a string).
    """
    sequence = re.escape(ANSI_CSI) + '.*?' + re.escape(ANSI_SGR)
    plain = re.sub(sequence, '', text)
    return readline_strip(plain) if readline_hints else plain
def ansi_style(**kw):
    """
    Generate ANSI escape sequences for the given color and/or style(s).

    :param color: The foreground color. Three types of values are supported:

                  - The name of a color (one of the strings 'black', 'red',
                    'green', 'yellow', 'blue', 'magenta', 'cyan' or 'white').
                  - An integer that refers to the 256 color mode palette.
                  - A tuple or list with three integers representing an RGB
                    (red, green, blue) value.

                  The value :data:`None` (the default) means no escape
                  sequence to switch color will be emitted.
    :param background: The background color (see the description
                       of the `color` argument).
    :param bright: Use high intensity colors instead of default colors
                   (a boolean, defaults to :data:`False`).
    :param readline_hints: If :data:`True` then :func:`readline_wrap()` is
                           applied to the generated ANSI escape sequences (the
                           default is :data:`False`).
    :param kw: Any additional keyword arguments are expected to match a key
               in the :data:`ANSI_TEXT_STYLES` dictionary. If the argument's
               value evaluates to :data:`True` the respective style will be
               enabled.
    :returns: The ANSI escape sequences to enable the requested text styles or
              an empty string if no styles were requested.
    :raises: :exc:`~exceptions.ValueError` when an invalid color name is given.

    Even though only eight named colors are supported, the use of
    `bright=True` and `faint=True` increases the number of available colors.
    256 color mode and 24-bit (RGB) colors are supported via integer and
    tuple/list color values respectively.
    """
    # Start with sequences that change text styles.
    sequences = [ANSI_TEXT_STYLES[k] for k, v in kw.items() if k in ANSI_TEXT_STYLES and v]
    # Append the color code (if any).
    for color_type in 'color', 'background':
        color_value = kw.get(color_type)
        if isinstance(color_value, (tuple, list)):
            # 24-bit (RGB) colors: SGR 38;2;R;G;B selects the foreground,
            # SGR 48;2;R;G;B selects the background.
            if len(color_value) != 3:
                msg = "Invalid color value %r! (expected tuple or list with three numbers)"
                raise ValueError(msg % color_value)
            sequences.append(48 if color_type == 'background' else 38)
            sequences.append(2)
            sequences.extend(map(int, color_value))
        elif isinstance(color_value, numbers.Number):
            # 256 color mode: SGR 38;5;N selects the foreground, SGR 48;5;N
            # selects the background. Bug fix: the original code emitted 39
            # for background colors, but SGR 39 means "default foreground
            # color" - the extended *background* selector is 48 (matching
            # the RGB branch above).
            sequences.extend((
                48 if color_type == 'background' else 38,
                5, int(color_value)
            ))
        elif color_value:
            # Other values are assumed to be strings containing one of the known color names.
            if color_value not in ANSI_COLOR_CODES:
                msg = "Invalid color value %r! (expected an integer or one of the strings %s)"
                raise ValueError(msg % (color_value, concatenate(map(repr, sorted(ANSI_COLOR_CODES)))))
            # Pick the right offset for foreground versus background
            # colors and regular intensity versus bright colors.
            offset = (
                (100 if kw.get('bright') else 40)
                if color_type == 'background'
                else (90 if kw.get('bright') else 30)
            )
            # Combine the offset and color code into a single integer.
            sequences.append(offset + ANSI_COLOR_CODES[color_value])
    if sequences:
        encoded = ANSI_CSI + ';'.join(map(str, sequences)) + ANSI_SGR
        return readline_wrap(encoded) if kw.get('readline_hints') else encoded
    else:
        return ''
def ansi_width(text):
    """
    Calculate the effective width of the given text (ignoring ANSI escape sequences).

    :param text: The text whose width should be calculated (a string).
    :returns: The width of the text without ANSI escape sequences (an
              integer).
    """
    # Escape sequences occupy no columns on screen, so the effective width
    # is just the character count of the stripped text.
    stripped = ansi_strip(text)
    return len(stripped)
def ansi_wrap(text, **kw):
    """
    Wrap text in ANSI escape sequences for the given color and/or style(s).

    :param text: The text to wrap (a string).
    :param kw: Any keyword arguments are passed to :func:`ansi_style()`.
    :returns: The text prefixed with the generated ANSI escape sequence and
              suffixed with :data:`ANSI_RESET`, or the unchanged text when
              :func:`ansi_style()` produced an empty string.
    """
    prefix = ansi_style(**kw)
    if not prefix:
        # No styles were requested, nothing to wrap.
        return text
    # Reset all attributes after the text so the styling doesn't leak.
    suffix = readline_wrap(ANSI_RESET) if kw.get('readline_hints') else ANSI_RESET
    return prefix + text + suffix
def auto_encode(stream, text, *args, **kw):
    """
    Reliably write Unicode strings to the terminal.

    :param stream: The file-like object to write to (a value like
                   :data:`sys.stdout` or :data:`sys.stderr`).
    :param text: The text to write to the stream (a string).
    :param args: Refer to :func:`~humanfriendly.text.format()`.
    :param kw: Refer to :func:`~humanfriendly.text.format()`.

    Renders the text using :func:`~humanfriendly.text.format()` and writes it
    to the given stream. Should that raise :exc:`~exceptions.UnicodeEncodeError`
    the write is retried with the text encoded using :data:`DEFAULT_ENCODING`:
    output in the wrong encoding beats crashing the program.
    """
    rendered = format(text, *args, **kw)
    try:
        stream.write(rendered)
    except UnicodeEncodeError:
        # Fall back to writing an encoded byte string.
        stream.write(codecs.encode(rendered, DEFAULT_ENCODING))
def clean_terminal_output(text):
    """
    Clean up the terminal output of a command.
    :param text: The raw text with special characters (a Unicode string).
    :returns: A list of Unicode strings (one for each line).
    This function emulates the effect of backspace (0x08), carriage return
    (0x0D) and line feed (0x0A) characters and the ANSI 'erase line' escape
    sequence on interactive terminals. It's intended to clean up command output
    that was originally meant to be rendered on an interactive terminal and
    that has been captured using e.g. the :man:`script` program [#]_ or the
    :mod:`pty` module [#]_.
    .. [#] My coloredlogs_ package supports the ``coloredlogs --to-html``
    command which uses :man:`script` to fool a subprocess into thinking
    that it's connected to an interactive terminal (in order to get it
    to emit ANSI escape sequences).
    .. [#] My capturer_ package uses the :mod:`pty` module to fool the current
    process and subprocesses into thinking they are connected to an
    interactive terminal (in order to get them to emit ANSI escape
    sequences).
    **Some caveats about the use of this function:**
    - Strictly speaking the effect of carriage returns cannot be emulated
    outside of an actual terminal due to the interaction between overlapping
    output, terminal widths and line wrapping. The goal of this function is
    to sanitize noise in terminal output while preserving useful output.
    Think of it as a useful and pragmatic but possibly lossy conversion.
    - The algorithm isn't smart enough to properly handle a pair of ANSI escape
    sequences that open before a carriage return and close after the last
    carriage return in a linefeed delimited string; the resulting string will
    contain only the closing end of the ANSI escape sequence pair. Tracking
    this kind of complexity requires a state machine and proper parsing.
    .. _capturer: https://pypi.org/project/capturer
    .. _coloredlogs: https://pypi.org/project/coloredlogs
    """
    cleaned_lines = []
    current_line = ''
    current_position = 0
    # CLEAN_OUTPUT_PATTERN captures its delimiters, so re.split() yields
    # regular text interleaved with the special tokens handled below.
    for token in CLEAN_OUTPUT_PATTERN.split(text):
        if token == '\r':
            # Seek back to the start of the current line.
            current_position = 0
        elif token == '\b':
            # Seek back one character in the current line.
            current_position = max(0, current_position - 1)
        else:
            if token == '\n':
                # Capture the current line.
                cleaned_lines.append(current_line)
            if token in ('\n', ANSI_ERASE_LINE):
                # Clear the current line.
                current_line = ''
                current_position = 0
            elif token:
                # Merge regular output into the current line. Text written
                # after a carriage return or backspace overwrites the
                # characters at the current position, like a real terminal.
                new_position = current_position + len(token)
                prefix = current_line[:current_position]
                suffix = current_line[new_position:]
                current_line = prefix + token + suffix
                current_position = new_position
    # Capture the last line (if any).
    cleaned_lines.append(current_line)
    # Remove any empty trailing lines.
    while cleaned_lines and not cleaned_lines[-1]:
        cleaned_lines.pop(-1)
    return cleaned_lines
def connected_to_terminal(stream=None):
    """
    Check if a stream is connected to a terminal.

    :param stream: The stream to check (a file-like object,
                   defaults to :data:`sys.stdout`).
    :returns: :data:`True` if the stream is connected to a terminal,
              :data:`False` otherwise.

    See also :func:`terminal_supports_colors()`.
    """
    if stream is None:
        stream = sys.stdout
    try:
        result = stream.isatty()
    except Exception:
        # Objects without a working isatty() are not terminals.
        result = False
    return result
@cached
def enable_ansi_support():
    """
    Try to enable support for ANSI escape sequences (required on Windows).
    :returns: :data:`True` if ANSI is supported, :data:`False` otherwise.
    This functions checks for the following supported configurations, in the
    given order:
    1. On Windows, if :func:`have_windows_native_ansi_support()` confirms
    native support for ANSI escape sequences :mod:`ctypes` will be used to
    enable this support.
    2. On Windows, if the environment variable ``$ANSICON`` is set nothing is
    done because it is assumed that support for ANSI escape sequences has
    already been enabled via `ansicon <https://github.com/adoxa/ansicon>`_.
    3. On Windows, an attempt is made to import and initialize the Python
    package :pypi:`colorama` instead (of course for this to work
    :pypi:`colorama` has to be installed).
    4. On other platforms this function calls :func:`connected_to_terminal()`
    to determine whether ANSI escape sequences are supported (that is to
    say all platforms that are not Windows are assumed to support ANSI
    escape sequences natively, without weird contortions like above).
    This makes it possible to call :func:`enable_ansi_support()`
    unconditionally without checking the current platform.
    The :func:`~humanfriendly.decorators.cached` decorator is used to ensure
    that this function is only executed once, but its return value remains
    available on later calls.
    """
    if have_windows_native_ansi_support():
        import ctypes
        # GetStdHandle(-11) is STD_OUTPUT_HANDLE and GetStdHandle(-12) is
        # STD_ERROR_HANDLE. Console mode 7 combines ENABLE_PROCESSED_OUTPUT,
        # ENABLE_WRAP_AT_EOL_OUTPUT and ENABLE_VIRTUAL_TERMINAL_PROCESSING;
        # the last flag makes the console interpret ANSI escape sequences.
        ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), 7)
        ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-12), 7)
        return True
    elif on_windows():
        if 'ANSICON' in os.environ:
            # Assume ansicon has already enabled ANSI support.
            return True
        try:
            import colorama
            colorama.init()
            return True
        except ImportError:
            # Without native support, ansicon or colorama there is no known
            # way to get ANSI escape sequences working on Windows.
            return False
    else:
        return connected_to_terminal()
def find_terminal_size():
    """
    Determine the number of lines and columns visible in the terminal.

    :returns: A tuple of two integers with the line and column count.

    The result is based on the first of the following three methods that
    works:

    1. First :func:`find_terminal_size_using_ioctl()` is tried,
    2. then :func:`find_terminal_size_using_stty()` is tried,
    3. finally :data:`DEFAULT_LINES` and :data:`DEFAULT_COLUMNS` are returned.

    .. note:: The result is intentionally not cached: the size of a virtual
              terminal can change at any time and the answer should stay
              correct.
    """
    # Any of the standard streams may have been redirected somewhere and
    # there's no telling which, so just try them all.
    for stream in (sys.stdin, sys.stdout, sys.stderr):
        try:
            lines, columns = find_terminal_size_using_ioctl(stream)
            if lines >= 1 and columns >= 1:
                return lines, columns
        except Exception:
            pass
    # The second method.
    try:
        lines, columns = find_terminal_size_using_stty()
        if lines >= 1 and columns >= 1:
            return lines, columns
    except Exception:
        pass
    # Fall back to conservative defaults.
    return DEFAULT_LINES, DEFAULT_COLUMNS
def find_terminal_size_using_ioctl(stream):
    """
    Find the terminal size using :func:`fcntl.ioctl()`.
    :param stream: A stream connected to the terminal (a file object with a
    ``fileno`` attribute).
    :returns: A tuple of two integers with the line and column count.
    :raises: This function can raise exceptions but I'm not going to document
    them here, you should be using :func:`find_terminal_size()`.
    Based on an `implementation found on StackOverflow <http://stackoverflow.com/a/3010495/788200>`_.
    """
    if not HAVE_IOCTL:
        raise NotImplementedError("It looks like the `fcntl' module is not available!")
    # The TIOCGWINSZ ioctl fills a `struct winsize' of four unsigned shorts:
    # rows, columns, horizontal pixels and vertical pixels. The pixel counts
    # are unreliable on most terminals, so only rows and columns are used.
    h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(stream, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
    return h, w
def find_terminal_size_using_stty():
    """
    Find the terminal size using the external command ``stty size``.

    :returns: A tuple of two integers with the line and column count.
    :raises: This function can raise exceptions but I'm not going to document
             them here, you should be using :func:`find_terminal_size()`.
    """
    # `stty size' prints "LINES COLUMNS" on standard output.
    process = subprocess.Popen(
        ['stty', 'size'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, _ = process.communicate()
    tokens = stdout.split()
    if len(tokens) != 2:
        raise Exception("Invalid output from `stty size'!")
    lines, columns = map(int, tokens)
    return lines, columns
def get_pager_command(text=None):
    """
    Get the command to show a text on the terminal using a pager.

    :param text: The text to print to the terminal (a string).
    :returns: A list of strings with the pager command and arguments.

    If the given text contains ANSI escape sequences the command ``less
    --RAW-CONTROL-CHARS`` is used, otherwise the environment variable
    ``$PAGER`` is used (if ``$PAGER`` isn't set :man:`less` is used).

    When the selected pager is :man:`less` two options are added to make the
    experience more user friendly: ``--quit-if-one-screen`` (exit immediately
    when the whole text fits on one screen) and ``--no-init`` (don't clear
    the screen on exit, so the text stays visible).
    """
    # Pick `less' with raw control characters when the text contains ANSI
    # escape sequences, otherwise honor $PAGER (falling back to `less').
    if text and ANSI_CSI in text:
        command_line = ['less', '--RAW-CONTROL-CHARS']
    else:
        command_line = [os.environ.get('PAGER', 'less')]
    # Pass the friendliness options only to `less' so other pagers
    # (which may not understand them) keep working.
    if os.path.basename(command_line[0]) == 'less':
        command_line += ['--no-init', '--quit-if-one-screen']
    return command_line
@cached
def have_windows_native_ansi_support():
    """
    Check if we're running on a Windows 10 release with native support for ANSI escape sequences.
    :returns: :data:`True` if so, :data:`False` otherwise.
    The :func:`~humanfriendly.decorators.cached` decorator is used as a minor
    performance optimization. Semantically this should have zero impact because
    the answer doesn't change in the lifetime of a computer process.
    """
    if on_windows():
        try:
            # I can't be 100% sure this will never break and I'm not in a
            # position to test it thoroughly either, so I decided that paying
            # the price of one additional try / except statement is worth the
            # additional peace of mind :-).
            # Windows 10 build 14393 is used as the threshold for native
            # console support of ANSI escape sequences.
            components = tuple(int(c) for c in platform.version().split('.'))
            return components >= (10, 0, 14393)
        except Exception:
            pass
    return False
def message(text, *args, **kw):
    """
    Print a formatted message to the standard error stream.

    For details about argument handling please refer to
    :func:`~humanfriendly.text.format()`.

    The rendered message (followed by a newline) is written to
    :data:`sys.stderr` using :func:`auto_encode()`.
    """
    formatted = coerce_string(text) + '\n'
    auto_encode(sys.stderr, formatted, *args, **kw)
def output(text, *args, **kw):
    """
    Print a formatted message to the standard output stream.

    For details about argument handling please refer to
    :func:`~humanfriendly.text.format()`.

    The rendered message (followed by a newline) is written to
    :data:`sys.stdout` using :func:`auto_encode()`.
    """
    formatted = coerce_string(text) + '\n'
    auto_encode(sys.stdout, formatted, *args, **kw)
def readline_strip(expr):
    """
    Remove `readline hints`_ from a string.

    :param expr: The text to strip (a string).
    :returns: The stripped text.
    """
    # \001 (RL_PROMPT_START_IGNORE) and \002 (RL_PROMPT_END_IGNORE) are the
    # markers readline uses to delimit zero-width escape sequences.
    for hint in ('\001', '\002'):
        expr = expr.replace(hint, '')
    return expr
def readline_wrap(expr):
    """
    Wrap an ANSI escape sequence in `readline hints`_.

    :param expr: The text with the escape sequence to wrap (a string).
    :returns: The wrapped text.

    .. _readline hints: http://superuser.com/a/301355
    """
    # Mark the sequence as zero-width so readline computes correct
    # prompt lengths.
    return ''.join(('\001', expr, '\002'))
def show_pager(formatted_text, encoding=DEFAULT_ENCODING):
    """
    Print a large text to the terminal using a pager.
    :param formatted_text: The text to print to the terminal (a string).
    :param encoding: The name of the text encoding used to encode the formatted
    text if the formatted text is a Unicode string (a string,
    defaults to :data:`DEFAULT_ENCODING`).
    When :func:`connected_to_terminal()` returns :data:`True` a pager is used
    to show the text on the terminal, otherwise the text is printed directly
    without invoking a pager.
    The use of a pager helps to avoid the wall of text effect where the user
    has to scroll up to see where the output began (not very user friendly).
    Refer to :func:`get_pager_command()` for details about the command line
    that's used to invoke the pager.
    """
    if connected_to_terminal():
        # Make sure the selected pager command is available.
        command_line = get_pager_command(formatted_text)
        if which(command_line[0]):
            pager = subprocess.Popen(command_line, stdin=subprocess.PIPE)
            if is_unicode(formatted_text):
                # Pipes carry bytes, so encode Unicode text before writing it.
                formatted_text = formatted_text.encode(encoding)
            pager.communicate(input=formatted_text)
            return
    # Not connected to a terminal (or the pager is unavailable):
    # print the text directly instead.
    output(formatted_text)
def terminal_supports_colors(stream=None):
    """
    Check if a stream is connected to a terminal that supports ANSI escape sequences.

    :param stream: The stream to check (a file-like object,
                   defaults to :data:`sys.stdout`).
    :returns: :data:`True` if the terminal supports ANSI escape sequences,
              :data:`False` otherwise.
    """
    if on_windows():
        # On Windows, ANSI escape sequences only work when at least one of
        # these mechanisms is available: ansicon, an imported colorama
        # module, or native console support.
        mechanisms = (
            'ANSICON' in os.environ,
            'colorama' in sys.modules,
            have_windows_native_ansi_support(),
        )
        if not any(mechanisms):
            return False
    return connected_to_terminal(stream)
def usage(usage_text):
    """
    Print a human friendly usage message to the terminal.

    :param usage_text: The usage message to print (a string).

    The message is highlighted using :func:`.format_usage()` when standard
    output is connected to a terminal that supports colors, and is always
    shown through a pager (see :func:`show_pager()`).
    """
    highlighted = (
        format_usage(usage_text)
        if terminal_supports_colors(sys.stdout)
        else usage_text
    )
    show_pager(highlighted)
def warning(text, *args, **kw):
    """
    Show a warning message on the terminal.

    For details about argument handling please refer to
    :func:`~humanfriendly.text.format()`.

    The rendered message (followed by a newline) is written to
    :data:`sys.stderr` using :func:`auto_encode()`. When :data:`sys.stderr`
    is connected to a terminal that supports colors the message is wrapped
    in a red font so the warning stands out from surrounding text.
    """
    message_text = coerce_string(text)
    if terminal_supports_colors(sys.stderr):
        # Render the warning in red to make it stand out.
        message_text = ansi_wrap(message_text, color='red')
    auto_encode(sys.stderr, message_text + '\n', *args, **kw)
# Define aliases for backwards compatibility.
define_aliases(
module_name=__name__,
# In humanfriendly 1.31 the find_meta_variables() and format_usage()
# functions were extracted to the new module humanfriendly.usage.
find_meta_variables='humanfriendly.usage.find_meta_variables',
format_usage='humanfriendly.usage.format_usage',
# In humanfriendly 8.0 the html_to_ansi() function and HTMLConverter
# class were extracted to the new module humanfriendly.terminal.html.
html_to_ansi='humanfriendly.terminal.html.html_to_ansi',
HTMLConverter='humanfriendly.terminal.html.HTMLConverter',
)
| 39.586873 | 114 | 0.67577 |
4b708817cfd9f33b5fe41cb0eeff1f80a606144a | 2,465 | py | Python | homeassistant/components/camera/blink.py | jamescurtin/home-assistant | 6a9968ccb9b0082f5629e50955549d432aba7d90 | [
"Apache-2.0"
] | 2 | 2020-02-20T18:47:55.000Z | 2021-11-09T11:33:28.000Z | homeassistant/components/camera/blink.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 5 | 2022-03-01T06:31:03.000Z | 2022-03-31T07:20:45.000Z | homeassistant/components/camera/blink.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 3 | 2018-08-27T10:08:30.000Z | 2020-07-04T10:07:03.000Z | """
Support for Blink system camera.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.blink/
"""
import logging
from datetime import timedelta
import requests
from homeassistant.components.blink import DOMAIN
from homeassistant.components.camera import Camera
from homeassistant.util import Throttle
DEPENDENCIES = ['blink']
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=90)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up a Blink Camera.

    Only runs when invoked via discovery (discovery_info is not None);
    creates one camera entity per camera known to the Blink account.
    """
    if discovery_info is None:
        return
    data = hass.data[DOMAIN].blink
    devices = [BlinkCamera(hass, config, data, name) for name in data.cameras]
    add_devices(devices)
class BlinkCamera(Camera):
    """An implementation of a Blink Camera."""
    def __init__(self, hass, config, data, name):
        """Initialize a camera."""
        super().__init__()
        self.data = data
        self.hass = hass
        self._name = name
        # Baseline notification count, used by check_for_motion() to detect
        # new motion events since the last update.
        self.notifications = self.data.cameras[self._name].notifications
        # Holds the most recent requests.Response for the camera image.
        self.response = None
        _LOGGER.info("Initialized blink camera %s", self._name)
    @property
    def name(self):
        """Return the camera name."""
        return self._name
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def request_image(self):
        """Request a new image from Blink servers."""
        _LOGGER.info("Requesting new image from blink servers")
        image_url = self.check_for_motion()
        header = self.data.cameras[self._name].header
        # Stream the response so the raw image bytes remain available via
        # self.response.content.
        self.response = requests.get(image_url, headers=header, stream=True)
    def check_for_motion(self):
        """Check if motion has been detected since last update."""
        self.data.refresh()
        notifs = self.data.cameras[self._name].notifications
        if notifs > self.notifications:
            # We detected motion at some point
            self.data.last_motion()
            self.notifications = notifs
            # returning motion image currently not working
            # return self.data.cameras[self._name].motion['image']
        elif notifs < self.notifications:
            # Notification count went down (e.g. cleared remotely), so
            # resynchronize the baseline.
            self.notifications = notifs
        return self.data.camera_thumbs[self._name]
    def camera_image(self):
        """Return a still image response from the camera."""
        # NOTE(review): request_image() is throttled, so inside the throttle
        # window this returns the previously fetched response's content.
        self.request_image()
        return self.response.content
| 30.060976 | 76 | 0.670994 |
056b1d08bcf300254d9bac28126c290ac0810afb | 1,695 | py | Python | ooobuild/lo/io/x_sequence_output_stream.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/io/x_sequence_output_stream.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/io/x_sequence_output_stream.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.io
import typing
from abc import abstractmethod
from .x_output_stream import XOutputStream as XOutputStream_a4e00b35
class XSequenceOutputStream(XOutputStream_a4e00b35):
    """
    This interface offers access to the written bytes.
    See Also:
        `API XSequenceOutputStream <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1io_1_1XSequenceOutputStream.html>`_
    """
    # UNO type metadata for this interface: namespace, fully-qualified name,
    # type kind and the pyuno interface identifier.
    __ooo_ns__: str = 'com.sun.star.io'
    __ooo_full_ns__: str = 'com.sun.star.io.XSequenceOutputStream'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.io.XSequenceOutputStream'
    @abstractmethod
    def getWrittenBytes(self) -> 'typing.Tuple[int, ...]':
        """
        allows to get access to the written data
        Raises:
            com.sun.star.io.NotConnectedException: ``NotConnectedException``
            com.sun.star.io.IOException: ``IOException``
        """
| 34.591837 | 143 | 0.729204 |
dd0eb45aa0be9ae144b2103b8c8b611c479fbea7 | 1,344 | py | Python | py/probe_info_service/app_engine/config.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 3 | 2022-01-06T16:52:52.000Z | 2022-03-07T11:30:47.000Z | py/probe_info_service/app_engine/config.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | null | null | null | py/probe_info_service/app_engine/config.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 1 | 2021-10-24T01:47:22.000Z | 2021-10-24T01:47:22.000Z | # Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import enum
import logging
import os
import yaml
from cros.factory.utils import file_utils
# TODO(yhong): Stop disabling unused-import check once the issue
# https://github.com/PyCQA/pylint/issues/1630 is solved.
from cros.factory.utils import type_utils # pylint: disable=unused-import
_CONFIGURATIONS_YAML_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'configurations.yaml')
@enum.unique
class EnvType(enum.Enum):
    """Supported deployment environments; the values are the 'env_type'
    strings used in the service configuration."""
    LOCAL = 'local'
    STAGING = 'staging'
    PROD = 'prod'
_DEFAULT_CONFIG = {
'env_type': EnvType.LOCAL.value,
'log_level': logging.DEBUG,
}
class Config(metaclass=type_utils.Singleton):
    """Service configuration, instantiated at most once per process
    (enforced by the type_utils.Singleton metaclass).

    On App Engine (``GAE_APPLICATION`` set) the environment specific config
    set is loaded from the configurations YAML file; otherwise the local
    defaults from ``_DEFAULT_CONFIG`` are used.
    """
    def __init__(self):
        # GAE_APPLICATION is only present when running on Google App Engine.
        gae_application = os.environ.get('GAE_APPLICATION')
        if gae_application:
            # We consider the configuration file missing the config set for the
            # current environment a vital error so not to catch the exceptions.
            env_configuration = yaml.safe_load(
                file_utils.ReadFile(_CONFIGURATIONS_YAML_PATH))[gae_application]
        else:
            env_configuration = _DEFAULT_CONFIG
        # The deployment environment (an EnvType member).
        self.env_type = EnvType(env_configuration['env_type'])
        # The logging level to use (a `logging` module level value).
        self.log_level = env_configuration['log_level']
| 28.595745 | 74 | 0.74256 |
4b80dce6a68cfcef017fca876c3e7ac62b105172 | 35,162 | py | Python | plotter/full_node/full_node_store.py | Plotter-Network/plotter-blockchain | 13d10557496f37b9a001786ff837bdf34d8f1bcb | [
"Apache-2.0"
] | 1 | 2021-07-10T12:50:30.000Z | 2021-07-10T12:50:30.000Z | plotter/full_node/full_node_store.py | Plotter-Network/plotter-blockchain | 13d10557496f37b9a001786ff837bdf34d8f1bcb | [
"Apache-2.0"
] | null | null | null | plotter/full_node/full_node_store.py | Plotter-Network/plotter-blockchain | 13d10557496f37b9a001786ff837bdf34d8f1bcb | [
"Apache-2.0"
] | null | null | null | import asyncio
import dataclasses
import logging
import time
from typing import Dict, List, Optional, Set, Tuple
from plotter.consensus.block_record import BlockRecord
from plotter.consensus.blockchain_interface import BlockchainInterface
from plotter.consensus.constants import ConsensusConstants
from plotter.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch
from plotter.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from plotter.consensus.multiprocess_validation import PreValidationResult
from plotter.consensus.pot_iterations import calculate_sp_interval_iters
from plotter.full_node.signage_point import SignagePoint
from plotter.protocols import timelord_protocol
from plotter.server.outbound_message import Message
from plotter.types.blockchain_format.classgroup import ClassgroupElement
from plotter.types.blockchain_format.sized_bytes import bytes32
from plotter.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from plotter.types.blockchain_format.vdf import VDFInfo
from plotter.types.end_of_slot_bundle import EndOfSubSlotBundle
from plotter.types.full_block import FullBlock
from plotter.types.generator_types import CompressorArg
from plotter.types.unfinished_block import UnfinishedBlock
from plotter.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class FullNodeStore:
    """In-memory caches used by the full node: candidate and unfinished
    blocks, finished sub-slots with their signage points, and caches of
    objects that arrived before the infused blocks they depend on."""
    constants: ConsensusConstants
    # Blocks which we have created, but don't have plot signatures yet, so not yet "unfinished blocks"
    candidate_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
    candidate_backup_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
    # Header hashes of unfinished blocks that we have seen recently
    seen_unfinished_blocks: set
    # Unfinished blocks, keyed from reward hash
    unfinished_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]
    # Finished slots and sps from the peak's slot onwards
    # We store all 32 SPs for each slot, starting as 32 Nones and filling them as we go
    # Also stores the total iters at the end of slot
    # For the first sub-slot, EndOfSlotBundle is None
    finished_sub_slots: List[Tuple[Optional[EndOfSubSlotBundle], List[Optional[SignagePoint]], uint128]]
    # These caches maintain objects which depend on infused blocks in the reward chain, that we
    # might receive before the blocks themselves. The dict keys are the reward chain challenge hashes.
    # End of slots which depend on infusions that we don't have
    future_eos_cache: Dict[bytes32, List[EndOfSubSlotBundle]]
    # Signage points which depend on infusions that we don't have
    future_sp_cache: Dict[bytes32, List[Tuple[uint8, SignagePoint]]]
    # Infusion point VDFs which depend on infusions that we don't have
    future_ip_cache: Dict[bytes32, List[timelord_protocol.NewInfusionPointVDF]]
    # This stores the time that each key was added to the future cache, so we can clear old keys
    future_cache_key_times: Dict[bytes32, int]
    # Partial hashes of unfinished blocks we are requesting
    requesting_unfinished_blocks: Set[bytes32]
    previous_generator: Optional[CompressorArg]
    pending_tx_request: Dict[bytes32, bytes32]  # tx_id: peer_id
    peers_with_tx: Dict[bytes32, Set[bytes32]]  # tx_id: Set[peer_ids]
    tx_fetch_tasks: Dict[bytes32, asyncio.Task]  # Task id: task
    serialized_wp_message: Optional[Message]
    serialized_wp_message_tip: Optional[bytes32]
    def __init__(self, constants: ConsensusConstants):
        """Initialize all caches empty and set up the initial sub-slot state."""
        self.candidate_blocks = {}
        self.candidate_backup_blocks = {}
        self.seen_unfinished_blocks = set()
        self.unfinished_blocks = {}
        self.finished_sub_slots = []
        self.future_eos_cache = {}
        self.future_sp_cache = {}
        self.future_ip_cache = {}
        self.requesting_unfinished_blocks = set()
        self.previous_generator = None
        self.future_cache_key_times = {}
        self.constants = constants
        # Set up the genesis sub-slot state; see clear_slots() and
        # initialize_genesis_sub_slot() defined later in this class.
        self.clear_slots()
        self.initialize_genesis_sub_slot()
        self.pending_tx_request = {}
        self.peers_with_tx = {}
        self.tx_fetch_tasks = {}
        self.serialized_wp_message = None
        self.serialized_wp_message_tip = None
def add_candidate_block(
self, quality_string: bytes32, height: uint32, unfinished_block: UnfinishedBlock, backup: bool = False
):
if backup:
self.candidate_backup_blocks[quality_string] = (height, unfinished_block)
else:
self.candidate_blocks[quality_string] = (height, unfinished_block)
def get_candidate_block(
self, quality_string: bytes32, backup: bool = False
) -> Optional[Tuple[uint32, UnfinishedBlock]]:
if backup:
return self.candidate_backup_blocks.get(quality_string, None)
else:
return self.candidate_blocks.get(quality_string, None)
def clear_candidate_blocks_below(self, height: uint32) -> None:
del_keys = []
for key, value in self.candidate_blocks.items():
if value[0] < height:
del_keys.append(key)
for key in del_keys:
try:
del self.candidate_blocks[key]
except KeyError:
pass
del_keys = []
for key, value in self.candidate_backup_blocks.items():
if value[0] < height:
del_keys.append(key)
for key in del_keys:
try:
del self.candidate_backup_blocks[key]
except KeyError:
pass
def seen_unfinished_block(self, object_hash: bytes32) -> bool:
if object_hash in self.seen_unfinished_blocks:
return True
self.seen_unfinished_blocks.add(object_hash)
return False
def clear_seen_unfinished_blocks(self) -> None:
self.seen_unfinished_blocks.clear()
def add_unfinished_block(
self, height: uint32, unfinished_block: UnfinishedBlock, result: PreValidationResult
) -> None:
self.unfinished_blocks[unfinished_block.partial_hash] = (height, unfinished_block, result)
def get_unfinished_block(self, unfinished_reward_hash: bytes32) -> Optional[UnfinishedBlock]:
result = self.unfinished_blocks.get(unfinished_reward_hash, None)
if result is None:
return None
return result[1]
def get_unfinished_block_result(self, unfinished_reward_hash: bytes32) -> Optional[PreValidationResult]:
result = self.unfinished_blocks.get(unfinished_reward_hash, None)
if result is None:
return None
return result[2]
def get_unfinished_blocks(self) -> Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]:
return self.unfinished_blocks
def clear_unfinished_blocks_below(self, height: uint32) -> None:
del_keys: List[bytes32] = []
for partial_reward_hash, (unf_height, unfinished_block, _) in self.unfinished_blocks.items():
if unf_height < height:
del_keys.append(partial_reward_hash)
for del_key in del_keys:
del self.unfinished_blocks[del_key]
def remove_unfinished_block(self, partial_reward_hash: bytes32):
if partial_reward_hash in self.unfinished_blocks:
del self.unfinished_blocks[partial_reward_hash]
def add_to_future_ip(self, infusion_point: timelord_protocol.NewInfusionPointVDF):
ch: bytes32 = infusion_point.reward_chain_ip_vdf.challenge
if ch not in self.future_ip_cache:
self.future_ip_cache[ch] = []
self.future_ip_cache[ch].append(infusion_point)
def in_future_sp_cache(self, signage_point: SignagePoint, index: uint8) -> bool:
if signage_point.rc_vdf is None:
return False
if signage_point.rc_vdf.challenge not in self.future_sp_cache:
return False
for cache_index, cache_sp in self.future_sp_cache[signage_point.rc_vdf.challenge]:
if cache_index == index and cache_sp.rc_vdf == signage_point.rc_vdf:
return True
return False
def add_to_future_sp(self, signage_point: SignagePoint, index: uint8):
# We are missing a block here
if (
signage_point.cc_vdf is None
or signage_point.rc_vdf is None
or signage_point.cc_proof is None
or signage_point.rc_proof is None
):
return None
if signage_point.rc_vdf.challenge not in self.future_sp_cache:
self.future_sp_cache[signage_point.rc_vdf.challenge] = []
if self.in_future_sp_cache(signage_point, index):
return None
self.future_cache_key_times[signage_point.rc_vdf.challenge] = int(time.time())
self.future_sp_cache[signage_point.rc_vdf.challenge].append((index, signage_point))
log.info(f"Don't have rc hash {signage_point.rc_vdf.challenge}. caching signage point {index}.")
def get_future_ip(self, rc_challenge_hash: bytes32) -> List[timelord_protocol.NewInfusionPointVDF]:
return self.future_ip_cache.get(rc_challenge_hash, [])
def clear_old_cache_entries(self) -> None:
current_time: int = int(time.time())
remove_keys: List[bytes32] = []
for rc_hash, time_added in self.future_cache_key_times.items():
if current_time - time_added > 3600:
remove_keys.append(rc_hash)
for k in remove_keys:
self.future_cache_key_times.pop(k, None)
self.future_ip_cache.pop(k, [])
self.future_eos_cache.pop(k, [])
self.future_sp_cache.pop(k, [])
def clear_slots(self):
self.finished_sub_slots.clear()
def get_sub_slot(self, challenge_hash: bytes32) -> Optional[Tuple[EndOfSubSlotBundle, int, uint128]]:
assert len(self.finished_sub_slots) >= 1
for index, (sub_slot, _, total_iters) in enumerate(self.finished_sub_slots):
if sub_slot is not None and sub_slot.challenge_chain.get_hash() == challenge_hash:
return sub_slot, index, total_iters
return None
def initialize_genesis_sub_slot(self):
self.clear_slots()
self.finished_sub_slots = [(None, [None] * self.constants.NUM_SPS_SUB_SLOT, uint128(0))]
def new_finished_sub_slot(
self,
eos: EndOfSubSlotBundle,
blocks: BlockchainInterface,
peak: Optional[BlockRecord],
peak_full_block: Optional[FullBlock],
) -> Optional[List[timelord_protocol.NewInfusionPointVDF]]:
"""
Returns false if not added. Returns a list if added. The list contains all infusion points that depended
on this sub slot
"""
assert len(self.finished_sub_slots) >= 1
assert (peak is None) == (peak_full_block is None)
last_slot, _, last_slot_iters = self.finished_sub_slots[-1]
cc_challenge: bytes32 = (
last_slot.challenge_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
)
rc_challenge: bytes32 = (
last_slot.reward_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
)
icc_challenge: Optional[bytes32] = None
icc_iters: Optional[uint64] = None
# Skip if already present
for slot, _, _ in self.finished_sub_slots:
if slot == eos:
return []
if eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge != cc_challenge:
# This slot does not append to our next slot
# This prevent other peers from appending fake VDFs to our cache
return None
if peak is None:
sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
else:
sub_slot_iters = peak.sub_slot_iters
total_iters = uint128(last_slot_iters + sub_slot_iters)
if peak is not None and peak.total_iters > last_slot_iters:
# Peak is in this slot
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
cc_start_element = peak.challenge_vdf_output
iters = uint64(total_iters - peak.total_iters)
if peak.reward_infusion_new_challenge != rc_challenge:
# We don't have this challenge hash yet
if rc_challenge not in self.future_eos_cache:
self.future_eos_cache[rc_challenge] = []
self.future_eos_cache[rc_challenge].append(eos)
self.future_cache_key_times[rc_challenge] = int(time.time())
log.info(f"Don't have challenge hash {rc_challenge}, caching EOS")
return None
if peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
icc_start_element = None
elif peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
icc_start_element = ClassgroupElement.get_default_element()
else:
icc_start_element = peak.infused_challenge_vdf_output
if peak.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
curr = peak
while not curr.first_in_sub_slot and not curr.is_challenge_block(self.constants):
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(self.constants):
icc_challenge = curr.challenge_block_info_hash
icc_iters = uint64(total_iters - curr.total_iters)
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_challenge = curr.finished_infused_challenge_slot_hashes[-1]
icc_iters = sub_slot_iters
assert icc_challenge is not None
if can_finish_sub_and_full_epoch(
self.constants,
blocks,
peak.height,
peak.prev_hash,
peak.deficit,
peak.sub_epoch_summary_included is not None,
)[0]:
assert peak_full_block is not None
ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
self.constants, blocks, peak.required_iters, peak_full_block, True
)
if ses is not None:
if eos.challenge_chain.subepoch_summary_hash != ses.get_hash():
log.warning(f"SES not correct {ses.get_hash(), eos.challenge_chain}")
return None
else:
if eos.challenge_chain.subepoch_summary_hash is not None:
log.warning("SES not correct, should be None")
return None
else:
# This is on an empty slot
cc_start_element = ClassgroupElement.get_default_element()
icc_start_element = ClassgroupElement.get_default_element()
iters = sub_slot_iters
icc_iters = sub_slot_iters
# The icc should only be present if the previous slot had an icc too, and not deficit 0 (just finished slot)
icc_challenge = (
last_slot.infused_challenge_chain.get_hash()
if last_slot is not None
and last_slot.infused_challenge_chain is not None
and last_slot.reward_chain.deficit != self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
else None
)
# Validate cc VDF
partial_cc_vdf_info = VDFInfo(
cc_challenge,
iters,
eos.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_cc_vdf_info,
number_of_iterations=sub_slot_iters,
):
return None
if (
not eos.proofs.challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.challenge_chain_slot_proof.is_valid(
self.constants,
cc_start_element,
partial_cc_vdf_info,
)
):
return None
if (
eos.proofs.challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.challenge_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.challenge_chain.challenge_chain_end_of_slot_vdf,
)
):
return None
# Validate reward chain VDF
if not eos.proofs.reward_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.reward_chain.end_of_slot_vdf,
VDFInfo(rc_challenge, iters, eos.reward_chain.end_of_slot_vdf.output),
):
return None
if icc_challenge is not None:
assert icc_start_element is not None
assert icc_iters is not None
assert eos.infused_challenge_chain is not None
assert eos.infused_challenge_chain is not None
assert eos.proofs.infused_challenge_chain_slot_proof is not None
partial_icc_vdf_info = VDFInfo(
icc_challenge,
iters,
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_icc_vdf_info,
number_of_iterations=icc_iters,
):
return None
if (
not eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
self.constants, icc_start_element, partial_icc_vdf_info
)
):
return None
if (
eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
)
):
return None
else:
# This is the first sub slot and it's empty, therefore there is no ICC
if eos.infused_challenge_chain is not None or eos.proofs.infused_challenge_chain_slot_proof is not None:
return None
self.finished_sub_slots.append((eos, [None] * self.constants.NUM_SPS_SUB_SLOT, total_iters))
new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
for ip in self.future_ip_cache.get(eos.reward_chain.get_hash(), []):
new_ips.append(ip)
return new_ips
    def new_signage_point(
        self,
        index: uint8,
        blocks: BlockchainInterface,
        peak: Optional[BlockRecord],
        next_sub_slot_iters: uint64,
        signage_point: SignagePoint,
        skip_vdf_validation: bool = False,
    ) -> bool:
        """
        Validates a signage point against the sub-slot it claims to belong to
        and stores it there. Returns true if sp successfully added. Points that
        cannot be validated yet (unknown challenge, mismatched VDF, outdated)
        are stashed in the future SP cache instead, and False is returned.
        """
        assert len(self.finished_sub_slots) >= 1
        if peak is None or peak.height < 2:
            sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
        else:
            sub_slot_iters = peak.sub_slot_iters
        # If we don't have this slot, return False
        if index == 0 or index >= self.constants.NUM_SPS_SUB_SLOT:
            return False
        assert (
            signage_point.cc_vdf is not None
            and signage_point.cc_proof is not None
            and signage_point.rc_vdf is not None
            and signage_point.rc_proof is not None
        )
        for sub_slot, sp_arr, start_ss_total_iters in self.finished_sub_slots:
            if sub_slot is None:
                # First sub-slot in the chain: challenges are the genesis challenge.
                assert start_ss_total_iters == 0
                ss_challenge_hash = self.constants.GENESIS_CHALLENGE
                ss_reward_hash = self.constants.GENESIS_CHALLENGE
            else:
                ss_challenge_hash = sub_slot.challenge_chain.get_hash()
                ss_reward_hash = sub_slot.reward_chain.get_hash()
            if ss_challenge_hash == signage_point.cc_vdf.challenge:
                # If we do have this slot, find the Prev block from SP and validate SP
                if peak is not None and start_ss_total_iters > peak.total_iters:
                    # We are in a future sub slot from the peak, so maybe there is a new SSI
                    checkpoint_size: uint64 = uint64(next_sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
                    delta_iters: uint64 = uint64(checkpoint_size * index)
                    future_sub_slot: bool = True
                else:
                    # We are not in a future sub slot from the peak, so there is no new SSI
                    checkpoint_size = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
                    delta_iters = uint64(checkpoint_size * index)
                    future_sub_slot = False
                sp_total_iters = start_ss_total_iters + delta_iters
                curr = peak
                if peak is None or future_sub_slot:
                    check_from_start_of_ss = True
                else:
                    check_from_start_of_ss = False
                # Walk back from the peak to the last block infused before this SP.
                while (
                    curr is not None
                    and curr.total_iters > start_ss_total_iters
                    and curr.total_iters > sp_total_iters
                ):
                    if curr.first_in_sub_slot:
                        # Did not find a block where it's iters are before our sp_total_iters, in this ss
                        check_from_start_of_ss = True
                        break
                    curr = blocks.block_record(curr.prev_hash)
                if check_from_start_of_ss:
                    # Check VDFs from start of sub slot
                    cc_vdf_info_expected = VDFInfo(
                        ss_challenge_hash,
                        delta_iters,
                        signage_point.cc_vdf.output,
                    )
                    rc_vdf_info_expected = VDFInfo(
                        ss_reward_hash,
                        delta_iters,
                        signage_point.rc_vdf.output,
                    )
                else:
                    # Check VDFs from curr
                    assert curr is not None
                    cc_vdf_info_expected = VDFInfo(
                        ss_challenge_hash,
                        uint64(sp_total_iters - curr.total_iters),
                        signage_point.cc_vdf.output,
                    )
                    rc_vdf_info_expected = VDFInfo(
                        curr.reward_infusion_new_challenge,
                        uint64(sp_total_iters - curr.total_iters),
                        signage_point.rc_vdf.output,
                    )
                # The cc VDF always advertises the full delta from the start of the
                # sub-slot, even when the proof only covers the span from curr.
                if not signage_point.cc_vdf == dataclasses.replace(
                    cc_vdf_info_expected, number_of_iterations=delta_iters
                ):
                    self.add_to_future_sp(signage_point, index)
                    return False
                if check_from_start_of_ss:
                    start_ele = ClassgroupElement.get_default_element()
                else:
                    assert curr is not None
                    start_ele = curr.challenge_vdf_output
                if not skip_vdf_validation:
                    if not signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
                        self.constants,
                        start_ele,
                        cc_vdf_info_expected,
                    ):
                        self.add_to_future_sp(signage_point, index)
                        return False
                    if signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
                        self.constants,
                        ClassgroupElement.get_default_element(),
                        signage_point.cc_vdf,
                    ):
                        self.add_to_future_sp(signage_point, index)
                        return False
                if rc_vdf_info_expected.challenge != signage_point.rc_vdf.challenge:
                    # This signage point is probably outdated
                    self.add_to_future_sp(signage_point, index)
                    return False
                if not skip_vdf_validation:
                    if not signage_point.rc_proof.is_valid(
                        self.constants,
                        ClassgroupElement.get_default_element(),
                        signage_point.rc_vdf,
                        rc_vdf_info_expected,
                    ):
                        self.add_to_future_sp(signage_point, index)
                        return False
                sp_arr[index] = signage_point
                return True
        self.add_to_future_sp(signage_point, index)
        return False
def get_signage_point(self, cc_signage_point: bytes32) -> Optional[SignagePoint]:
assert len(self.finished_sub_slots) >= 1
if cc_signage_point == self.constants.GENESIS_CHALLENGE:
return SignagePoint(None, None, None, None)
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None and sub_slot.challenge_chain.get_hash() == cc_signage_point:
return SignagePoint(None, None, None, None)
for sp in sps:
if sp is not None:
assert sp.cc_vdf is not None
if sp.cc_vdf.output.get_hash() == cc_signage_point:
return sp
return None
def get_signage_point_by_index(
self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32
) -> Optional[SignagePoint]:
assert len(self.finished_sub_slots) >= 1
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None:
cc_hash = sub_slot.challenge_chain.get_hash()
else:
cc_hash = self.constants.GENESIS_CHALLENGE
if cc_hash == challenge_hash:
if index == 0:
return SignagePoint(None, None, None, None)
sp: Optional[SignagePoint] = sps[index]
if sp is not None:
assert sp.rc_vdf is not None
if sp.rc_vdf.challenge == last_rc_infusion:
return sp
return None
return None
def have_newer_signage_point(self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32) -> bool:
"""
Returns true if we have a signage point at this index which is based on a newer infusion.
"""
assert len(self.finished_sub_slots) >= 1
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None:
cc_hash = sub_slot.challenge_chain.get_hash()
else:
cc_hash = self.constants.GENESIS_CHALLENGE
if cc_hash == challenge_hash:
found_rc_hash = False
for i in range(0, index):
sp: Optional[SignagePoint] = sps[i]
if sp is not None and sp.rc_vdf is not None and sp.rc_vdf.challenge == last_rc_infusion:
found_rc_hash = True
sp = sps[index]
if (
found_rc_hash
and sp is not None
and sp.rc_vdf is not None
and sp.rc_vdf.challenge != last_rc_infusion
):
return True
return False
    def new_peak(
        self,
        peak: BlockRecord,
        peak_full_block: FullBlock,
        sp_sub_slot: Optional[EndOfSubSlotBundle],  # None if not overflow, or in first/second slot
        ip_sub_slot: Optional[EndOfSubSlotBundle],  # None if in first slot
        fork_block: Optional[BlockRecord],
        blocks: BlockchainInterface,
    ) -> Tuple[
        Optional[EndOfSubSlotBundle], List[Tuple[uint8, SignagePoint]], List[timelord_protocol.NewInfusionPointVDF]
    ]:
        """
        If the peak is an overflow block, must provide two sub-slots: one for the current sub-slot and one for
        the prev sub-slot (since we still might get more blocks with an sp in the previous sub-slot)
        Results in either one or two sub-slots in finished_sub_slots.

        Returns (newly-validated EOS, newly-validated SPs, cached IPs) drained
        from the future caches keyed by the peak's reward infusion challenge.
        """
        assert len(self.finished_sub_slots) >= 1
        if ip_sub_slot is None:
            # We are still in the first sub-slot, no new sub slots yet
            self.initialize_genesis_sub_slot()
        else:
            # This is not the first sub-slot in the chain
            sp_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT
            ip_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT
            if fork_block is not None and fork_block.sub_slot_iters != peak.sub_slot_iters:
                # If there was a reorg and a difficulty adjustment, just clear all the slots
                self.clear_slots()
            else:
                interval_iters = calculate_sp_interval_iters(self.constants, peak.sub_slot_iters)
                # If it's not a reorg, or there is a reorg on the same difficulty, we can keep signage points
                # that we had before, in the cache
                for index, (sub_slot, sps, total_iters) in enumerate(self.finished_sub_slots):
                    if sub_slot is None:
                        continue
                    if fork_block is None:
                        # If this is not a reorg, we still want to remove signage points after the new peak
                        fork_block = peak
                    replaced_sps: List[Optional[SignagePoint]] = []  # index 0 is the end of sub slot
                    for i, sp in enumerate(sps):
                        if (total_iters + i * interval_iters) < fork_block.total_iters:
                            # Sps before the fork point as still valid
                            replaced_sps.append(sp)
                        else:
                            if sp is not None:
                                log.debug(
                                    f"Reverting {i} {(total_iters + i * interval_iters)} {fork_block.total_iters}"
                                )
                            # Sps after the fork point should be removed
                            replaced_sps.append(None)
                    assert len(sps) == len(replaced_sps)
                    if sub_slot == sp_sub_slot:
                        sp_sub_slot_sps = replaced_sps
                    if sub_slot == ip_sub_slot:
                        ip_sub_slot_sps = replaced_sps
            # Rebuild finished_sub_slots with just the (up to two) relevant slots.
            self.clear_slots()
            prev_sub_slot_total_iters = peak.sp_sub_slot_total_iters(self.constants)
            if sp_sub_slot is not None or prev_sub_slot_total_iters == 0:
                assert peak.overflow or prev_sub_slot_total_iters
                self.finished_sub_slots.append((sp_sub_slot, sp_sub_slot_sps, prev_sub_slot_total_iters))
            ip_sub_slot_total_iters = peak.ip_sub_slot_total_iters(self.constants)
            self.finished_sub_slots.append((ip_sub_slot, ip_sub_slot_sps, ip_sub_slot_total_iters))
        # Drain the future caches: objects that were waiting for this infusion
        # can now be validated.
        new_eos: Optional[EndOfSubSlotBundle] = None
        new_sps: List[Tuple[uint8, SignagePoint]] = []
        new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
        future_eos: List[EndOfSubSlotBundle] = self.future_eos_cache.get(peak.reward_infusion_new_challenge, []).copy()
        for eos in future_eos:
            if self.new_finished_sub_slot(eos, blocks, peak, peak_full_block) is not None:
                new_eos = eos
                break
        future_sps: List[Tuple[uint8, SignagePoint]] = self.future_sp_cache.get(
            peak.reward_infusion_new_challenge, []
        ).copy()
        for index, sp in future_sps:
            assert sp.cc_vdf is not None
            if self.new_signage_point(index, blocks, peak, peak.sub_slot_iters, sp):
                new_sps.append((index, sp))
        for ip in self.future_ip_cache.get(peak.reward_infusion_new_challenge, []):
            new_ips.append(ip)
        self.future_eos_cache.pop(peak.reward_infusion_new_challenge, [])
        self.future_sp_cache.pop(peak.reward_infusion_new_challenge, [])
        self.future_ip_cache.pop(peak.reward_infusion_new_challenge, [])
        return new_eos, new_sps, new_ips
    def get_finished_sub_slots(
        self,
        block_records: BlockchainInterface,
        prev_b: Optional[BlockRecord],
        last_challenge_to_add: bytes32,
    ) -> Optional[List[EndOfSubSlotBundle]]:
        """
        Retrieves the EndOfSubSlotBundles that are in the store either:
        1. From the starting challenge if prev_b is None
        2. That are not included in the blockchain with peak of prev_b if prev_b is not None
        Stops at last_challenge

        Returns None if a contiguous chain of sub-slots from the chain's last
        challenge to last_challenge_to_add cannot be found in the store.
        """
        if prev_b is None:
            # The first sub slot must be None
            assert self.finished_sub_slots[0][0] is None
            challenge_in_chain: bytes32 = self.constants.GENESIS_CHALLENGE
        else:
            # Walk back to the first block of prev_b's sub-slot to learn the
            # last challenge hash already included in the chain.
            curr: BlockRecord = prev_b
            while not curr.first_in_sub_slot:
                curr = block_records.block_record(curr.prev_hash)
            assert curr is not None
            assert curr.finished_challenge_slot_hashes is not None
            challenge_in_chain = curr.finished_challenge_slot_hashes[-1]
        if last_challenge_to_add == challenge_in_chain:
            # No additional slots to add
            return []
        collected_sub_slots: List[EndOfSubSlotBundle] = []
        found_last_challenge = False
        found_connecting_challenge = False
        # Skip index 0: the first stored sub-slot may be the genesis None entry.
        for sub_slot, sps, total_iters in self.finished_sub_slots[1:]:
            assert sub_slot is not None
            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge == challenge_in_chain:
                found_connecting_challenge = True
            if found_connecting_challenge:
                collected_sub_slots.append(sub_slot)
            if found_connecting_challenge and sub_slot.challenge_chain.get_hash() == last_challenge_to_add:
                found_last_challenge = True
                break
        if not found_last_challenge:
            log.warning(f"Did not find hash {last_challenge_to_add} connected to " f"{challenge_in_chain}")
            return None
        return collected_sub_slots
| 45.137356 | 120 | 0.621722 |
64d477127f680ebe36b11aac11dc6e49ca9c4850 | 2,138 | py | Python | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/config.py | Anyesh/cookiecutter-flask-all-in-one | 6ae0a423e8cab23c23756f1ef610643c85b71f32 | [
"MIT"
] | 35 | 2020-11-27T09:06:44.000Z | 2022-01-31T17:08:41.000Z | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/config.py | Anyesh/cookiecutter-flask-all-in-one | 6ae0a423e8cab23c23756f1ef610643c85b71f32 | [
"MIT"
] | 1 | 2021-05-15T17:34:47.000Z | 2021-06-04T20:12:59.000Z | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/config.py | Anyesh/cookiecutter-flask-all-in-one | 6ae0a423e8cab23c23756f1ef610643c85b71f32 | [
"MIT"
] | 2 | 2020-12-03T13:55:11.000Z | 2020-12-08T10:42:03.000Z | """Flask configuration variables."""
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
# logging.config.dictConfig-style setup: the root logger accepts everything,
# and the `app` logger sends INFO-and-above to a single stream handler with a
# timestamped "[time: LEVEL | name] message" format.
LOGGING_CONFIG = {
    'version': 1,
    'root': {
        'level': 'NOTSET',
        'handlers': ['default']
    },
    'formatters': {
        'verbose': {
            'format': '[%(asctime)s: %(levelname)s | %(name)s] %(message)s'
        }
    },
    'handlers': {
        'default': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'app': {
            'handlers': ['default'],
            'level': 'INFO',
            # Don't also bubble records up to the root logger.
            'propagate': False
        }
    }
}
class Config:
    """Set Flask configuration from .env file."""
    # General Config
    SECRET_KEY = os.environ.get('SECRET_KEY')
    FLASK_APP = os.environ.get('FLASK_APP')
    FLASK_ENV = os.environ.get('FLASK_ENV')
    FLASK_DEBUG = os.environ.get('FLASK_DEBUG')
    # Falls back to an in-memory SQLite database when DATABASE_URI is unset.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI', 'sqlite://')
    APP_NAME = os.environ.get('APP_NAME')
    ADMINS = [os.environ.get('EMAIL')]
    LANGUAGES = ['fr', 'en']
    SQLALCHEMY_ECHO = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # SMTP
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 465)
    MAIL_USE_SSL = True
    MAIL_USERNAME = os.environ.get('EMAIL')
    MAIL_PASSWORD = os.environ.get('PASSWORD_EMAIL')
    # JWT
    PROPAGATE_EXCEPTIONS = True
    JWT_BLACKLIST_ENABLED = True
    JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
    # Twitter variables
    TWITTER_OAUTH_API_KEY = os.environ.get('TWITTER_OAUTH_API_KEY')
    TWITTER_OAUTH_API_SECRET = os.environ.get('TWITTER_OAUTH_API_SECRET')
    # Google variables
    GOOGLE_OAUTH_CLIENT_ID = os.environ.get('GOOGLE_OAUTH_CLIENT_ID')
    GOOGLE_OAUTH_CLIENT_SECRET = os.environ.get('GOOGLE_OAUTH_CLIENT_SECRET')
    # Facebook variables
    FACEBOOK_OAUTH_CLIENT_ID = os.environ.get('FACEBOOK_OAUTH_CLIENT_ID')
    FACEBOOK_OAUTH_CLIENT_SECRET = os.environ.get('FACEBOOK_OAUTH_CLIENT_SECRET')
8be756cc3546345697cff459215a76af156082cd | 40,990 | py | Python | tests/test_text.py | rianmcguire/WeasyPrint | 7e400663236d16121e14cf3183ce53828d056092 | [
"BSD-3-Clause"
] | null | null | null | tests/test_text.py | rianmcguire/WeasyPrint | 7e400663236d16121e14cf3183ce53828d056092 | [
"BSD-3-Clause"
] | null | null | null | tests/test_text.py | rianmcguire/WeasyPrint | 7e400663236d16121e14cf3183ce53828d056092 | [
"BSD-3-Clause"
] | null | null | null | """
weasyprint.tests.test_text
--------------------------
Test the text layout.
"""
import pytest
from weasyprint.css.properties import INITIAL_VALUES
from weasyprint.text.line_break import split_first_line
from .testing_utils import MONO_FONTS, SANS_FONTS, assert_no_logs, render_pages
def make_text(text, width=None, **style):
"""Wrapper for split_first_line() creating a style dict."""
new_style = dict(INITIAL_VALUES)
new_style['font_family'] = MONO_FONTS.split(',')
new_style.update(style)
return split_first_line(
text, new_style, context=None, max_width=width,
justification_spacing=0)
@assert_no_logs
def test_line_content():
for width, remaining in [(100, 'text for test'),
(45, 'is a text for test')]:
text = 'This is a text for test'
_, length, resume_index, _, _, _ = make_text(
text, width, font_family=SANS_FONTS.split(','), font_size=19)
assert text[resume_index:] == remaining
assert length + 1 == resume_index # +1 for the removed trailing space
@assert_no_logs
def test_line_with_any_width():
_, _, _, width_1, _, _ = make_text('some text')
_, _, _, width_2, _, _ = make_text('some text some text')
assert width_1 < width_2
@assert_no_logs
def test_line_breaking():
string = 'Thïs is a text for test'
# These two tests do not really rely on installed fonts
_, _, resume_index, _, _, _ = make_text(string, 90, font_size=1)
assert resume_index is None
_, _, resume_index, _, _, _ = make_text(string, 90, font_size=100)
assert string.encode('utf-8')[resume_index:].decode('utf-8') == (
'is a text for test')
_, _, resume_index, _, _, _ = make_text(
string, 100, font_family=SANS_FONTS.split(','), font_size=19)
assert string.encode('utf-8')[resume_index:].decode('utf-8') == (
'text for test')
@assert_no_logs
def test_line_breaking_rtl():
string = 'لوريم ايبسوم دولا'
# These two tests do not really rely on installed fonts
_, _, resume_index, _, _, _ = make_text(string, 90, font_size=1)
assert resume_index is None
_, _, resume_index, _, _, _ = make_text(string, 90, font_size=100)
assert string.encode('utf-8')[resume_index:].decode('utf-8') == (
'ايبسوم دولا')
@assert_no_logs
def test_text_dimension():
string = 'This is a text for test. This is a test for text.py'
_, _, _, width_1, height_1, _ = make_text(string, 200, font_size=12)
_, _, _, width_2, height_2, _ = make_text(string, 200, font_size=20)
assert width_1 * height_1 < width_2 * height_2
@assert_no_logs
def test_text_font_size_zero():
page, = render_pages('''
<style>
p { font-size: 0; }
</style>
<p>test font size zero</p>
''')
html, = page.children
body, = html.children
paragraph, = body.children
line, = paragraph.children
# zero-sized text boxes are removed
assert not line.children
assert line.height == 0
assert paragraph.height == 0
@assert_no_logs
def test_text_font_size_very_small():
# Test regression: https://github.com/Kozea/WeasyPrint/issues/1499
page, = render_pages('''
<style>
p { font-size: 0.00000001px }
</style>
<p>test font size zero</p>
''')
html, = page.children
body, = html.children
paragraph, = body.children
line, = paragraph.children
assert line.height < 0.001
assert paragraph.height < 0.001
@assert_no_logs
def test_text_spaced_inlines():
page, = render_pages('''
<p>start <i><b>bi1</b> <b>bi2</b></i> <b>b1</b> end</p>
''')
html, = page.children
body, = html.children
paragraph, = body.children
line, = paragraph.children
start, i, space, b, end = line.children
assert start.text == 'start '
assert space.text == ' '
assert space.width > 0
assert end.text == ' end'
bi1, space, bi2 = i.children
bi1, = bi1.children
bi2, = bi2.children
assert bi1.text == 'bi1'
assert space.text == ' '
assert space.width > 0
assert bi2.text == 'bi2'
b1, = b.children
assert b1.text == 'b1'
@assert_no_logs
def test_text_align_left():
# <--------------------> page, body
# +-----+
# +---+ |
# | | |
# +---+-----+
# ^ ^ ^ ^
# x=0 x=40 x=100 x=200
page, = render_pages('''
<style>
@page { size: 200px }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
# initial value for text-align: left (in ltr text)
assert img_1.position_x == 0
assert img_2.position_x == 40
@assert_no_logs
def test_text_align_right():
# <--------------------> page, body
# +-----+
# +---+ |
# | | |
# +---+-----+
# ^ ^ ^ ^
# x=0 x=100 x=200
# x=140
page, = render_pages('''
<style>
@page { size: 200px }
body { text-align: right }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
assert img_1.position_x == 100 # 200 - 60 - 40
assert img_2.position_x == 140 # 200 - 60
@assert_no_logs
def test_text_align_center():
# <--------------------> page, body
# +-----+
# +---+ |
# | | |
# +---+-----+
# ^ ^ ^ ^
# x= x=50 x=150
# x=90
page, = render_pages('''
<style>
@page { size: 200px }
body { text-align: center }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
assert img_1.position_x == 50
assert img_2.position_x == 90
@assert_no_logs
def test_text_align_justify():
page, = render_pages('''
<style>
@page { size: 300px 1000px }
body { text-align: justify }
</style>
<p><img src="pattern.png" style="width: 40px">
<strong>
<img src="pattern.png" style="width: 60px">
<img src="pattern.png" style="width: 10px">
<img src="pattern.png" style="width: 100px"
></strong><img src="pattern.png" style="width: 290px"
><!-- Last image will be on its own line. -->''')
html, = page.children
body, = html.children
paragraph, = body.children
line_1, line_2 = paragraph.children
image_1, space_1, strong = line_1.children
image_2, space_2, image_3, space_3, image_4 = strong.children
image_5, = line_2.children
assert space_1.text == ' '
assert space_2.text == ' '
assert space_3.text == ' '
assert image_1.position_x == 0
assert space_1.position_x == 40
assert strong.position_x == 70
assert image_2.position_x == 70
assert space_2.position_x == 130
assert image_3.position_x == 160
assert space_3.position_x == 170
assert image_4.position_x == 200
assert strong.width == 230
assert image_5.position_x == 0
@assert_no_logs
def test_text_align_justify_all():
    """With justify-all, the last line is justified too."""
    page, = render_pages('''
      <style>
        @page { size: 300px 1000px }
        body { text-align: justify-all }
      </style>
      <p><img src="pattern.png" style="width: 40px">
        <strong>
          <img src="pattern.png" style="width: 60px">
          <img src="pattern.png" style="width: 10px">
          <img src="pattern.png" style="width: 100px"
        ></strong><img src="pattern.png" style="width: 200px">
        <img src="pattern.png" style="width: 10px">''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line_1, line_2 = paragraph.children
    image_1, space_1, strong = line_1.children
    image_2, space_2, image_3, space_3, image_4 = strong.children
    image_5, space_4, image_6 = line_2.children
    assert space_1.text == ' '
    assert space_2.text == ' '
    assert space_3.text == ' '
    assert space_4.text == ' '
    assert image_1.position_x == 0
    assert space_1.position_x == 40
    assert strong.position_x == 70
    assert image_2.position_x == 70
    assert space_2.position_x == 130
    assert image_3.position_x == 160
    assert space_3.position_x == 170
    assert image_4.position_x == 200
    assert strong.width == 230
    # Unlike plain justify, the last line is also stretched to full width.
    assert image_5.position_x == 0
    assert space_4.position_x == 200
    assert image_6.position_x == 290
@assert_no_logs
def test_text_align_all_last():
    """text-align-all: justify combined with text-align-last: right."""
    page, = render_pages('''
      <style>
        @page { size: 300px 1000px }
        body { text-align-all: justify; text-align-last: right }
      </style>
      <p><img src="pattern.png" style="width: 40px">
        <strong>
          <img src="pattern.png" style="width: 60px">
          <img src="pattern.png" style="width: 10px">
          <img src="pattern.png" style="width: 100px"
        ></strong><img src="pattern.png" style="width: 200px"
        ><img src="pattern.png" style="width: 10px">''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line_1, line_2 = paragraph.children
    image_1, space_1, strong = line_1.children
    image_2, space_2, image_3, space_3, image_4 = strong.children
    image_5, image_6 = line_2.children
    assert space_1.text == ' '
    assert space_2.text == ' '
    assert space_3.text == ' '
    # First line is justified as usual.
    assert image_1.position_x == 0
    assert space_1.position_x == 40
    assert strong.position_x == 70
    assert image_2.position_x == 70
    assert space_2.position_x == 130
    assert image_3.position_x == 160
    assert space_3.position_x == 170
    assert image_4.position_x == 200
    assert strong.width == 230
    # Last line is right-aligned per text-align-last.
    assert image_5.position_x == 90
    assert image_6.position_x == 290
@assert_no_logs
def test_text_align_not_enough_space():
    """Centering in a zero-width container must not move content negative."""
    page, = render_pages('''
      <style>
        p { text-align: center; width: 0 }
        span { display: inline-block }
      </style>
      <p><span>aaaaaaaaaaaaaaaaaaaaaaaaaa</span></p>''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    span, = paragraph.children
    assert span.position_x == 0
@assert_no_logs
def test_text_align_justify_no_space():
    """Justifying a single-word line (zero spaces) must not crash or shift."""
    # single-word line (zero spaces)
    page, = render_pages('''
      <style>
        body { text-align: justify; width: 50px }
      </style>
      <p>Supercalifragilisticexpialidocious bar</p>
    ''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line_1, line_2 = paragraph.children
    text, = line_1.children
    assert text.position_x == 0
@assert_no_logs
def test_text_align_justify_text_indent():
    """Justification accounts for text-indent on the first line."""
    # text-indent
    page, = render_pages('''
      <style>
        @page { size: 300px 1000px }
        body { text-align: justify }
        p { text-indent: 3px }
      </style>
      <p><img src="pattern.png" style="width: 40px">
        <strong>
          <img src="pattern.png" style="width: 60px">
          <img src="pattern.png" style="width: 10px">
          <img src="pattern.png" style="width: 100px"
        ></strong><img src="pattern.png" style="width: 290px"
        ><!-- Last image will be on its own line. -->''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line_1, line_2 = paragraph.children
    image_1, space_1, strong = line_1.children
    image_2, space_2, image_3, space_3, image_4 = strong.children
    image_5, = line_2.children
    assert space_1.text == ' '
    assert space_2.text == ' '
    assert space_3.text == ' '
    # All first-line positions are shifted by the 3px indent.
    assert image_1.position_x == 3
    assert space_1.position_x == 43
    assert strong.position_x == 72
    assert image_2.position_x == 72
    assert space_2.position_x == 132
    assert image_3.position_x == 161
    assert space_3.position_x == 171
    assert image_4.position_x == 200
    assert strong.width == 228
    # Second line has no indent.
    assert image_5.position_x == 0
@assert_no_logs
def test_text_align_justify_no_break_between_children():
    """Justify when the break falls between inline children kept together."""
    # Test justification when line break happens between two inline children
    # that must stay together.
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/637
    page, = render_pages('''
      <style>
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        p { text-align: justify; font-family: weasyprint; width: 7em }
      </style>
      <p>
        <span>a</span>
        <span>b</span>
        <span>bla</span><span>,</span>
        <span>b</span>
      </p>
    ''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line_1, line_2 = paragraph.children
    span_1, space_1, span_2, space_2 = line_1.children
    assert span_1.position_x == 0
    assert span_2.position_x == 6 * 16  # 1 character + 5 spaces
    assert line_1.width == 7 * 16  # 7em
    span_1, span_2, space_1, span_3, space_2 = line_2.children
    assert span_1.position_x == 0
    assert span_2.position_x == 3 * 16  # 3 characters
    assert span_3.position_x == 5 * 16  # (3 + 1) characters + 1 space
@assert_no_logs
def test_word_spacing():
    """word-spacing widens each of the 3 spaces by 11px (3 * 11 == 33)."""
    # keep the empty <style> as a regression test: element.text is None
    # (Not a string.)
    page, = render_pages('''
      <style></style>
      <body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_1, = line.children

    # TODO: Pango gives only half of word-spacing to a space at the end
    # of a TextBox. Is this what we want?
    page, = render_pages('''
      <style>strong { word-spacing: 11px }</style>
      <body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_2, = line.children
    assert strong_2.width - strong_1.width == 33
@assert_no_logs
def test_letter_spacing_1():
    """letter-spacing adds a fixed gap after each character of the word."""
    page, = render_pages('''
      <body><strong>Supercalifragilisticexpialidocious</strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_1, = line.children

    page, = render_pages('''
      <style>strong { letter-spacing: 11px }</style>
      <body><strong>Supercalifragilisticexpialidocious</strong>''')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_2, = line.children
    # 34 characters, 11px of extra spacing each.
    assert strong_2.width - strong_1.width == 34 * 11

    # an embedded tag should not affect the single-line letter spacing
    page, = render_pages(
        '<style>strong { letter-spacing: 11px }</style>'
        '<body><strong>Supercali<span>fragilistic</span>expialidocious'
        '</strong>')
    html, = page.children
    body, = html.children
    line, = body.children
    strong_3, = line.children
    assert strong_3.width == strong_2.width

    # duplicate wrapped lines should also have same overall width
    # Note work-around for word-wrap bug (issue #163) by marking word
    # as an inline-block
    page, = render_pages(
        '<style>'
        '  strong {'
        '    letter-spacing: 11px;'
        f'    max-width: {strong_3.width * 1.5}px'
        '}'
        '  span { display: inline-block }'
        '</style>'
        '<body><strong>'
        '  <span>Supercali<i>fragilistic</i>expialidocious</span> '
        '  <span>Supercali<i>fragilistic</i>expialidocious</span>'
        '</strong>')
    html, = page.children
    body, = html.children
    line1, line2 = body.children
    assert line1.children[0].width == line2.children[0].width
    assert line1.children[0].width == strong_2.width
@pytest.mark.parametrize('spacing', ('word-spacing', 'letter-spacing'))
@assert_no_logs
def test_spacing_ex(spacing):
    """Spacing properties given in ex units must render without errors."""
    # Test regression on ex units in spacing properties
    render_pages(f'<div style="{spacing}: 2ex">abc def')
@pytest.mark.parametrize('indent', ('12px', '6%'))
@assert_no_logs
def test_text_indent(indent):
    """text-indent (absolute or percentage) applies to the first line only."""
    page, = render_pages('''
      <style>
        @page { size: 220px }
        body { margin: 10px; text-indent: %(indent)s }
      </style>
      <p>Some text that is long enough that it take at least three line,
        but maybe more.
    ''' % {'indent': indent})
    html, = page.children
    body, = html.children
    paragraph, = body.children
    lines = paragraph.children
    text_1, = lines[0].children
    text_2, = lines[1].children
    text_3, = lines[2].children
    assert text_1.position_x == 22  # 10px margin-left + 12px indent
    assert text_2.position_x == 10  # No indent
    assert text_3.position_x == 10  # No indent
@assert_no_logs
def test_text_indent_inline():
    """text-indent widens the shrink-to-fit line of an inline-block."""
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/1000
    page, = render_pages('''
      <style>
        @font-face { src: url(weasyprint.otf); font-family: weasyprint }
        p { display: inline-block; text-indent: 1em;
            font-family: weasyprint }
      </style>
      <p><span>text
    ''')
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line, = paragraph.children
    # 4 characters + 1em indent, at 16px per em.
    assert line.width == (4 + 1) * 16
@pytest.mark.parametrize('indent', ('12px', '6%'))
@assert_no_logs
def test_text_indent_multipage(indent):
    """Indent applies on the first page's first line, not after a page break."""
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/706
    pages = render_pages('''
      <style>
        @page { size: 220px 1.5em; margin: 0 }
        body { margin: 10px; text-indent: %(indent)s }
      </style>
      <p>Some text that is long enough that it take at least three line,
        but maybe more.
    ''' % {'indent': indent})
    page = pages.pop(0)
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line, = paragraph.children
    text, = line.children
    assert text.position_x == 22  # 10px margin-left + 12px indent
    page = pages.pop(0)
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line, = paragraph.children
    text, = line.children
    assert text.position_x == 10  # No indent
@assert_no_logs
def test_hyphenate_character_1():
    """Custom single-char hyphenate-character ('!') is used at auto breaks."""
    page, = render_pages(
        '<html style="width: 5em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-character: \'!\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('!')
    # Removing the hyphenation character restores the original word.
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('!', '') == 'hyphénation'
@assert_no_logs
def test_hyphenate_character_2():
    """Non-ASCII hyphenate-character ('à') works like an ASCII one."""
    page, = render_pages(
        '<html style="width: 5em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-character: \'à\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('à')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('à', '') == 'hyphénation'
@assert_no_logs
def test_hyphenate_character_3():
    """Multi-character hyphenate-character with a space ('ù ù') is kept whole."""
    page, = render_pages(
        '<html style="width: 5em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-character: \'ù ù\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('ù ù')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace(' ', '').replace('ù', '') == 'hyphénation'
@assert_no_logs
def test_hyphenate_character_4():
    """Empty hyphenate-character: the word breaks without any visible mark."""
    page, = render_pages(
        '<html style="width: 5em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-character: \'\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text == 'hyphénation'
@assert_no_logs
def test_hyphenate_character_5():
    """A long hyphenate-character ('———') still fits at the break point."""
    page, = render_pages(
        '<html style="width: 5em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-character: \'———\'" lang=fr>'
        'hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('———')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('—', '') == 'hyphénation'
@assert_no_logs
def test_hyphenate_manual_1():
    """A soft hyphen (U+00AD) at every position breaks the word in two."""
    for i in range(1, len('hyphénation')):
        for hyphenate_character in ('!', 'ù ù'):
            # Insert the soft hyphen at position i.
            word = 'hyphénation'[:i] + '\u00ad' + 'hyphénation'[i:]
            page, = render_pages(
                '<html style="width: 5em; font-family: weasyprint">'
                '<style>@font-face {'
                '  src: url(weasyprint.otf); font-family: weasyprint}</style>'
                '<body style="hyphens: manual;'
                f'  hyphenate-character: \'{hyphenate_character}\'"'
                f'  lang=fr>{word}')
            html, = page.children
            body, = html.children
            lines = body.children
            assert len(lines) == 2
            assert lines[0].children[0].text.endswith(hyphenate_character)
            full_text = ''.join(
                child.text for line in lines for child in line.children)
            assert full_text.replace(hyphenate_character, '') == word
@assert_no_logs
def test_hyphenate_manual_2():
    """Soft hyphens in a word that also contains a regular space."""
    for i in range(1, len('hy phénation')):
        for hyphenate_character in ('!', 'ù ù'):
            word = 'hy phénation'[:i] + '\u00ad' + 'hy phénation'[i:]
            page, = render_pages(
                '<html style="width: 5em; font-family: weasyprint">'
                '<style>@font-face {'
                '  src: url(weasyprint.otf); font-family: weasyprint}</style>'
                '<body style="hyphens: manual;'
                f'  hyphenate-character: \'{hyphenate_character}\'"'
                f'  lang=fr>{word}')
            html, = page.children
            body, = html.children
            lines = body.children
            # The break may happen at the space, at the soft hyphen, or both.
            assert len(lines) in (2, 3)
            full_text = ''.join(
                child.text for line in lines for child in line.children)
            full_text = full_text.replace(hyphenate_character, '')
            if lines[0].children[0].text.endswith(hyphenate_character):
                assert full_text == word
            else:
                # First break was at the regular space after 'hy'.
                assert lines[0].children[0].text.endswith('y')
                if len(lines) == 3:
                    assert lines[1].children[0].text.endswith(
                        hyphenate_character)
@assert_no_logs
def test_hyphenate_manual_3():
    """Soft hyphens take precedence over automatic hyphenation points."""
    # Automatic hyphenation opportunities within a word must be ignored if the
    # word contains a conditional hyphen, in favor of the conditional
    # hyphen(s).
    page, = render_pages(
        '<html style="width: 0.1em" lang="en">'
        '<body style="hyphens: auto">in­lighten­lighten­in')
    html, = page.children
    body, = html.children
    line_1, line_2, line_3, line_4 = body.children
    assert line_1.children[0].text == 'in\xad‐'
    assert line_2.children[0].text == 'lighten\xad‐'
    assert line_3.children[0].text == 'lighten\xad‐'
    assert line_4.children[0].text == 'in'
@assert_no_logs
def test_hyphenate_limit_zone_1():
    """A zero hyphenation zone always allows hyphenating."""
    page, = render_pages(
        '<html style="width: 12em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-limit-zone: 0" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) == 2
    assert lines[0].children[0].text.endswith('‐')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('‐', '') == 'mmmmm hyphénation'
@assert_no_logs
def test_hyphenate_limit_zone_2():
    """A large absolute zone (9em) prevents hyphenation: break at the space."""
    page, = render_pages(
        '<html style="width: 12em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-limit-zone: 9em" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('mm')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text == 'mmmmmhyphénation'
@assert_no_logs
def test_hyphenate_limit_zone_3():
    """A small percentage zone (5%) still allows hyphenating."""
    page, = render_pages(
        '<html style="width: 12em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-limit-zone: 5%" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) == 2
    assert lines[0].children[0].text.endswith('‐')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text.replace('‐', '') == 'mmmmm hyphénation'
@assert_no_logs
def test_hyphenate_limit_zone_4():
    """A large percentage zone (95%) prevents hyphenation."""
    page, = render_pages(
        '<html style="width: 12em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        'hyphenate-limit-zone: 95%" lang=fr>'
        'mmmmm hyphénation')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) > 1
    assert lines[0].children[0].text.endswith('mm')
    full_text = ''.join(line.children[0].text for line in lines)
    assert full_text == 'mmmmmhyphénation'
@assert_no_logs
@pytest.mark.parametrize('css, result', (
    # (hyphenate-limit-chars value, expected number of lines for 'hyphen')
    ('auto', 2),
    ('auto auto 0', 2),
    ('0 0 0', 2),
    ('4 4 auto', 1),
    ('6 2 4', 2),
    ('auto 1 auto', 2),
    ('7 auto auto', 1),
    ('6 auto auto', 2),
    ('5 2', 2),
    ('3', 2),
    ('2 4 6', 1),
    ('auto 4', 1),
    ('auto 2', 2),
))
def test_hyphenate_limit_chars(css, result):
    """hyphenate-limit-chars controls whether 'hyphen' may be split (2 lines)."""
    page, = render_pages(
        '<html style="width: 1em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        f'hyphenate-limit-chars: {css}" lang=en>'
        'hyphen')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) == result
@assert_no_logs
@pytest.mark.parametrize('css', (
    # light·en
    '3 3 3',  # 'en' is shorter than 3
    '3 6 2',  # 'light' is shorter than 6
    '8',  # 'lighten' is shorter than 8
))
def test_hyphenate_limit_chars_punctuation(css):
    """Surrounding punctuation must not count toward the character limits."""
    # See https://github.com/Kozea/WeasyPrint/issues/109
    page, = render_pages(
        '<html style="width: 1em; font-family: weasyprint">'
        '<style>'
        '  @font-face {src: url(weasyprint.otf); font-family: weasyprint}'
        '</style>'
        '<body style="hyphens: auto;'
        f'hyphenate-limit-chars: {css}" lang=en>'
        '..lighten..')
    html, = page.children
    body, = html.children
    lines = body.children
    assert len(lines) == 1
@assert_no_logs
@pytest.mark.parametrize('wrap, text, test, full_text', (
    ('anywhere', 'aaaaaaaa', lambda a: a > 1, 'aaaaaaaa'),
    ('break-word', 'aaaaaaaa', lambda a: a > 1, 'aaaaaaaa'),
    ('normal', 'aaaaaaaa', lambda a: a == 1, 'aaaaaaaa'),
    ('break-word', 'hyphenations', lambda a: a > 3,
     'hy\u2010phen\u2010ations'),
    ('break-word', "A splitted word. An hyphenated word.",
     lambda a: a > 8, "Asplittedword.Anhy\u2010phen\u2010atedword."),
))
def test_overflow_wrap(wrap, text, test, full_text):
    """Check overflow-wrap line counts and the reassembled text.

    ``test`` is a predicate on the number of generated lines and
    ``full_text`` is the expected concatenation of all line fragments
    (including automatic hyphens, U+2010).
    """
    page, = render_pages('''
      <style>
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        body {width: 80px; overflow: hidden; font-family: weasyprint}
        span {overflow-wrap: %s}
      </style>
      <body style="hyphens: auto;" lang="en">
        <span>%s
    ''' % (wrap, text))
    html, = page.children
    body, = html.children
    lines = []
    for line in body.children:
        box, = line.children
        # snake_case rename (was `textBox`), per PEP 8.
        text_box, = box.children
        lines.append(text_box.text)
    # `''.join(lines)` replaces the redundant `''.join(line for line in lines)`.
    lines_full_text = ''.join(lines)
    assert test(len(lines))
    assert full_text == lines_full_text
@assert_no_logs
@pytest.mark.parametrize('span_css, expected_lines', (
    # overflow-wrap: anywhere and break-word are only allowed to break a word
    # "if there are no otherwise-acceptable break points in the line", which
    # means they should not split a word if it fits cleanly into the next line.
    # This can be done accidentally if it is in its own inline element.
    ('overflow-wrap: anywhere', ['aaa', 'bbb']),
    ('overflow-wrap: break-word', ['aaa', 'bbb']),
    # On the other hand, word-break: break-all mandates a break anywhere at the
    # end of a line, even if the word could fit cleanly onto the next line.
    ('word-break: break-all', ['aaa b', 'bb']),
))
def test_wrap_overflow_word_break(span_css, expected_lines):
    """Compare per-line text against ``expected_lines`` for each wrap mode.

    Fixed: removed a leftover debug ``print(body.children)`` and a dead
    ``lines = body.children`` assignment that was immediately overwritten.
    """
    page, = render_pages('''
      <style>
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        body {width: 80px; overflow: hidden; font-family: weasyprint}
        span {%s}
      </style>
      <body>
        <span>aaa </span><span>bbb
    ''' % span_css)
    html, = page.children
    body, = html.children
    lines = []
    for line in body.children:
        # A line may contain several span boxes; concatenate their texts.
        line_text = ''
        for span_box in line.children:
            line_text += span_box.children[0].text
        lines.append(line_text)
    assert lines == expected_lines
@assert_no_logs
@pytest.mark.parametrize('wrap, text, body_width, expected_width', (
    # (overflow-wrap value, content, body width in px, expected td width)
    ('anywhere', 'aaaaaa', 10, 20),
    ('anywhere', 'aaaaaa', 40, 40),
    ('break-word', 'aaaaaa', 40, 120),
    ('normal', 'aaaaaa', 40, 120),
))
def test_overflow_wrap_2(wrap, text, body_width, expected_width):
    """overflow-wrap affects the min-content width used for table layout."""
    page, = render_pages('''
      <style>
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        body {width: %dpx; font-family: weasyprint; font-size: 20px}
        table {overflow-wrap: %s}
      </style>
      <table><tr><td>%s''' % (body_width, wrap, text))
    html, = page.children
    body, = html.children
    table_wrapper, = body.children
    table, = table_wrapper.children
    row_group, = table.children
    tr, = row_group.children
    td, = tr.children
    assert td.width == expected_width
@assert_no_logs
@pytest.mark.parametrize('wrap, text, body_width, expected_width', (
    # TODO: broken with Pango 1.50,
    # see https://gitlab.gnome.org/GNOME/pango/-/issues/646
    # ('anywhere', 'aaaaaa', 10, 20),
    ('anywhere', 'aaaaaa', 40, 40),
    ('break-word', 'aaaaaa', 40, 120),
    ('normal', 'abcdef', 40, 120),
))
def test_overflow_wrap_trailing_space(wrap, text, body_width, expected_width):
    """Same as test_overflow_wrap_2 but with a trailing space in the cell."""
    page, = render_pages('''
      <style>
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        body {width: %dpx; font-family: weasyprint; font-size: 20px}
        table {overflow-wrap: %s}
      </style>
      <table><tr><td>%s ''' % (body_width, wrap, text))
    html, = page.children
    body, = html.children
    table_wrapper, = body.children
    table, = table_wrapper.children
    row_group, = table.children
    tr, = row_group.children
    td, = tr.children
    assert td.width == expected_width
def white_space_lines(width, space):
    """Render the fixture text with the given body ``width`` (px) and
    ``white-space`` value, and return the resulting line boxes.

    The fixture text contains a run of spaces and an explicit newline so
    each white-space mode collapses/preserves them differently.
    """
    page, = render_pages('''
      <style>
        body { font-size: 100px; width: %dpx }
        span { white-space: %s }
      </style>
      <body><span>This +    \n    is text''' % (width, space))
    html, = page.children
    body, = html.children
    return body.children
@assert_no_logs
def test_white_space_1():
    """white-space: normal in a narrow body collapses spaces, wraps each word."""
    line1, line2, line3, line4 = white_space_lines(1, 'normal')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This'
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '+'
    box3, = line3.children
    text3, = box3.children
    assert text3.text == 'is'
    box4, = line4.children
    text4, = box4.children
    assert text4.text == 'text'
@assert_no_logs
def test_white_space_2():
    """white-space: pre preserves spaces and only breaks at the newline."""
    line1, line2 = white_space_lines(1, 'pre')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This +    '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '    is text'
@assert_no_logs
def test_white_space_3():
    """white-space: nowrap collapses spaces and never wraps."""
    line1, = white_space_lines(1, 'nowrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + is text'
@assert_no_logs
def test_white_space_4():
    """white-space: pre-wrap preserves spaces but still wraps in a narrow body."""
    line1, line2, line3, line4, line5 = white_space_lines(1, 'pre-wrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '+    '
    box3, = line3.children
    text3, = box3.children
    assert text3.text == '    '
    box4, = line4.children
    text4, = box4.children
    assert text4.text == 'is '
    box5, = line5.children
    text5, = box5.children
    assert text5.text == 'text'
@assert_no_logs
def test_white_space_5():
    """white-space: pre-line collapses spaces but honours the newline."""
    line1, line2, line3, line4 = white_space_lines(1, 'pre-line')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This'
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '+'
    box3, = line3.children
    text3, = box3.children
    assert text3.text == 'is'
    box4, = line4.children
    text4, = box4.children
    assert text4.text == 'text'
@assert_no_logs
def test_white_space_6():
    """white-space: normal in a wide body fits everything on one line."""
    line1, = white_space_lines(1000000, 'normal')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + is text'
@assert_no_logs
def test_white_space_7():
    """white-space: pre in a wide body still breaks at the explicit newline."""
    line1, line2 = white_space_lines(1000000, 'pre')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This +    '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '    is text'
@assert_no_logs
def test_white_space_8():
    """white-space: nowrap in a wide body yields one collapsed line."""
    line1, = white_space_lines(1000000, 'nowrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This + is text'
@assert_no_logs
def test_white_space_9():
    """white-space: pre-wrap in a wide body keeps spaces, breaks at newline."""
    line1, line2 = white_space_lines(1000000, 'pre-wrap')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This +    '
    box2, = line2.children
    text2, = box2.children
    assert text2.text == '    is text'
@assert_no_logs
def test_white_space_10():
    """white-space: pre-line in a wide body collapses spaces, keeps newline."""
    line1, line2 = white_space_lines(1000000, 'pre-line')
    box1, = line1.children
    text1, = box1.children
    assert text1.text == 'This +'
    box2, = line2.children
    text2, = box2.children
    assert text2.text == 'is text'
@assert_no_logs
def test_white_space_11():
    """A <br> inside a zero-width <pre> still produces two lines."""
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/813
    page, = render_pages('''
      <style>
        pre { width: 0 }
      </style>
      <body><pre>This<br/>is text''')
    html, = page.children
    body, = html.children
    pre, = body.children
    line1, line2 = pre.children
    text1, box = line1.children
    assert text1.text == 'This'
    assert box.element_tag == 'br'
    text2, = line2.children
    assert text2.text == 'is text'
@assert_no_logs
def test_white_space_12():
    """Preformatted text around an inline span keeps its spaces intact."""
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/813
    page, = render_pages('''
      <style>
        pre { width: 0 }
      </style>
      <body><pre>This is <span>lol</span> text''')
    html, = page.children
    body, = html.children
    pre, = body.children
    line1, = pre.children
    text1, span, text2 = line1.children
    assert text1.text == 'This is '
    assert span.element_tag == 'span'
    assert text2.text == ' text'
@assert_no_logs
@pytest.mark.parametrize('value, width', (
    # (tab-size value, expected line width in px at 16px per character)
    (8, 144),  # (2 + (8 - 1)) * 16
    (4, 80),  # (2 + (4 - 1)) * 16
    ('3em', 64),  # (2 + (3 - 1)) * 16
    ('25px', 41),  # 2 * 16 + 25 - 1 * 16
    # (0, 32),  # See Layout.set_tabs
))
def test_tab_size(value, width):
    """tab-size (number, em or px) controls the width of a tab stop."""
    page, = render_pages('''
      <style>
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        pre { tab-size: %s; font-family: weasyprint }
      </style>
      <pre>a&#9;a</pre>
    ''' % value)
    html, = page.children
    body, = html.children
    paragraph, = body.children
    line, = paragraph.children
    assert line.width == width
@assert_no_logs
def test_text_transform():
    """All five text-transform values applied to the same mixed-case input."""
    page, = render_pages('''
      <style>
        p { text-transform: capitalize }
        p+p { text-transform: uppercase }
        p+p+p { text-transform: lowercase }
        p+p+p+p { text-transform: full-width }
        p+p+p+p+p { text-transform: none }
      </style>
<p>hé lO1</p><p>hé lO1</p><p>hé lO1</p><p>hé lO1</p><p>hé lO1</p>
    ''')
    html, = page.children
    body, = html.children
    p1, p2, p3, p4, p5 = body.children
    line1, = p1.children
    text1, = line1.children
    assert text1.text == 'Hé Lo1'
    line2, = p2.children
    text2, = line2.children
    assert text2.text == 'HÉ LO1'
    line3, = p3.children
    text3, = line3.children
    assert text3.text == 'hé lo1'
    line4, = p4.children
    text4, = line4.children
    # full-width maps to the fullwidth Unicode forms (and ideographic space).
    assert text4.text == '\uff48é\u3000\uff4c\uff2f\uff11'
    line5, = p5.children
    text5, = line5.children
    assert text5.text == 'hé lO1'
@assert_no_logs
def test_text_floating_pre_line():
    """Floated pre-line content must render without raising (smoke test)."""
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/610
    # No assertions: unpacking `page,` already checks a single page renders.
    page, = render_pages('''
      <div style="float: left; white-space: pre-line">This is
      oh this end </div>
    ''')
@assert_no_logs
@pytest.mark.parametrize(
    'leader, content', (
        # (leader() argument, expected generated text)
        ('dotted', '.'),
        ('solid', '_'),
        ('space', ' '),
        ('" .-"', ' .-'),
    )
)
def test_leader_content(leader, content):
    """content: leader(...) generates the corresponding filler text."""
    page, = render_pages('''
      <style>div::after { content: leader(%s) }</style>
      <div></div>
    ''' % leader)
    html, = page.children
    body, = html.children
    div, = body.children
    line, = div.children
    after, = line.children
    inline, = after.children
    assert inline.children[0].text == content
@pytest.mark.xfail
@assert_no_logs
def test_max_lines():
    """max-lines: 2 should push the third line into a new block (xfail)."""
    page, = render_pages('''
      <style>
        @page {size: 10px 10px;}
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        p {
          font-family: weasyprint;
          font-size: 2px;
          max-lines: 2;
        }
      </style>
      <p>
        abcd efgh ijkl
      </p>
    ''')
    html, = page.children
    body, = html.children
    p1, p2 = body.children
    line1, line2 = p1.children
    line3, = p2.children
    text1, = line1.children
    text2, = line2.children
    text3, = line3.children
    assert text1.text == 'abcd'
    assert text2.text == 'efgh'
    assert text3.text == 'ijkl'
@assert_no_logs
def test_continue():
    """continue: discard drops the overflowing third line entirely."""
    page, = render_pages('''
      <style>
        @page {size: 10px 4px;}
        @font-face {src: url(weasyprint.otf); font-family: weasyprint}
        div {
          continue: discard;
          font-family: weasyprint;
          font-size: 2px;
        }
      </style>
      <div>
        abcd efgh ijkl
      </div>
    ''')
    html, = page.children
    body, = html.children
    p, = body.children
    line1, line2 = p.children
    text1, = line1.children
    text2, = line2.children
    assert text1.text == 'abcd'
    assert text2.text == 'efgh'
| 30.959215 | 79 | 0.603293 |
8d0088cba9d02e30d0af3ca974b8990116143841 | 1,404 | py | Python | 19-dyn-attr-prop/oscon/explore2.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 5,651 | 2015-01-06T21:58:46.000Z | 2022-03-31T13:39:07.000Z | 19-dyn-attr-prop/oscon/explore2.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 42 | 2016-12-11T19:17:11.000Z | 2021-11-23T19:41:16.000Z | 19-dyn-attr-prop/oscon/explore2.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 2,394 | 2015-01-18T10:57:38.000Z | 2022-03-31T11:41:12.000Z | """
explore2.py: Script to explore the OSCON schedule feed
>>> from osconfeed import load
>>> raw_feed = load()
>>> feed = FrozenJSON(raw_feed)
>>> len(feed.Schedule.speakers)
357
>>> sorted(feed.Schedule.keys())
['conferences', 'events', 'speakers', 'venues']
>>> feed.Schedule.speakers[-1].name
'Carina C. Zona'
>>> talk = feed.Schedule.events[40]
>>> talk.name
'There *Will* Be Bugs'
>>> talk.speakers
[3471, 5199]
>>> talk.flavor
Traceback (most recent call last):
...
KeyError: 'flavor'
"""
# BEGIN EXPLORE2
from collections import abc
class FrozenJSON:
    """A read-only façade for navigating a JSON-like object
    using attribute notation.

    Mappings become ``FrozenJSON`` instances, mutable sequences become
    lists of converted items, and any other value passes through as-is.
    Keys that collide with Python keywords get a trailing underscore
    (e.g. ``class`` -> ``class_``).  Missing keys raise ``KeyError``
    (see the module doctest for ``talk.flavor``).
    """

    def __new__(cls, arg):  # <1>
        if isinstance(arg, abc.Mapping):
            return super().__new__(cls)  # <2>
        elif isinstance(arg, abc.MutableSequence):  # <3>
            # Convert each list item; the list itself stays a plain list.
            return [cls(item) for item in arg]
        else:
            return arg

    def __init__(self, mapping):
        # Fix: `iskeyword` was used below but never imported in this
        # module; import it locally so the fix is self-contained.
        from keyword import iskeyword

        self.__data = {}
        for key, value in mapping.items():
            if iskeyword(key):
                key += '_'
            self.__data[key] = value

    def __getattr__(self, name):
        # Delegate dict methods (keys, items, ...) to the underlying data;
        # everything else is treated as a key lookup, converted lazily.
        if hasattr(self.__data, name):
            return getattr(self.__data, name)
        else:
            return FrozenJSON(self.__data[name])  # <4>
| 25.527273 | 59 | 0.570513 |
2951691d7199bc3ac372a38c09522d8d04267b3a | 1,219 | py | Python | notebooks/initilization.py | mssalvador/NextProject | b9e223f8f1de803fd3865c3f2148a417f88556da | [
"Apache-2.0"
] | 1 | 2017-10-10T07:00:46.000Z | 2017-10-10T07:00:46.000Z | notebooks/initilization.py | mssalvador/NextProject | b9e223f8f1de803fd3865c3f2148a417f88556da | [
"Apache-2.0"
] | null | null | null | notebooks/initilization.py | mssalvador/NextProject | b9e223f8f1de803fd3865c3f2148a417f88556da | [
"Apache-2.0"
] | 2 | 2018-11-19T09:07:49.000Z | 2018-11-28T12:54:25.000Z | # Make sure that Python starts in Workflow-folder or else the modules will be screewed up!
import sys
import os
import getpass
import datetime
# Graphics importation and pandas
import seaborn as sb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sb.set(style='whitegrid')
# Something about adding a thing to Python path.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
print(module_path)
# What's the time?
today = datetime.date.today()
# Identify user
user = getpass.getuser()
if user == "sidsel":
parquet_path = "/home/sidsel/workspace/sparkdata/parquet"
elif user == "svanhmic":
parquet_path = "/home/svanhmic/workspace/data/DABAI/sparkdata/parquet"
# Start the logger.
import logging
logger_tester = logging.getLogger(__name__)
logger_tester.setLevel(logging.INFO)
logger_file_handler_param = logging.FileHandler(
'/tmp/workflow_notebook_test_{!s}.log'.format(today.strftime('%d-%m-%Y')))
logger_formatter_param = logging.Formatter(
'%(asctime)s;%(levelname)s;%(name)s;%(message)s')
logger_tester.addHandler(logger_file_handler_param)
logger_file_handler_param.setFormatter(logger_formatter_param)
| 29.02381 | 90 | 0.770304 |
1f1b6728e330570d93c0f2f11a65791a018afe8f | 753 | py | Python | POO/arquivos-livro/07/Python3/DeputadoFederal.py | ell3a/estudos-python | 09808a462aa3e73ad433501acb11f62217548af8 | [
"MIT"
] | null | null | null | POO/arquivos-livro/07/Python3/DeputadoFederal.py | ell3a/estudos-python | 09808a462aa3e73ad433501acb11f62217548af8 | [
"MIT"
] | null | null | null | POO/arquivos-livro/07/Python3/DeputadoFederal.py | ell3a/estudos-python | 09808a462aa3e73ad433501acb11f62217548af8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# coding: utf-8
"""Classe DeputadoFederal """
from Politico import Politico
class DeputadoFederal(Politico):
    """Federal deputy: a Politico elected by a state to the Chamber."""

    def __init__(self, nome, partido, estado):
        """Build a DeputadoFederal from name, party and electing state."""
        super().__init__()
        self.set_nome(nome)
        self.set_partido(partido)
        self.set_estado(estado)
        self.set_funcao("propor na Câmara leis federais em benefício da população.")

    def apresentacao(self):
        """Print the base introduction followed by the deputy details."""
        super().apresentacao()
        print('sou deputado federal')
        print('Minha função é ' + self.get_funcao())
        print('Fui eleito por ' + self.get_estado())
        print('============================')
| 31.375 | 84 | 0.622842 |
e92468edf79bdfe38cfd10b452629a87fff851cc | 97,778 | py | Python | synchrophasor/frame.py | aleneus/pypmu | 66e6c495a91efb8a6018061ff7955a6654b1404d | [
"BSD-3-Clause"
] | 50 | 2016-03-30T18:48:34.000Z | 2022-03-11T01:41:37.000Z | synchrophasor/frame.py | aleneus/pypmu | 66e6c495a91efb8a6018061ff7955a6654b1404d | [
"BSD-3-Clause"
] | 32 | 2017-10-20T06:52:38.000Z | 2022-03-18T14:13:03.000Z | synchrophasor/frame.py | aleneus/pypmu | 66e6c495a91efb8a6018061ff7955a6654b1404d | [
"BSD-3-Clause"
] | 36 | 2016-05-17T19:43:14.000Z | 2022-03-17T18:37:22.000Z | """
# IEEE Std C37.118.2 - 2011 Frame Implementation #
This script provides implementation of IEEE Standard for Synchrophasor
Data Transfer for Power Systems.
**IEEE C37.118.2 standard** defines four types of frames:
* Data Frames.
* Configuration Frames (multiple versions).
* Command Frames.
* Header Frames.
"""
import collections
import collections.abc
from abc import ABCMeta, abstractmethod
from math import sqrt, atan2
from struct import pack, unpack
from time import time

from synchrophasor.utils import crc16xmodem
from synchrophasor.utils import list2bytes
__author__ = "Stevan Sandi"
__copyright__ = "Copyright (c) 2016, Tomo Popovic, Stevan Sandi, Bozo Krstajic"
__credits__ = []
__license__ = "BSD-3"
__version__ = "1.0.0-alpha"
class CommonFrame(metaclass=ABCMeta):
    """
    ## CommonFrame ##

    Abstract base class representing the words (fields) common to all
    IEEE C37.118.2 frame types: SYNC (frame type + version), IDCODE,
    SOC and FRASEC.

    Subclasses must implement:

    * ``convert2bytes()`` - converting the frame to bytes ready for sending.
    * ``convert2frame()`` - converting an array of bytes to a specific frame.

    **Attributes:**

    * ``frame_type`` **(int)** - Defines frame type.
    * ``version`` **(int)** - Standard version. Default value: ``1``.
    * ``pmu_id_code`` **(int)** - Data stream ID number.
    * ``soc`` **(int)** - UNIX timestamp. Default value: ``None``.
    * ``frasec`` **(int)** - Fraction of second and Time Quality. Default value: ``None``.

    **Raises:**

        FrameError
    When it's not possible to create valid frame, usually due invalid parameter value.
    """

    # Frame-type word -> numeric code carried in bits 6-4 of the SYNC field.
    FRAME_TYPES = { "data": 0, "header": 1, "cfg1": 2, "cfg2": 3, "cfg3": 5, "cmd": 4 }

    # Invert FRAME_TYPES codes to get FRAME_TYPES_WORDS
    FRAME_TYPES_WORDS = { code: word for word, code in FRAME_TYPES.items() }

    def __init__(self, frame_type, pmu_id_code, soc=None, frasec=None, version=1):
        """
        CommonFrame abstract class.

        :param string frame_type: Defines frame type (key of ``FRAME_TYPES``).
        :param int pmu_id_code: Data stream ID number, 1..65534.
        :param int soc: UNIX timestamp (second of century), optional.
        :param mixed frasec: fraction of second (int) or a tuple
            ``(fr_seconds, leap_dir, leap_occ, leap_pen, time_quality)``, optional.
        :param int version: Standard version, 1..15. Default value: ``1``.
        :return:
        """
        self.set_frame_type(frame_type)
        self.set_version(version)
        self.set_id_code(pmu_id_code)

        # Timestamp is optional here: convert2bytes() stamps the frame with
        # the current time if none was provided.
        if soc or frasec:
            self.set_time(soc, frasec)

    def set_frame_type(self, frame_type):
        """
        Setter for ``frame_type``.

        Mapping (word -> code): data=0, header=1, cfg1=2, cfg2=3, cmd=4, cfg3=5.

        **Raises:**

            FrameError
        When ``frame_type`` value provided is not specified in ``FRAME_TYPES``.
        """
        if frame_type not in CommonFrame.FRAME_TYPES:
            raise FrameError("Unknown frame type. Possible options: [data, header, cfg1, cfg2, cfg3, cmd].")
        else:
            self._frame_type = CommonFrame.FRAME_TYPES[frame_type]

    def get_frame_type(self):
        """Return the frame type as a word (e.g. ``"data"``)."""
        return CommonFrame.FRAME_TYPES_WORDS[self._frame_type]

    @staticmethod
    def extract_frame_type(byte_data):
        """Return only the type of the frame as a word.

        Intended for the stream splitter: avoids building a full frame
        instance just to look at the type bits.

        **Raises:**

            FrameError
        When the frame's CRC check fails.
        """
        # Validate the frame before trusting its header bytes.
        # (Uses CommonFrame._check_crc directly; the previous forward
        # reference to CommandFrame resolved to this same inherited method.)
        if not CommonFrame._check_crc(byte_data):
            raise FrameError("CRC failed. Frame not valid.")

        # Second byte: high 4 bits carry the frame-type code.
        frame_type = int.from_bytes([byte_data[1]], byteorder="big", signed=False) >> 4

        return CommonFrame.FRAME_TYPES_WORDS[frame_type]

    def set_version(self, version):
        """
        Setter for frame IEEE standard ``version`` (1..15).

        **Raises:**

            FrameError
        When ``version`` value provided is out of range.
        """
        if not 1 <= version <= 15:
            raise FrameError("VERSION number out of range. 1<= VERSION <= 15")
        else:
            self._version = version

    def get_version(self):
        return self._version

    def set_id_code(self, id_code):
        """
        Setter for ``pmu_id_code`` - the data stream identifier (1..65534).

        **Raises:**

            FrameError
        When ``id_code`` value provided is out of range.
        """
        if not 1 <= id_code <= 65534:
            raise FrameError("ID CODE out of range. 1 <= ID_CODE <= 65534")
        else:
            self._pmu_id_code = id_code

    def get_id_code(self):
        return self._pmu_id_code

    def set_time(self, soc=None, frasec=None):
        """
        Setter for ``soc`` and ``frasec``. Missing values are derived
        from the current system clock.

        **Params:**

        * ``soc`` **(int)** - UNIX timestamp; see ``set_soc()``.
        * ``frasec`` **(int)** or **(tuple)** - Fraction of second and Time
          Quality; see ``set_frasec()``.

        **Raises:**

            FrameError
        When ``soc`` value provided is out of range or ``frasec`` is not valid.
        """
        t = time()  # Current timestamp, used for any missing part.

        if soc:
            self.set_soc(soc)
        else:
            self.set_soc(int(t))

        if frasec:
            # collections.abc.Sequence: the bare collections.Sequence alias
            # was removed in Python 3.10.
            if isinstance(frasec, collections.abc.Sequence):
                self.set_frasec(*frasec)
            else:
                self.set_frasec(frasec)  # Fraction only; defaults for flags.
        else:
            # Fraction of second (digits after the decimal point), first 6
            # digits only so the value fits the 24-bit FRASEC field.
            self.set_frasec(int((((repr((t % 1))).split("."))[1])[0:6]))

    def set_soc(self, soc):
        """
        Setter for ``soc`` - second of century (32-bit unsigned UNIX timestamp).

        **Raises:**

            FrameError
        When ``soc`` value provided is out of range.
        """
        if not 0 <= soc <= 4294967295:
            raise FrameError("Time stamp out of range. 0 <= SOC <= 4294967295")
        else:
            self._soc = soc

    def get_soc(self):
        return self._soc

    def set_frasec(self, fr_seconds, leap_dir="+", leap_occ=False, leap_pen=False, time_quality=0):
        """
        Setter for ``frasec`` - Fraction of Second and Time Quality.

        **Params:**

        * ``fr_seconds`` **(int)** - Fraction of Second, 24-bit unsigned
          (0..16777215).
        * ``leap_dir`` **(char)** - Leap Second Direction: ``+`` for add (0),
          ``-`` for delete (1). Default: ``+``.
        * ``leap_occ`` **(bool)** - Leap Second Occurred; ``True`` in the first
          second after the leap second occurs, remains set for 24 h.
        * ``leap_pen`` **(bool)** - Leap Second Pending; ``True`` not more than
          60 s nor less than 1 s before a leap second occurs.
        * ``time_quality`` **(int)** - Message Time Quality code (0..15,
          excluding 12-14): 0 = locked to UTC, 1..11 = within 10^-9 s .. 10 s
          of UTC, 15 = clock failure.

        **Raises:**

            FrameError
        When ``fr_seconds`` or ``time_quality`` is out of range, or
        ``leap_dir`` is not ``'+'``/``'-'``.
        """
        if not 0 <= fr_seconds <= 16777215:
            raise FrameError("Frasec out of range. 0 <= FRASEC <= 16777215 ")

        if (not 0 <= time_quality <= 15) or (time_quality in [12, 13, 14]):
            raise FrameError("Time quality flag out of range. 0 <= MSG_TQ <= 15")

        if leap_dir not in ["+", "-"]:
            raise FrameError("Leap second direction must be '+' or '-'")

        frasec = 1 << 1  # Bit 7: Reserved for future use. Set to 1 while building, cleared below.

        if leap_dir == "-":  # Bit 6: Leap second direction [+ = 0] and [- = 1].
            frasec |= 1

        frasec <<= 1

        if leap_occ:  # Bit 5: Leap Second Occurred flag.
            frasec |= 1

        frasec <<= 1

        if leap_pen:  # Bit 4: Leap Second Pending flag.
            frasec |= 1

        frasec <<= 4  # Shift left 4 bits for message time quality

        # Bits 3-0: Message Time Quality indicator code.
        frasec |= time_quality

        mask = 1 << 7  # Change MSB to 0 for standard compliance.
        frasec ^= mask

        frasec <<= 24  # Shift 24 bits for fractional time.
        frasec |= fr_seconds  # Bits 23-0: Fraction of second.

        self._frasec = frasec

    def get_frasec(self):
        """Return (fr_seconds, leap_dir, leap_occ, leap_pen, time_quality)."""
        return self._int2frasec(self._frasec)

    @staticmethod
    def _int2frasec(frasec_int):
        """Decode the 32-bit FRASEC word into
        ``(fr_seconds, leap_dir, leap_occ, leap_pen, time_quality)``."""
        tq = frasec_int >> 24
        leap_dir = tq & 0b01000000
        leap_occ = tq & 0b00100000
        leap_pen = tq & 0b00010000

        time_quality = tq & 0b00001111

        # Reassign values to create Command frame
        leap_dir = "-" if leap_dir else "+"
        leap_occ = bool(leap_occ)
        leap_pen = bool(leap_pen)

        # FRASEC fraction is a 24-bit field (0..16777215): mask with 2**24-1.
        # (A 2**23-1 mask would silently drop the most significant bit and
        # break the round-trip with set_frasec for values >= 2**23.)
        fr_seconds = frasec_int & (2**24 - 1)

        return fr_seconds, leap_dir, leap_occ, leap_pen, time_quality

    @staticmethod
    def _get_data_format_size(data_format):
        """
        Getter for measurement sizes implied by the FORMAT word.

        **Params:**

        * ``data_format`` **(int)** - FORMAT field of a data frame (16-bit flags).

        **Returns:**

        * ``dict`` with PHASOR, ANALOG, and FREQ measurement size in bytes:
          ``{'phasor': n, 'analog': n, 'freq': n}``.
        """
        if (data_format & 2) != 0:  # Bit 1: phasors as 2 floats (8 B) vs 2x16-bit ints (4 B).
            phasors_byte_size = 8
        else:
            phasors_byte_size = 4

        if (data_format & 4) != 0:  # Bit 2: analogs as float (4 B) vs 16-bit int (2 B).
            analog_byte_size = 4
        else:
            analog_byte_size = 2

        if (data_format & 8) != 0:  # Bit 3: FREQ/DFREQ as float (4 B) vs 16-bit int (2 B).
            freq_byte_size = 4
        else:
            freq_byte_size = 2

        return { "phasor": phasors_byte_size, "analog": analog_byte_size, "freq": freq_byte_size }

    def set_data_format(self, data_format, num_streams):
        """
        Setter for frame data format.

        A format may be given as an int 0..15 (bit 0: phasor polar, bit 1:
        phasor float, bit 2: analog float, bit 3: freq float) or as a tuple of
        four bools in that order. With ``num_streams > 1`` a list with one
        format per stream is required.

        **Params:**

        * ``data_format`` **(mixed)** - single ``int``/``tuple``, or a list of
          them when ``num_streams > 1``.
        * ``num_streams`` **(int)** - Number of measurement streams packed in
          one data frame.

        **Raises:**

            FrameError
        When ``data_format`` list length differs from ``num_streams`` (for
        ``num_streams > 1``), or a format value is out of range.
        """
        if num_streams > 1:
            if not isinstance(data_format, list) or num_streams != len(data_format):
                raise FrameError("When NUM_STREAMS > 1 provide FORMATs as list with NUM_STREAMS elements.")

            data_formats = []  # Format tuples transformed to ints
            for format_type in data_format:
                if isinstance(format_type, tuple):  # Convert tuples to ints
                    data_formats.append(CommonFrame._format2int(*format_type))
                else:
                    if not 0 <= format_type <= 15:  # Ints must fit the 4 flag bits
                        raise FrameError("Format Type out of range. 0 <= FORMAT <= 15")
                    else:
                        data_formats.append(format_type)

            self._data_format = data_formats
        else:
            if isinstance(data_format, tuple):
                self._data_format = CommonFrame._format2int(*data_format)
            else:
                if not 0 <= data_format <= 15:
                    raise FrameError("Format Type out of range. 0 <= FORMAT <= 15")
                self._data_format = data_format

    def get_data_format(self):
        """Return format(s) as bool tuple(s)
        ``(phasor_polar, phasor_float, analogs_float, freq_float)``."""
        if isinstance(self._data_format, list):
            return [self._int2format(df) for df in self._data_format]
        else:
            return self._int2format(self._data_format)

    @staticmethod
    def _format2int(phasor_polar=False, phasor_float=False, analogs_float=False, freq_float=False):
        """
        Convert the boolean representation of the data format to an int.

        **Params:**

        * ``phasor_polar`` **(bool)** - ``True``: magnitude/angle (polar);
          ``False``: rectangular.
        * ``phasor_float`` **(bool)** - ``True``: floating point; ``False``:
          16-bit integer.
        * ``analogs_float`` **(bool)** - ``True``: floating point; ``False``:
          16-bit integer.
        * ``freq_float`` **(bool)** - ``True``: FREQ/DFREQ floating point;
          ``False``: 16-bit integer.

        **Returns:**

        * ``int`` representation of data format (0..15).
        """
        data_format = 1 << 1  # Placeholder bit; cleared by the XOR below.

        if freq_float:
            data_format |= 1
        data_format <<= 1

        if analogs_float:
            data_format |= 1
        data_format <<= 1

        if phasor_float:
            data_format |= 1
        data_format <<= 1

        if phasor_polar:
            data_format |= 1

        mask = 1 << 4
        data_format ^= mask  # Drop the placeholder so only bits 3-0 remain.

        return data_format

    @staticmethod
    def _int2format(data_format):
        """Inverse of ``_format2int``: int -> tuple of four bools."""
        phasor_polar = data_format & 0b0001
        phasor_float = data_format & 0b0010
        analogs_float = data_format & 0b0100
        freq_float = data_format & 0b1000

        return bool(phasor_polar), bool(phasor_float), bool(analogs_float), bool(freq_float)

    @staticmethod
    def _check_crc(byte_data):
        """Return ``True`` if the trailing 2-byte CRC-CCITT matches the body."""
        crc_calculated = crc16xmodem(byte_data[0:-2], 0xffff).to_bytes(2, "big")  # Calculate CRC

        if byte_data[-2:] != crc_calculated:
            return False

        return True

    @abstractmethod
    def convert2bytes(self, byte_message):
        """Frame the given payload: prepend SYNC, FRAMESIZE, IDCODE, SOC and
        FRASEC, append the CRC-CCITT checksum, and return the full frame as
        bytes. Subclasses build their type-specific payload and delegate here.
        """
        # SYNC word in CommonFrame starting with AA hex word + frame type + version
        sync_b = (0xaa << 8) | (self._frame_type << 4) | self._version
        sync_b = sync_b.to_bytes(2, "big")

        # FRAMESIZE: 2B SYNC + 2B FRAMESIZE + 2B IDCODE + 4B SOC + 4B FRASEC + len(Command) + 2B CHK
        frame_size_b = (16 + len(byte_message)).to_bytes(2, "big")

        # PMU ID CODE
        pmu_id_code_b = self._pmu_id_code.to_bytes(2, "big")

        # If timestamp not given set timestamp
        if not hasattr(self, "_soc") and not hasattr(self, "_frasec"):
            self.set_time()
        elif not self._soc and not self._frasec:
            self.set_time()

        # SOC
        soc_b = self._soc.to_bytes(4, "big")

        # FRASEC
        frasec_b = self._frasec.to_bytes(4, "big")

        # CHK
        crc_chk_b = crc16xmodem(sync_b + frame_size_b + pmu_id_code_b + soc_b + frasec_b + byte_message, 0xffff)

        return sync_b + frame_size_b + pmu_id_code_b + soc_b + frasec_b + byte_message + crc_chk_b.to_bytes(2, "big")

    @abstractmethod
    def convert2frame(byte_data, cfg=None):
        """Dispatch raw ``byte_data`` to the matching frame class based on the
        frame-type bits in the SYNC word. ``cfg`` is forwarded only to
        ``DataFrame.convert2frame()``, which needs a configuration to decode.

        **Raises:**

            FrameError
        When the frame's CRC check fails.
        """
        convert_method = {
            0: DataFrame.convert2frame,
            1: HeaderFrame.convert2frame,
            2: ConfigFrame1.convert2frame,
            3: ConfigFrame2.convert2frame,
            4: CommandFrame.convert2frame,
            5: ConfigFrame3.convert2frame,
        }

        if not CommonFrame._check_crc(byte_data):
            raise FrameError("CRC failed. Frame not valid.")

        # Get second byte and determine frame type by shifting right to get higher 4 bits
        frame_type = int.from_bytes([byte_data[1]], byteorder="big", signed=False) >> 4

        if frame_type == 0:  # DataFrame: pass Configuration to decode message
            return convert_method[frame_type](byte_data, cfg)

        return convert_method[frame_type](byte_data)
class ConfigFrame1(CommonFrame):
"""
## ConfigFrame1 ##
ConfigFrame1 is class which represents configuration frame v1.
Configuration frame version 1 carries info about device reporting
ability.
Class implements two abstract methods from super class.
* ``convert2bytes()`` - for converting ConfigFrame1 to bytes.
* ``convert2frame()`` - which converts array of bytes to ConfigFrame1.
Each instance of ConfigFrame1 class will have following attributes.
**Attributes:**
* ``frame_type`` **(int)** - Defines frame type. Inherited from ``CommonFrame``.
* ``version`` **(int)** - Standard version. Inherited from ``CommonFrame``. Default value: ``1``.
* ``pmu_id_code`` **(int)** - PMU Id code which identifies data stream. Inherited from ``CommonFrame``.
* ``soc`` **(int)** - UNIX timestamp. Default value: ``None``. Inherited from ``CommonFrame``.
* ``frasec`` **(int)** - Fraction of second and Time Quality. Default value: ``None``.
Inherited from ``CommonFrame``.
* ``time_base`` **(int)** - Resolution of the fractional second time stamp in all frames.
* ``num_pmu`` **(int)** - Number of PMUs (data streams) included in single ``DataFrame``.
* ``multistreaming`` **(bool)** - ``True`` if ``num_pmu > 1``. That means data frame consist of multiple
measurement streams.
* ``station_name`` **(mixed)** - Station name ``(string)`` or station names ``(list)`` if ``multistreaming``.
* ``id_code`` **(mixed)** - Measurement stream ID code ``(int)`` or ``(list)`` if ``multistreaming``. Each ID
identifies source PMU of each data block.
* ``data_format`` **(mixed)** - Data format for each data stream. Inherited from ``CommonFrame``.
* ``phasor_num`` **(mixed)** - Number of phasors ``(int)`` or ``(list)`` if ``multistreaming``.
* ``analog_num`` **(mixed)** - Number of analog values ``(int)`` or ``(list)`` if ``multistreaming``.
* ``digital_num`` **(mixed)** - Number of digital status words ``(int)`` or ``(list)`` if ``multistreaming``.
* ``channel_names`` **(list)** - List of phasor and channel names for phasor, analog and digital channel.
If ``multistreaming`` it's list of lists.
* ``ph_units`` **(list)** - Conversion factor for phasor channels. If ``multistreaming`` list of lists.
* ``an_units`` **(list)** - Conversion factor for analog channels. If ``multistreaming`` list of lists.
* ``dig_units`` **(list)** - Mask words for digital status word. If ``multistreaming`` list of lists.
* ``fnom`` **(mixed)** - Nominal frequency code and flags. If ``multistreaming`` list of ints.
* ``cfg_count`` **(mixed)** - Configuration change count. If ``multistreaming`` list of ints.
* ``data_rate`` **(int)** - Frames per second or seconds per frame (if negative ``int``).
**Raises:**
FrameError
When it's not possible to create valid frame, usually due invalid parameter value.
"""
def __init__(self, pmu_id_code, time_base, num_pmu, station_name, id_code, data_format, phasor_num, analog_num,
digital_num, channel_names, ph_units, an_units, dig_units, f_nom, cfg_count, data_rate,
soc=None, frasec=None, version=1):
super().__init__("cfg1", pmu_id_code, soc, frasec, version) # Init CommonFrame with 'cfg1' frame type
self.set_time_base(time_base)
self.set_num_pmu(num_pmu)
self.set_stn_names(station_name)
self.set_stream_id_code(id_code)
self.set_data_format(data_format, num_pmu)
self.set_phasor_num(phasor_num)
self.set_analog_num(analog_num)
self.set_digital_num(digital_num)
self.set_channel_names(channel_names)
self.set_phasor_units(ph_units)
self.set_analog_units(an_units)
self.set_digital_units(dig_units)
self.set_fnom(f_nom)
self.set_cfg_count(cfg_count)
self.set_data_rate(data_rate)
def set_time_base(self, time_base):
"""
### set_time_base() ###
Setter for time base. Resolution of the fractional second time stamp (FRASEC).
Bits 31-24: Reserved for flags (high 8 bits).
Bits 23-0: 24-bit unsigned integer which subdivision of the second that the FRASEC
is based on.
**Params:**
* ``time_base`` **(int)** - Should be number between ``1`` and ``16777215``.
**Raises:**
FrameError
When ``time_base`` value provided is out of range.
"""
if not 1 <= time_base <= 16777215:
raise FrameError("Time Base out of range. 1 <= TIME_BASE <= 16777215 ")
else:
self._time_base = time_base
def get_time_base(self):
return self._time_base
def set_num_pmu(self, num_pmu):
"""
### set_num_pmu() ###
Setter for number of PMUs. The number of PMUs included in data frame. No limit
specified. The actual limit will be determined by the limit of 65 535 bytes in one
frame (FRAMESIZE filed).
Also, if ``num_pmu`` > ``1`` multistreaming will be set to ``True`` meaning that
more then one data stream will be sent inside data frame.
**Params:**
* ``num_pmu`` **(int)** - Should be number between ``1`` and ``65535``.
**Raises:**
FrameError
When ``num_pmu`` value provided is out of range.
"""
if not 1 <= num_pmu <= 65535:
raise FrameError("Number of PMUs out of range. 1 <= NUM_PMU <= 65535")
else:
self._num_pmu = num_pmu
self._multistreaming = True if num_pmu > 1 else False
def get_num_pmu(self):
return self._num_pmu
def is_multistreaming(self):
return self._multistreaming
def set_stn_names(self, station_name):
"""
### set_stn_names() ###
Setter for station names.
If ``multistreaming`` should be list of ``num_pmu`` station names otherwise 16
character ASCII string.
**Params:**
* ``station_name`` **(mixed)** - Should be 16 bytes (16 ASCII characters) string
or list of strings.
**Raises:**
FrameError
When ``station_name`` is not list with length ``num_pmu`` when ``multistreaming``.
"""
if self._multistreaming:
if not isinstance(station_name, list) or self._num_pmu != len(station_name):
raise FrameError("When NUM_PMU > 1 provide station names as list with NUM_PMU elements.")
self._station_name = [station[:16].ljust(16, " ") for station in station_name]
else:
self._station_name = station_name[:16].ljust(16, " ")
def get_station_name(self):
return self._station_name
def set_stream_id_code(self, id_code):
"""
### set_config_id_code() ###
Setter for data stream IDs inside data frame.
If ``multistreaming`` should be
a list of IDs otherwise should be same as ``pmu_id_code``.
**Params:**
* ``id_code`` **(mixed)** - Should be number between ``1`` and ``65534``.
If ``multistreaming`` list of numbers.
**Raises:**
FrameError
When ``id_code`` is not list with length ``num_pmu`` when ``multistreaming``.
When ``id_code`` value is out of range.
"""
if self._multistreaming:
if not isinstance(id_code, list) or self._num_pmu != len(id_code):
raise FrameError("When NUM_PMU > 1 provide PMU ID codes as list with NUM_PMU elements.")
for stream_id in id_code:
if not 1 <= stream_id <= 65534:
raise FrameError("ID CODE out of range. 1 <= ID_CODE <= 65534")
else:
if not 1 <= id_code <= 65534:
raise FrameError("ID CODE out of range. 1 <= ID_CODE <= 65534")
self._id_code = id_code
def get_stream_id_code(self):
return self._id_code
def set_phasor_num(self, phasor_num):
"""
### set_phasor_num() ###
Setter for number of phasor measurements. Should be specified for each
data stream in data frame.
If ``multistreaming`` should be a list of ``integers`` otherwise should be ``integer``.
**Params:**
* ``phasor_num`` **(mixed)** - Should be integer between ``1`` and ``65535``.
If ``multistreaming`` list of numbers.
**Raises:**
FrameError
When ``phasor_num`` is not list with length ``num_pmu`` when ``multistreaming``.
When ``phasor_num`` value is out of range.
"""
if self._multistreaming:
if not isinstance(phasor_num, list) or self._num_pmu != len(phasor_num):
raise FrameError("When NUM_PMU > 1 provide PHNMR as list with NUM_PMU elements.")
for phnmr in phasor_num:
if not 0 <= phnmr <= 65535:
raise FrameError("Number of phasors out of range. 0 <= PHNMR <= 65535")
else:
if not 0 <= phasor_num <= 65535:
raise FrameError("Number of phasors out of range. 0 <= PHNMR <= 65535")
self._phasor_num = phasor_num
def get_phasor_num(self):
return self._phasor_num
def set_analog_num(self, analog_num):
"""
### set_analog_num() ###
Setter for number analog values. Should be specified for each
data stream in data frame.
If ``multistreaming`` should be a list of ``integers`` otherwise should be ``integer``.
**Params:**
* ``analog_num`` **(mixed)** - Should be integer between ``1`` and ``65535``.
If ``multistreaming`` list of numbers.
**Raises:**
FrameError
When ``analog_num`` is not list with length ``num_pmu`` when ``multistreaming``.
When ``analog_num`` value is out of range.
"""
if self._multistreaming:
if not isinstance(analog_num, list) or self._num_pmu != len(analog_num):
raise FrameError("When NUM_PMU > 1 provide ANNMR as list with NUM_PMU elements.")
for annmr in analog_num:
if not 0 <= annmr <= 65535:
raise FrameError("Number of phasors out of range. 0 <= ANNMR <= 65535")
else:
if not 0 <= analog_num <= 65535:
raise FrameError("Number of phasors out of range. 0 <= ANNMR <= 65535")
self._analog_num = analog_num
def get_analog_num(self):
return self._analog_num
def set_digital_num(self, digital_num):
"""
### set_digital_num() ###
Setter for number of digital status words. Should be specified for each
data stream in data frame.
If ``multistreaming`` should be a list of ``integers`` otherwise should be ``integer``.
**Params:**
* ``digital_num`` **(mixed)** - Should be integer between ``1`` and ``65535``.
If ``multistreaming`` list of numbers.
**Raises:**
FrameError
When ``digital_num`` is not list with length ``num_pmu`` when ``multistreaming``.
When ``digital_num`` value is out of range.
"""
if self._multistreaming:
if not isinstance(digital_num, list) or self._num_pmu != len(digital_num):
raise FrameError("When NUM_PMU > 1 provide DGNMR as list with NUM_PMU elements.")
for dgnmr in digital_num:
if not 0 <= dgnmr <= 65535:
raise FrameError("Number of phasors out of range. 0 <= DGNMR <= 65535")
else:
if not 0 <= digital_num <= 65535:
raise FrameError("Number of phasors out of range. 0 <= DGNMR <= 65535")
self._digital_num = digital_num
def get_digital_num(self):
return self._digital_num
def set_channel_names(self, channel_names):
"""
### set_channel_names() ###
Setter for phasor and channel names.
**Params:**
* ``channel_names`` **(list)** - Should be list of strings (16 ASCII character) with
``PHASOR_NUM`` + ``ANALOG_NUM`` + 16 * ``DIGITAL_NUM`` elements.
If ``multistreaming`` should be list of lists.
**Raises:**
FrameError
When ``channel_names`` is not list of lists with length ``num_pmu`` when ``multistreaming``.
When ``channel_names`` is not list with ``PHASOR_NUM`` + ``ANALOG_NUM`` +
+ 16 * ``DIGITAL_NUM`` elements.
"""
if self._multistreaming:
if not all(isinstance(el, list) for el in channel_names) or self._num_pmu != len(channel_names):
raise FrameError("When NUM_PMU > 1 provide CHNAM as list of lists with NUM_PMU elements.")
channel_name_list = []
for i, chnam in enumerate(channel_names):
# Channel names must be list with PHNMR + ANNMR + 16*DGNMR elements. Each bit in one digital word
# (16bit) has it's own label.
if (self._phasor_num[i] + self._analog_num[i] + 16 * self._digital_num[i]) != len(chnam):
raise FrameError("Provide CHNAM as list with PHNMR + ANNMR + 16*DGNMR elements for each stream.")
channel_name_list.append([chn[:16].ljust(16, " ") for chn in chnam])
self._channel_names = channel_name_list
else:
if not isinstance(channel_names, list) or \
(self._phasor_num + self._analog_num + 16 * self._digital_num) != len(channel_names):
raise FrameError("Provide CHNAM as list with PHNMR + ANNMR + 16*DGNMR elements.")
self._channel_names = [channel[:16].ljust(16, " ") for channel in channel_names]
def get_channel_names(self):
return self._channel_names
def set_phasor_units(self, ph_units):
"""
### set_phasor_units() ###
Setter for phasor channels conversion factor.
**Params:**
* ``ph_units`` **(list)** - Should be list of tuples ``(scale, phasor_type)``
where phasor type is ``'i'`` for current and ``'v'`` for voltage.
If ``multistreaming`` should be list of lists.
**Raises:**
FrameError
When ``ph_units`` is not list of lists with length ``num_pmu`` when ``multistreaming``.
When ``ph_units`` element is not tuple.
"""
if self._multistreaming:
if not all(isinstance(el, list) for el in ph_units) or self._num_pmu != len(ph_units):
raise FrameError("When NUM_PMU > 1 provide PHUNIT as list of lists.")
phunit_list = []
for i, ph_unit in enumerate(ph_units):
if not all(isinstance(el, tuple) for el in ph_unit) or self._phasor_num[i] != len(ph_unit):
raise FrameError("Provide PHUNIT as list of tuples with PHNMR elements. "
"Ex: [(1234,'u'),(1234, 'i')]")
ph_values = []
for ph_tuple in ph_unit:
ph_values.append(ConfigFrame1._phunit2int(*ph_tuple))
phunit_list.append(ph_values)
self._ph_units = phunit_list
else:
if not all(isinstance(el, tuple) for el in ph_units) or self._phasor_num != len(ph_units):
raise FrameError("Provide PHUNIT as list of tuples with PHNMR elements. Ex: [(1234,'u'),(1234, 'i')]")
self._ph_units = [ConfigFrame1._phunit2int(*phun) for phun in ph_units]
def get_ph_units(self):
if all(isinstance(el, list) for el in self._ph_units):
return [[self._int2phunit(unit) for unit in ph_units] for ph_units in self._ph_units]
else:
return [self._int2phunit(ph_unit) for ph_unit in self._ph_units]
@staticmethod
def _phunit2int(scale, phasor_type="v"):
"""
### phunit2int() ###
Convert method for phasor channels conversion factor.
MSB: If phasor type is ``v`` then MSB will be ``0``.
If phasor type is ``i`` then MSB will be ``1``.
LSB: Unsigned 24 bit word in 10^-5 V or amperes per bit
to scale 16-bit integer data.
If transmitted data is in floating-point format, LSB 24 bit value
shall be ignored.
**Params:**
* ``scale`` **(int)** - scale factor.
* ``phasor_type`` **(char)** - ``v`` - voltage, ``i`` - current.
Default value: ``v``.
**Returns:**
* ``int`` which represents phasor channels conversion factor.
**Raises:**
FrameError
When ``scale`` is out of range.
"""
if not 0 <= scale <= 16777215:
raise ValueError("PHUNIT scale out of range. 0 <= PHUNIT <= 16777215.")
if phasor_type not in ["v", "i"]:
raise ValueError("Phasor type should be 'v' or 'i'.")
if phasor_type == "i":
phunit = 1 << 24
return phunit | scale
else:
return scale
@staticmethod
def _int2phunit(ph_unit):
phasor_type = ph_unit & 0xff000000
scale = ph_unit & 0x00ffffff
if phasor_type > 0: # Current PH unit
return scale, "i"
else:
return scale, "v"
def set_analog_units(self, an_units):
"""
### set_analog_units() ###
Setter for analog channels conversion factor.
**Params:**
* ``an_units`` **(list)** - Should be list of tuples ``(scale, analog_type)``
where analog type is ``'pow'`` for single point-on-wave, ``'rms'`` for RMS of
analog input and ``'peak`` for peak of analog input.
If ``multistreaming`` should be list of lists.
**Raises:**
FrameError
When ``an_units`` is not list of lists with length ``num_pmu`` when ``multistreaming``.
When ``an_units`` element is not tuple.
"""
if self._multistreaming:
if not all(isinstance(el, list) for el in an_units) or self._num_pmu != len(an_units):
raise FrameError("When NUM_PMU > 1 provide ANUNIT as list of lists.")
anunit_list = []
for i, an_unit in enumerate(an_units):
if not all(isinstance(el, tuple) for el in an_unit) or self._analog_num[i] != len(an_unit):
raise FrameError("Provide ANUNIT as list of tuples with ANNMR elements. "
"Ex: [(1234,'pow'),(1234, 'rms')]")
an_values = []
for an_tuple in an_unit:
an_values.append(ConfigFrame1._anunit2int(*an_tuple))
anunit_list.append(an_values)
self._an_units = anunit_list
else:
if not all(isinstance(el, tuple) for el in an_units) or self._analog_num != len(an_units):
raise FrameError("Provide ANUNIT as list of tuples with ANNMR elements. "
"Ex: [(1234,'pow'),(1234, 'rms')]")
self._an_units = [ConfigFrame1._anunit2int(*anun) for anun in an_units]
def get_analog_units(self):
if all(isinstance(el, list) for el in self._an_units):
return [[self._int2anunit(unit) for unit in an_unit] for an_unit in self._an_units]
else:
return [self._int2anunit(an_unit) for an_unit in self._an_units]
@staticmethod
def _anunit2int(scale, anunit_type="pow"):
"""
### anunit2int() ###
Convert method for analog channels conversion factor.
MSB: If analog type is ``pow`` then MSB will be ``0``.
If analog type is ``rms`` then MSB will be ``1`` and
if analog type is ``peak`` then MSB will be ``2``.
LSB: Signed 24 bit word for user defined scaling.
**Params:**
* ``scale`` **(int)** - scale factor.
* ``anunit_type`` **(char)** - ``pow`` - single point on wave,
``rms`` - RMS of analog input and ``peak`` - peak of analog input.
Also might be user defined. Default value: ``pow``.
**Returns:**
* ``int`` which represents analog channels conversion factor.
**Raises:**
FrameError
When ``scale`` is out of range.
"""
if not -8388608 <= scale <= 8388608:
raise FrameError("ANUNIT scale out of range. -8388608 <= ANUNIT <= 8388608.")
scale &= 0xffffff # 24-bit signed integer
anunit = 1 << 24
if anunit_type == "pow": # TODO: User defined analog units
anunit |= scale
return anunit ^ (1 << 24)
if anunit_type == "rms":
anunit |= scale
return anunit
if anunit_type == "peak":
anunit |= scale
return anunit ^ (3 << 24)
@staticmethod
def _int2anunit(an_unit):
TYPES = { "0": "pow", "1": "rms", "2": "peak" }
an_unit_byte = an_unit.to_bytes(4, byteorder="big", signed=True)
an_type = int.from_bytes(an_unit_byte[0:1], byteorder="big", signed=False)
an_scale = int.from_bytes(an_unit_byte[1:4], byteorder="big", signed=True)
return an_scale, TYPES[str(an_type)]
def set_digital_units(self, dig_units):
"""
### set_digital_units() ###
Setter for mask words for digital status words.
Two 16 bit words are provided for each digital word.
The first will be used to indicate the normal status of the
digital inputs by returning 0 when XORed with the status word.
The second will indicate the current valid inputs to the PMU
by having a bit set in the binary position corresponding to the
digital input and all other bits set to 0.
**Params:**
* ``dig_units`` **(list)** - Should be list of tuples ``(first_mask, second_mask)``.
If ``multistreaming`` should be list of lists.
**Raises:**
FrameError
When ``dig_units`` is not list of lists with length ``num_pmu`` when ``multistreaming``.
When ``dig_units`` element is not tuple.
"""
if self._multistreaming:
if not all(isinstance(el, list) for el in dig_units) or self._num_pmu != len(dig_units):
raise FrameError("When NUM_PMU > 1 provide DIGUNIT as list of lists.")
digunit_list = []
for i, dig_unit in enumerate(dig_units):
if not all(isinstance(el, tuple) for el in dig_unit) or self._digital_num[i] != len(dig_unit):
raise FrameError("Provide DIGUNIT as list of tuples with DGNMR elements. "
"Ex: [(0x0000,0xffff),(0x0011, 0xff0f)]")
dig_values = []
for dig_tuple in dig_unit:
dig_values.append(ConfigFrame1._digunit2int(*dig_tuple))
digunit_list.append(dig_values)
self._dig_units = digunit_list
else:
if not all(isinstance(el, tuple) for el in dig_units) or self._digital_num != len(dig_units):
raise FrameError("Provide DIGUNIT as list of tuples with DGNMR elements. "
"Ex: [(0x0000,0xffff),(0x0011, 0xff0f)]")
self._dig_units = [ConfigFrame1._digunit2int(*dgun) for dgun in dig_units]
def get_digital_units(self):
if all(isinstance(el, list) for el in self._dig_units):
return [[self._int2digunit(unit) for unit in dig_unit] for dig_unit in self._dig_units]
else:
return [self._int2digunit(dig_unit) for dig_unit in self._dig_units]
@staticmethod
def _digunit2int(first_mask, second_mask):
"""
### digunit2int() ###
Generate digital status word mask.
**Params:**
* ``first_mask`` **(int)** - status indicator.
* ``second_mask`` **(int)** - valid input indicator.
**Returns:**
* ``int`` which digital status word mask.
**Raises:**
FrameError
When ``first_mask`` is out of range.
When ``second_mask`` is out of range.
"""
if not 0 <= first_mask <= 65535:
raise FrameError("DIGUNIT first mask must be 16-bit word. 0x0000 <= first_mask <= 0xffff")
if not 0 <= first_mask <= 65535:
raise FrameError("DIGUNIT second mask must be 16-bit word. 0x0000 <= second_mask <= 0xffff")
return (first_mask << 16) | second_mask
@staticmethod
def _int2digunit(dig_unit):
first = dig_unit & 0xffff0000
second = dig_unit & 0x0000ffff
return first, second
def set_fnom(self, f_nom):
"""
### set_fnom() ###
Setter for nominal line frequency.
Should be ``50`` or ``60`` Hz.
**Params:**
* ``f_nom`` **(int)** - ``50`` or ``60`` Hz. If ``multistreaming``
should be list of ints.
**Raises:**
FrameError
When ``f_nom`` is not ``50`` or ``60``.
When ``f_nom`` is not list of int with length ``num_pmu`` when
``multistreaming``.
"""
if self._multistreaming:
if not isinstance(f_nom, list) or self._num_pmu != len(f_nom):
raise FrameError("When NUM_PMU > 1 provide FNOM as list with NUM_PMU elements.")
fnom_list = []
for fnom in f_nom:
fnom_list.append(ConfigFrame1._fnom2int(fnom))
self._f_nom = fnom_list
else:
self._f_nom = ConfigFrame1._fnom2int(f_nom)
def get_fnom(self):
if isinstance(self._f_nom, list):
return [self._int2fnom(fnom) for fnom in self._f_nom]
else:
return self._int2fnom(self._f_nom)
@staticmethod
def _fnom2int(fnom=60):
"""
### fnom2int() ###
Convert line frequency to code.
60 Hz = ``0`` and 50 Hz = ``1``.
**Params:**
* ``fnom`` **(int)** - Nominal line frequency. Default value: 60.
**Returns:**
* ``int`` [``0`` or ``1``]
**Raises:**
FrameError
When ``fnom`` is not 50 or 60.
"""
if fnom != 50 and fnom != 60:
raise FrameError("Fundamental frequency must be 50 or 60.")
if fnom == 50:
return 1
else:
return 0
@staticmethod
def _init2fnom(fnom):
if fnom:
return 50
else:
return 60
@staticmethod
def _int2fnom(fnom_int):
if fnom_int == 0:
return 60
else:
return 50
def set_cfg_count(self, cfg_count):
"""
### set_cfg_count() ###
Setter for configuration change count.
Factory default: ``0``. This count will be the number of changes
of configuration of this message stream.
**Params:**
* ``cfg_count`` **(mixed)** - Number of changes. Sholud be list of ints
if ``multistreaming``.
**Raises:**
FrameError.
When ``cfg_count`` is not list of ints with length ``num_pmu`` when
``multistreaming``.
When ``cfg_count`` is out of range.
"""
if self._multistreaming:
if not isinstance(cfg_count, list) or self._num_pmu != len(cfg_count):
raise FrameError("When NUM_PMU > 1 provide CFGCNT as list with NUM_PMU elements.")
cfgcnt_list = []
for cfgcnt in cfg_count:
if not 0 <= cfgcnt <= 65535:
raise FrameError("CFGCNT out of range. 0 <= CFGCNT <= 65535.")
cfgcnt_list.append(cfgcnt)
self._cfg_count = cfgcnt_list
else:
if not 0 <= cfg_count <= 65535:
raise FrameError("CFGCNT out of range. 0 <= CFGCNT <= 65535.")
self._cfg_count = cfg_count
    def get_cfg_count(self):
        """Return configuration change count (int, or list of ints if multistreaming)."""
        return self._cfg_count
def set_data_rate(self, data_rate):
"""
### set_data_rate() ###
Setter for rate of phasor data transmission.
If ``data_rate > 0`` rate is number of frames per second.
If ``data_rate < 0`` rate is negative of seconds per frame.
**Params:**
* ``data_rate`` **(int)** - Rate of phasor data transmission.
**Raises:**
FrameError.
When ``data_rate`` is out of range.
"""
if not -32767 <= data_rate <= 32767:
raise FrameError("DATA_RATE out of range. -32 767 <= DATA_RATE <= 32 767.")
self._data_rate = data_rate
    def get_data_rate(self):
        """Return DATA_RATE (frames/s if positive, seconds-per-frame if negative)."""
        return self._data_rate
    def convert2bytes(self):
        """Serialize this configuration frame body and delegate to the
        superclass for the common header and CRC.

        Field order: TIME_BASE, NUM_PMU, then one block per PMU (STN, IDCODE,
        FORMAT, PHNMR, ANNMR, DGNMR, CHNAM, PHUNIT, ANUNIT, DIGUNIT, FNOM,
        CFGCNT) and finally DATA_RATE once for the whole frame.
        """
        if not self._multistreaming:
            # Single stream: per-PMU fields are scalars / flat lists.
            cfg_b = self._time_base.to_bytes(4, "big") + self._num_pmu.to_bytes(2, "big") + \
                    str.encode(self._station_name) + self._id_code.to_bytes(2, "big") + \
                    self._data_format.to_bytes(2, "big") + self._phasor_num.to_bytes(2, "big") + \
                    self._analog_num.to_bytes(2, "big") + self._digital_num.to_bytes(2, "big") + \
                    str.encode("".join(self._channel_names)) + list2bytes(self._ph_units, 4) + \
                    list2bytes(self._an_units, 4) + list2bytes(self._dig_units, 4) + \
                    self._f_nom.to_bytes(2, "big") + self._cfg_count.to_bytes(2, "big") + \
                    self._data_rate.to_bytes(2, "big", signed=True)
        else:
            cfg_b = self._time_base.to_bytes(4, "big") + self._num_pmu.to_bytes(2, "big")
            # Concatenate configurations as many as num_pmu tells
            for i in range(self._num_pmu):
                cfg_b_i = str.encode(self._station_name[i]) + self._id_code[i].to_bytes(2, "big") + \
                          self._data_format[i].to_bytes(2, "big") + self._phasor_num[i].to_bytes(2, "big") + \
                          self._analog_num[i].to_bytes(2, "big") + self._digital_num[i].to_bytes(2, "big") + \
                          str.encode("".join(self._channel_names[i])) + list2bytes(self._ph_units[i], 4) + \
                          list2bytes(self._an_units[i], 4) + list2bytes(self._dig_units[i], 4) + \
                          self._f_nom[i].to_bytes(2, "big") + self._cfg_count[i].to_bytes(2, "big")
                cfg_b += cfg_b_i
            # DATA_RATE appears once, after all per-PMU blocks.
            cfg_b += self._data_rate.to_bytes(2, "big", signed=True)
        return super().convert2bytes(cfg_b)
    @staticmethod
    def convert2frame(byte_data):
        """Deserialize a CFG-1 frame from the complete frame bytes (incl. CRC).

        Walks the byte stream with a running ``start_byte`` cursor: common
        header (SYNC/FRAMESIZE/IDCODE/SOC/FRASEC), TIME_BASE, NUM_PMU, then
        either one flat field set (NUM_PMU == 1) or one block per PMU, and
        finally DATA_RATE just before the 2-byte CRC.

        Returns a ``ConfigFrame1`` instance; raises ``FrameError`` when the
        CRC check or any field decode fails.
        """
        try:
            if not CommonFrame._check_crc(byte_data):
                raise FrameError("CRC failed. Configuration frame not valid.")
            pmu_code = int.from_bytes(byte_data[4:6], byteorder="big", signed=False)
            soc = int.from_bytes(byte_data[6:10], byteorder="big", signed=False)
            frasec = CommonFrame._int2frasec(int.from_bytes(byte_data[10:14], byteorder="big", signed=False))
            time_base_int = int.from_bytes(byte_data[14:18], byteorder="big", signed=False)
            time_base = time_base_int & 0x00ffffff  # take only first 24 LSB bits
            num_pmu = int.from_bytes(byte_data[18:20], byteorder="big", signed=False)
            start_byte = 20
            if num_pmu > 1:  # Loop through configurations for each
                station_name, id_code, data_format, phasor_num, analog_num, digital_num, channel_names, ph_units, \
                    an_units, dig_units, fnom, cfg_count = [[] for _ in range(12)]
                for i in range(num_pmu):
                    station_name.append(byte_data[start_byte:start_byte+16].decode("ascii"))
                    start_byte += 16
                    id_code.append(int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False))
                    start_byte += 2
                    data_format.append(int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                                       & 0x000f)
                    start_byte += 2
                    phasor_num.append(int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False))
                    start_byte += 2
                    analog_num.append(int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False))
                    start_byte += 2
                    digital_num.append(int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False))
                    start_byte += 2
                    # One 16-char name per phasor/analog channel plus one per digital bit.
                    stream_channel_names = []
                    for _ in range(phasor_num[i] + analog_num[i] + 16*digital_num[i]):
                        stream_channel_names.append(byte_data[start_byte:start_byte+16].decode("ascii"))
                        start_byte += 16
                    channel_names.append(stream_channel_names)
                    stream_ph_units = []
                    for _ in range(phasor_num[i]):
                        ph_unit = int.from_bytes(byte_data[start_byte:start_byte+4], byteorder="big", signed=False)
                        stream_ph_units.append(ConfigFrame1._int2phunit(ph_unit))
                        start_byte += 4
                    ph_units.append(stream_ph_units)
                    stream_an_units = []
                    for _ in range(analog_num[i]):
                        an_unit = int.from_bytes(byte_data[start_byte:start_byte+4], byteorder="big", signed=True)
                        stream_an_units.append(ConfigFrame1._int2anunit(an_unit))
                        start_byte += 4
                    an_units.append(stream_an_units)
                    stream_dig_units = []
                    for _ in range(digital_num[i]):
                        stream_dig_units.append(ConfigFrame1._int2digunit(
                            int.from_bytes(byte_data[start_byte:start_byte+4], byteorder="big", signed=False)))
                        start_byte += 4
                    dig_units.append(stream_dig_units)
                    fnom.append(ConfigFrame1._int2fnom(int.from_bytes(byte_data[start_byte:start_byte + 2],
                                                                      byteorder="big", signed=False)))
                    start_byte += 2
                    cfg_count.append(int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False))
                    start_byte += 2
            else:
                station_name = byte_data[start_byte:start_byte+16].decode("ascii")
                start_byte += 16
                id_code = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                start_byte += 2
                data_format_int = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                data_format = data_format_int & 0x000f  # Take only first 4 LSB bits
                start_byte += 2
                phasor_num = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                start_byte += 2
                analog_num = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                start_byte += 2
                digital_num = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                start_byte += 2
                channel_names = []
                for _ in range(phasor_num + analog_num + 16*digital_num):
                    channel_names.append(byte_data[start_byte:start_byte+16].decode("ascii"))
                    start_byte += 16
                ph_units = []
                for _ in range(phasor_num):
                    ph_unit_int = int.from_bytes(byte_data[start_byte:start_byte+4], byteorder="big", signed=False)
                    ph_units.append(ConfigFrame1._int2phunit(ph_unit_int))
                    start_byte += 4
                an_units = []
                for _ in range(analog_num):
                    an_unit = int.from_bytes(byte_data[start_byte:start_byte+4], byteorder="big", signed=False)
                    an_units.append(ConfigFrame1._int2anunit(an_unit))
                    start_byte += 4
                dig_units = []
                for _ in range(digital_num):
                    dig_units.append(ConfigFrame1._int2digunit(
                        int.from_bytes(byte_data[start_byte:start_byte+4], byteorder="big", signed=False)))
                    start_byte += 4
                fnom = ConfigFrame1._int2fnom(int.from_bytes(byte_data[start_byte:start_byte + 2],
                                                             byteorder="big", signed=False))
                start_byte += 2
                cfg_count = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                start_byte += 2
            # DATA_RATE sits immediately before the trailing 2-byte CRC.
            data_rate = int.from_bytes(byte_data[-4:-2], byteorder="big", signed=True)
            return ConfigFrame1(pmu_code, time_base, num_pmu, station_name, id_code, data_format, phasor_num,
                                analog_num, digital_num, channel_names, ph_units, an_units, dig_units, fnom, cfg_count,
                                data_rate, soc, frasec)
        except Exception as error:
            raise FrameError("Error while creating Config frame: " + str(error))
class ConfigFrame2(ConfigFrame1):
    """
    ## ConfigFrame2 ##
    Configuration frame, version 2 — describes the data stream currently
    being transmitted.  The wire layout and every field are identical to
    ``ConfigFrame1`` (TIME_BASE, NUM_PMU, per-PMU station/ID/format/channel
    descriptors, FNOM, CFGCNT, DATA_RATE); only the frame-type code in the
    common header differs, so this class reuses the whole ``ConfigFrame1``
    implementation and re-tags the frame as ``cfg2``.

    Implements the two abstract methods from the super class:

    * ``convert2bytes()`` - converting ConfigFrame2 to bytes (inherited).
    * ``convert2frame()`` - converting an array of bytes to ConfigFrame2.

    See ``ConfigFrame1`` for the full attribute documentation.

    **Raises:**

        FrameError
    When it's not possible to create valid frame, usually due invalid parameter value.
    """

    def __init__(self, pmu_id_code, time_base, num_pmu, station_name, id_code, data_format, phasor_num, analog_num,
                 digital_num, channel_names, ph_units, an_units, dig_units, f_nom, cfg_count, data_rate,
                 soc=None, frasec=None, version=1):
        # Build exactly like a CFG-1 frame, then overwrite the frame-type bits.
        super().__init__(pmu_id_code, time_base, num_pmu, station_name, id_code, data_format, phasor_num, analog_num,
                         digital_num, channel_names, ph_units, an_units, dig_units, f_nom, cfg_count,
                         data_rate, soc, frasec, version)
        super().set_frame_type("cfg2")

    @staticmethod
    def convert2frame(byte_data):
        """Deserialize by reusing the CFG-1 parser, then re-tag as CFG-2."""
        frame = ConfigFrame1.convert2frame(byte_data)
        frame.set_frame_type("cfg2")
        frame.__class__ = ConfigFrame2  # Casting to derived class
        return frame
class ConfigFrame3(CommonFrame):
    """
    ## ConfigFrame3 ##
    Placeholder for configuration frame version 3 (the CFG-3 message of the
    later synchrophasor standard revision).  Serialization
    (``convert2bytes``) and deserialization (``convert2frame``) are not
    implemented; the class only reserves the CFG-3 slot in the frame
    hierarchy.
    """
    pass  # TODO: Implement Configuration Frame v3
class DataFrame(CommonFrame):
    """Data frame carrying measurement samples for the stream described by a ConfigFrame2."""
    # Lookup tables between human-readable STAT field words and their bit
    # codes; the *_WORDS dicts are the inverse mappings used when decoding.
    MEASUREMENT_STATUS = { "ok": 0, "error": 1, "test": 2, "verror": 3 }
    MEASUREMENT_STATUS_WORDS = { code: word for word, code in MEASUREMENT_STATUS.items() }
    # Seconds since the PMU clock was last locked to its time source.
    UNLOCKED_TIME = { "<10": 0, "<100": 1, "<1000": 2, ">1000": 3 }
    UNLOCKED_TIME_WORDS = { code: word for word, code in UNLOCKED_TIME.items() }
    # Maximum clock-error buckets for the STAT time-quality field.
    TIME_QUALITY = { "n/a": 0, "<100ns": 1, "<1us": 2, "<10us": 3, "<100us": 4, "<1ms": 5, "<10ms": 6, ">10ms": 7}
    TIME_QUALITY_WORDS = { code: word for word, code in TIME_QUALITY.items() }
    # Cause of the last trigger event (STAT low nibble).
    TRIGGER_REASON = { "manual": 0, "magnitude_low": 1, "magnitude_high": 2, "phase_angle_diff": 3,
                       "frequency_high_or_log": 4, "df/dt_high": 5, "reserved": 6, "digital": 7 }
    TRIGGER_REASON_WORDS = { code: word for word, code in TRIGGER_REASON.items() }
    def __init__(self, pmu_id_code, stat, phasors, freq, dfreq, analog, digital, cfg, soc=None, frasec=None):
        """Create a data frame; ``cfg`` must be the ConfigFrame2 describing the stream.

        Raises ``FrameError`` when ``cfg`` is not a ``ConfigFrame2``; the
        individual setters validate their own fields against ``cfg``.
        """
        if not isinstance(cfg, ConfigFrame2):
            raise FrameError("CFG should describe current data stream (ConfigurationFrame2)")
        # Common frame for Configuration frame 2 with PMU simulator ID CODE which sends configuration frame.
        super().__init__("data", pmu_id_code, soc, frasec)
        self.cfg = cfg  # must be assigned first: every setter below reads cfg fields
        self.set_stat(stat)
        self.set_phasors(phasors)
        self.set_freq(freq)
        self.set_dfreq(dfreq)
        self.set_analog(analog)
        self.set_digital(digital)
def set_stat(self, stat):
if self.cfg._num_pmu > 1:
if not isinstance(stat, list) or self.cfg._num_pmu != len(stat):
raise TypeError("When number of measurements > 1 provide STAT as list with NUM_MEASUREMENTS elements.")
stats = [] # Format tuples transformed to ints
for stat_el in stat:
# If stat is specified as tuple then convert them to int
if isinstance(stat_el, tuple):
stats.append(DataFrame._stat2int(*stat_el))
else:
# If data formats are specified as ints check range
if not 0 <= stat_el <= 65536:
raise ValueError("STAT out of range. 0 <= STAT <= 65536")
else:
stats.append(stat_el)
self._stat = stats
else:
if isinstance(stat, tuple):
self._stat = DataFrame._stat2int(*stat)
else:
if not 0 <= stat <= 65536:
raise ValueError("STAT out of range. 0 <= STAT <= 65536")
else:
self._stat = stat
def get_stat(self):
if isinstance(self._stat, list):
return [DataFrame._int2stat(stat) for stat in self._stat]
else:
return DataFrame._int2stat(self._stat)
@staticmethod
def _stat2int(measurement_status="ok", sync=True, sorting="timestamp", trigger=False, cfg_change=False,
modified=False, time_quality=5, unlocked="<10", trigger_reason=0):
if isinstance(measurement_status, str):
measurement_status = DataFrame.MEASUREMENT_STATUS[measurement_status]
if isinstance(time_quality, str):
time_quality = DataFrame.TIME_QUALITY[time_quality]
if isinstance(unlocked, str):
unlocked = DataFrame.UNLOCKED_TIME[unlocked]
if isinstance(trigger_reason, str):
trigger_reason = DataFrame.TRIGGER_REASON[trigger_reason]
stat = measurement_status << 2
if not sync:
stat |= 1
stat <<= 1
if not sorting == "timestamp":
stat |= 1
stat <<= 1
if trigger:
stat |= 1
stat <<= 1
if cfg_change:
stat |= 1
stat <<= 1
if modified:
stat |= 1
stat <<= 3
stat |= time_quality
stat <<= 2
stat |= unlocked
stat <<= 4
return stat | trigger_reason
@staticmethod
def _int2stat(stat):
measurement_status = DataFrame.MEASUREMENT_STATUS_WORDS[stat >> 15]
sync = bool(stat & 0x2000)
if stat & 0x1000:
sorting = "arrival"
else:
sorting = "timestamp"
trigger = bool(stat & 0x800)
cfg_change = bool(stat & 0x400)
modified = bool(stat & 0x200)
time_quality = DataFrame.TIME_QUALITY_WORDS[stat & 0x1c0]
unlocked = DataFrame.UNLOCKED_TIME_WORDS[stat & 0x30]
trigger_reason = DataFrame.TRIGGER_REASON_WORDS[stat & 0xf]
return measurement_status, sync, sorting, trigger, cfg_change, modified, time_quality, unlocked, trigger_reason
def set_phasors(self, phasors):
phasors_list = [] # Format tuples transformed to ints
if self.cfg._num_pmu > 1:
if not isinstance(phasors, list) or self.cfg._num_pmu != len(phasors):
raise TypeError("When number of measurements > 1 provide PHASORS as list of tuple list with "
"NUM_MEASUREMENTS elements.")
if not isinstance(self.cfg._data_format, list) or self.cfg._num_pmu != len(self.cfg._data_format):
raise TypeError("When number of measurements > 1 provide DATA_FORMAT as list with "
"NUM_MEASUREMENTS elements.")
for i, phasor in enumerate(phasors):
if not isinstance(phasor, list) or self.cfg._phasor_num[i] != len(phasor):
raise TypeError("Provide PHASORS as list of tuples with PHASOR_NUM tuples")
ph_measurements = []
for phasor_measurement in phasor:
ph_measurements.append(DataFrame._phasor2int(phasor_measurement, self.cfg._data_format[i]))
phasors_list.append(ph_measurements)
else:
if not isinstance(phasors, list) or self.cfg._phasor_num != len(phasors):
raise TypeError("Provide PHASORS as list of tuples with PHASOR_NUM tuples")
for phasor_measurement in phasors:
phasors_list.append(DataFrame._phasor2int(phasor_measurement, self.cfg._data_format))
self._phasors = phasors_list
    def get_phasors(self, convert2polar=True):
        """Return phasor measurements decoded from their integer wire form.

        When ``convert2polar`` is True, fixed-point streams are scaled back
        to engineering units using the PHUNIT scale (10^-5 factor) and
        rectangular values are converted to (magnitude, angle-in-radians)
        tuples.  Floating-point streams are returned as transmitted.
        """
        if all(isinstance(el, list) for el in self._phasors):
            # Multistream: decode each stream with its own data format.
            phasors = [[DataFrame._int2phasor(ph, self.cfg._data_format[i]) for ph in phasor]
                       for i, phasor in enumerate(self._phasors)]
            if convert2polar:
                for i, stream_phasors in enumerate(phasors):
                    if not self.cfg.get_data_format()[i][1]:  # If not float representation scale back
                        stream_phasors = [tuple([ph*self.cfg.get_ph_units()[i][j][0]*0.00001 for ph in phasor])
                                          for j, phasor in enumerate(stream_phasors)]
                        phasors[i] = stream_phasors
                    if not self.cfg.get_data_format()[i][0]:  # If not polar convert to polar representation
                        stream_phasors = [(sqrt(ph[0]**2 + ph[1]**2), atan2(ph[1], ph[0])) for ph in stream_phasors]
                        phasors[i] = stream_phasors
        else:
            phasors = [DataFrame._int2phasor(phasor, self.cfg._data_format) for phasor in self._phasors]
            if not self.cfg.get_data_format()[1]:  # If not float representation scale back
                phasors = [tuple([ph*self.cfg.get_ph_units()[i][0]*0.00001 for ph in phasor])
                           for i, phasor in enumerate(phasors)]
            if not self.cfg.get_data_format()[0]:  # If not polar convert to polar representation
                phasors = [(sqrt(ph[0]**2 + ph[1]**2), atan2(ph[1], ph[0])) for ph in phasors]
        return phasors
@staticmethod
def _phasor2int(phasor, data_format):
if not isinstance(phasor, tuple):
raise TypeError("Provide phasor measurement as tuple. Rectangular - (Re, Im); Polar - (Mg, An).")
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[0]: # Polar representation
if data_format[1]: # Floating Point
if not -3.142 <= phasor[1] <= 3.142:
raise ValueError("Angle must be in range -3.14 <= ANGLE <= 3.14")
mg = pack("!f", float(phasor[0]))
an = pack("!f", float(phasor[1]))
measurement = mg + an
else: # Polar 16-bit representations
if not 0 <= phasor[0] <= 65535:
raise ValueError("Magnitude must be 16-bit unsigned integer. 0 <= MAGNITUDE <= 65535.")
if not -31416 <= phasor[1] <= 31416:
raise ValueError("Angle must be 16-bit signed integer in radians x (10^-4). "
"-31416 <= ANGLE <= 31416.")
mg = pack("!H", phasor[0])
an = pack("!h", phasor[1])
measurement = mg + an
else:
if data_format[1]: # Rectangular floating point representation
re = pack("!f", float(phasor[0]))
im = pack("!f", float(phasor[1]))
measurement = re + im
else:
if not ((-32767 <= phasor[0] <= 32767) or (-32767 <= phasor[1] <= 32767)):
raise ValueError("Real and imaginary value must be 16-bit signed integers. "
"-32767 <= (Re,Im) <= 32767.")
re = pack("!h", phasor[0])
im = pack("!h", phasor[1])
measurement = re + im
return int.from_bytes(measurement, "big", signed=False)
@staticmethod
def _int2phasor(phasor, data_format):
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[1]: # Float representation
phasor = unpack("!ff", phasor.to_bytes(8, "big", signed=False))
elif data_format[0]: # Polar integer
phasor = unpack("!Hh", phasor.to_bytes(4, "big", signed=False))
else: # Rectangular integer
phasor = unpack("!hh", phasor.to_bytes(4, "big", signed=False))
return phasor
def set_freq(self, freq):
if self.cfg._num_pmu > 1:
if not isinstance(freq, list) or self.cfg._num_pmu != len(freq):
raise TypeError("When number of measurements > 1 provide FREQ as list with "
"NUM_MEASUREMENTS elements.")
if not isinstance(self.cfg._data_format, list) or self.cfg._num_pmu != len(self.cfg._data_format):
raise TypeError("When number of measurements > 1 provide DATA_FORMAT as list with "
"NUM_MEASUREMENTS elements.")
freq_list = [] # Format tuples transformed to ints
for i, fr in enumerate(freq):
freq_list.append(DataFrame._freq2int(fr, self.cfg._data_format[i]))
self._freq = freq_list
else:
self._freq = DataFrame._freq2int(freq, self.cfg._data_format)
def get_freq(self):
if isinstance(self._freq, list):
freq = [DataFrame._int2freq(fr, self.cfg._data_format[i]) for i, fr in enumerate(self._freq)]
else:
freq = DataFrame._int2freq(self._freq, self.cfg._data_format)
return freq
def _freq2int(freq, data_format):
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[3]: # FREQ/DFREQ floating point
if not -32.767 <= freq <= 32.767:
raise ValueError("FREQ must be in range -32.767 <= FREQ <= 32.767.")
freq = unpack("!I", pack("!f", float(freq)))[0]
else:
if not -32767 <= freq <= 32767:
raise ValueError("FREQ must be 16-bit signed integer. -32767 <= FREQ <= 32767.")
freq = unpack("!H", pack("!h", freq))[0]
return freq
def _int2freq(freq, data_format):
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[3]: # FREQ/DFREQ floating point
freq = unpack("!f", pack("!I", freq))[0]
else:
freq = unpack("!h", pack("!H", freq))[0]
return freq
def set_dfreq(self, dfreq):
if self.cfg._num_pmu > 1:
if not isinstance(dfreq, list) or self.cfg._num_pmu != len(dfreq):
raise TypeError("When number of measurements > 1 provide DFREQ as list with "
"NUM_MEASUREMENTS elements.")
if not isinstance(self.cfg._data_format, list) or self.cfg._num_pmu != len(self.cfg._data_format):
raise TypeError("When number of measurements > 1 provide DATA_FORMAT as list with "
"NUM_MEASUREMENTS elements.")
dfreq_list = [] # Format tuples transformed to ints
for i, dfr in enumerate(dfreq):
dfreq_list.append(DataFrame._dfreq2int(dfr, self.cfg._data_format[i]))
self._dfreq = dfreq_list
else:
self._dfreq = DataFrame._dfreq2int(dfreq, self.cfg._data_format)
def get_dfreq(self):
if isinstance(self._dfreq, list):
dfreq = [DataFrame._int2dfreq(dfr, self.cfg._data_format[i]) for i, dfr in enumerate(self._dfreq)]
else:
dfreq = DataFrame._int2dfreq(self._dfreq, self.cfg._data_format)
return dfreq
def _dfreq2int(dfreq, data_format):
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[3]: # FREQ/DFREQ floating point
dfreq = unpack("!I", pack("!f", float(dfreq)))[0]
else:
if not -32767 <= dfreq <= 32767:
raise ValueError("DFREQ must be 16-bit signed integer. -32767 <= DFREQ <= 32767.")
dfreq = unpack("!H", pack("!h", dfreq))[0]
return dfreq
def _int2dfreq(dfreq, data_format):
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[3]: # FREQ/DFREQ floating point
dfreq = unpack("!f", pack("!I", dfreq))[0]
else:
dfreq = unpack("!h", pack("!H", dfreq))[0]
return dfreq
def set_analog(self, analog):
analog_list = []
# Format tuples transformed to ints
if self.cfg._num_pmu > 1:
if not isinstance(analog, list) or self.cfg._num_pmu != len(analog):
raise TypeError("When number of measurements > 1 provide ANALOG as list of list with "
"NUM_MEASUREMENTS elements.")
if not isinstance(self.cfg._data_format, list) or self.cfg._num_pmu != len(self.cfg._data_format):
raise TypeError("When number of measurements > 1 provide DATA_FORMAT as list with "
"NUM_MEASUREMENTS elements.")
for i, an in enumerate(analog):
if not isinstance(an, list) or self.cfg._analog_num[i] != len(an):
raise TypeError("Provide ANALOG as list with ANALOG_NUM elements")
an_measurements = []
for analog_measurement in an:
an_measurements.append(DataFrame._analog2int(analog_measurement, self.cfg._data_format[i]))
analog_list.append(an_measurements)
else:
if not isinstance(analog, list) or self.cfg._analog_num!= len(analog):
raise TypeError("Provide ANALOG as list with ANALOG_NUM elements")
for analog_measurement in analog:
analog_list.append(DataFrame._analog2int(analog_measurement, self.cfg._data_format))
self._analog = analog_list
def get_analog(self):
if all(isinstance(el, list) for el in self._analog):
analog = [[DataFrame._int2analog(an, self.cfg._data_format[i]) for an in analog]
for i, analog in enumerate(self._analog)]
else:
analog = [DataFrame._int2analog(an, self.cfg._data_format) for an in self._analog]
return analog
def _analog2int(analog, data_format):
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[2]: # ANALOG float
analog = unpack("!I", pack("!f", float(analog)))[0]
else:
# User defined ranges - but fit in 16-bit (u)signed integer
if not -32767 <= analog <= 32767:
raise ValueError("ANALOG must be in range -32767 <= FREQ <= 65535.")
analog = unpack("!H", pack("!h", analog))[0]
return analog
def _int2analog(analog, data_format):
if isinstance(data_format, int):
data_format = DataFrame._int2format(data_format)
if data_format[2]: # ANALOG float
analog = unpack("!f", pack("!I", analog))[0]
else:
analog = unpack("!h", pack("!H", analog))[0]
return analog
def set_digital(self, digital):
digital_list = []
# Format tuples transformed to ints
if self.cfg._num_pmu > 1:
if not isinstance(digital, list) or self.cfg._num_pmu != len(digital):
raise TypeError("When number of measurements > 1 provide DIGITAL as list of lists with "
"NUM_MEASUREMENTS elements.")
for i, dig in enumerate(digital):
if not isinstance(dig, list) or self.cfg._digital_num[i] != len(dig):
raise TypeError("Provide DIGITAL as list with DIGITAL_NUM elements")
dig_measurements = []
for digital_measurement in dig:
dig_measurements.append(DataFrame._digital2int(digital_measurement))
digital_list.append(dig_measurements)
else:
if not isinstance(digital, list) or self.cfg._digital_num != len(digital):
raise TypeError("Provide DIGITAL as list with DIGITAL_NUM elements")
for digital_measurement in digital:
digital_list.append(DataFrame._digital2int(digital_measurement))
self._digital = digital_list
    def get_digital(self):
        """Return the stored DIGITAL status words unchanged.

        A list of 16-bit words (or a list of such lists for multi-stream
        frames) exactly as stored by set_digital - no decoding is applied.
        """
        return self._digital
def _digital2int(digital):
if not -32767 <= digital <= 65535:
raise ValueError("DIGITAL must be 16 bit word. -32767 <= DIGITAL <= 65535.")
return unpack("!H", pack("!H", digital))[0]
def get_measurements(self):
measurements = []
if self.cfg._num_pmu > 1:
frequency = [self.cfg.get_fnom()[i] + freq / 1000 for i, freq in enumerate(self.get_freq())]
for i in range(self.cfg._num_pmu):
measurement = { "stream_id": self.cfg.get_stream_id_code()[i],
"stat": self.get_stat()[i][0],
"phasors": self.get_phasors()[i],
"analog": self.get_analog()[i],
"digital": self.get_digital()[i],
"frequency": self.cfg.get_fnom()[i] + self.get_freq()[i] / 1000,
"rocof": self.get_dfreq()[i]}
measurements.append(measurement)
else:
measurements.append({ "stream_id": self.cfg.get_stream_id_code(),
"stat": self.get_stat()[0],
"phasors": self.get_phasors(),
"analog": self.get_analog(),
"digital": self.get_digital(),
"frequency": self.cfg.get_fnom() + self.get_freq() / 1000,
"rocof": self.get_dfreq()
})
data_frame = { "pmu_id": self._pmu_id_code,
"time": self.get_soc() + self.get_frasec()[0] / self.cfg.get_time_base(),
"measurements": measurements }
return data_frame
def convert2bytes(self):
# Convert DataFrame message to bytes
if not self.cfg._num_pmu > 1:
data_format_size = CommonFrame._get_data_format_size(self.cfg._data_format)
df_b = self._stat.to_bytes(2, "big") + list2bytes(self._phasors, data_format_size["phasor"]) + \
self._freq.to_bytes(data_format_size["freq"], "big") + \
self._dfreq.to_bytes(data_format_size["freq"], "big") + \
list2bytes(self._analog, data_format_size["analog"]) + list2bytes(self._digital, 2)
else:
# Concatenate measurements as many as num_measurements tells
df_b = None
for i in range(self.cfg._num_pmu):
data_format_size = CommonFrame._get_data_format_size(self.cfg._data_format[i])
df_b_i = self._stat[i].to_bytes(2, "big") + \
list2bytes(self._phasors[i], data_format_size["phasor"]) + \
self._freq[i].to_bytes(data_format_size["freq"], "big") + \
self._dfreq[i].to_bytes(data_format_size["freq"], "big") + \
list2bytes(self._analog[i], data_format_size["analog"]) + \
list2bytes(self._digital[i], 2)
if df_b:
df_b += df_b_i
else:
df_b = df_b_i
return super().convert2bytes(df_b)
    @staticmethod
    def convert2frame(byte_data, cfg):
        """Deserialize *byte_data* (a full data frame, CRC included) into a
        DataFrame, using *cfg* for stream count, DATA_FORMAT and the
        phasor/analog/digital channel counts.

        Raises FrameError when the CRC check or any field decode fails.
        """
        try:
            if not CommonFrame._check_crc(byte_data):
                # NOTE(review): message says "Configuration frame" but this is
                # the Data frame parser - looks like a copy/paste slip.
                raise FrameError("CRC failed. Configuration frame not valid.")
            num_pmu = cfg.get_num_pmu()
            data_format = cfg.get_data_format()
            phasor_num = cfg.get_phasor_num()
            analog_num = cfg.get_analog_num()
            digital_num = cfg.get_digital_num()
            # Common header: SYNC(2) + FRAMESIZE(2) precede IDCODE at offset 4.
            pmu_code = int.from_bytes(byte_data[4:6], byteorder="big", signed=False)
            soc = int.from_bytes(byte_data[6:10], byteorder="big", signed=False)
            frasec = CommonFrame._int2frasec(int.from_bytes(byte_data[10:14], byteorder="big", signed=False))
            # Per-stream data starts right after the 14-byte common header.
            start_byte = 14
            if num_pmu > 1:
                stat, phasors, freq, dfreq, analog, digital = [[] for _ in range(6)]
                for i in range(num_pmu):
                    # STAT: 2 bytes per stream.
                    st = DataFrame._int2stat(int.from_bytes(byte_data[start_byte:start_byte+2],
                                                            byteorder="big", signed=False))
                    stat.append(st)
                    start_byte += 2
                    # Phasor width: 8 bytes when float (DATA_FORMAT bit 1), else 4.
                    phasor_size = 8 if data_format[i][1] else 4
                    stream_phasors = []
                    for _ in range(phasor_num[i]):
                        phasor = DataFrame._int2phasor(int.from_bytes(byte_data[start_byte:start_byte+phasor_size],
                                                                      byteorder="big", signed=False), data_format[i])
                        stream_phasors.append(phasor)
                        start_byte += phasor_size
                    phasors.append(stream_phasors)
                    # FREQ and DFREQ share one width (DATA_FORMAT bit 3).
                    freq_size = 4 if data_format[i][3] else 2
                    stream_freq = DataFrame._int2freq(int.from_bytes(byte_data[start_byte:start_byte+freq_size],
                                                                     byteorder="big", signed=False), data_format[i])
                    start_byte += freq_size
                    freq.append(stream_freq)
                    stream_dfreq = DataFrame._int2dfreq(int.from_bytes(byte_data[start_byte:start_byte+freq_size],
                                                                       byteorder="big", signed=False), data_format[i])
                    start_byte += freq_size
                    dfreq.append(stream_dfreq)
                    # Analog width: 4 bytes when float (DATA_FORMAT bit 2), else 2.
                    analog_size = 4 if data_format[i][2] else 2
                    stream_analog = []
                    for _ in range(analog_num[i]):
                        an = DataFrame._int2analog(int.from_bytes(byte_data[start_byte:start_byte+analog_size],
                                                                  byteorder="big", signed=False), data_format[i])
                        stream_analog.append(an)
                        start_byte += analog_size
                    analog.append(stream_analog)
                    # Digital status words are always 2 bytes each.
                    stream_digital = []
                    for _ in range(digital_num[i]):
                        dig = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                        stream_digital.append(dig)
                        start_byte += 2
                    digital.append(stream_digital)
            else:
                # Single stream: same walk, but scalar counts/formats.
                stat = DataFrame._int2stat(int.from_bytes(byte_data[start_byte:start_byte+2],
                                                          byteorder="big", signed=False))
                start_byte += 2
                phasor_size = 8 if data_format[1] else 4
                phasors = []
                for _ in range(phasor_num):
                    phasor = DataFrame._int2phasor(int.from_bytes(byte_data[start_byte:start_byte+phasor_size],
                                                                  byteorder="big", signed=False), data_format)
                    phasors.append(phasor)
                    start_byte += phasor_size
                freq_size = 4 if data_format[3] else 2
                freq = DataFrame._int2freq(int.from_bytes(byte_data[start_byte:start_byte+freq_size],
                                                          byteorder="big", signed=False), data_format)
                start_byte += freq_size
                dfreq = DataFrame._int2dfreq(int.from_bytes(byte_data[start_byte:start_byte+freq_size], byteorder="big",
                                                            signed=False), data_format)
                start_byte += freq_size
                analog_size = 4 if data_format[2] else 2
                analog = []
                for _ in range(analog_num):
                    an = DataFrame._int2analog(int.from_bytes(byte_data[start_byte:start_byte+analog_size],
                                                              byteorder="big", signed=False), data_format)
                    analog.append(an)
                    start_byte += analog_size
                digital = []
                for _ in range(digital_num):
                    dig = int.from_bytes(byte_data[start_byte:start_byte+2], byteorder="big", signed=False)
                    digital.append(dig)
                    start_byte += 2
            return DataFrame(pmu_code, stat, phasors, freq, dfreq, analog, digital, cfg, soc, frasec)
        except Exception as error:
            raise FrameError("Error while creating Data frame: " + str(error))
class CommandFrame(CommonFrame):
    """Command frame: sent to a PMU/PDC to control the data flow or to
    request configuration/header frames."""

    # Designated command words -> 16-bit command codes.
    COMMANDS = { "stop": 1, "start": 2, "header": 3, "cfg1": 4, "cfg2": 5, "cfg3": 6, "extended": 8 }
    # Invert CommandFrame.COMMANDS to get COMMAND_WORDS
    COMMAND_WORDS = { code: word for word, code in COMMANDS.items() }

    def __init__(self, pmu_id_code, command, extended_frame=None, soc=None, frasec=None):
        """Build a command frame addressed to *pmu_id_code*.

        command: a designated word from COMMANDS or a raw 16-bit code.
        extended_frame: optional payload bytes for the "extended" command.
        """
        super().__init__("cmd", pmu_id_code, soc, frasec)
        self.set_command(command)
        self.set_extended_frame(extended_frame)

    def set_command(self, command):
        """Accept a designated command word or a raw 16-bit command code."""
        if command in CommandFrame.COMMANDS:
            self._command = CommandFrame.COMMANDS[command]
        else:
            self._command = CommandFrame._command2int(command)

    def get_command(self):
        # NOTE: raises KeyError for user-defined codes, which are never
        # present in COMMAND_WORDS (built only from the designated COMMANDS).
        return CommandFrame.COMMAND_WORDS[self._command]

    @staticmethod
    def _command2int(command):
        """Validate a user-defined (undesignated) 16-bit command code."""
        if not 0 <= command <= 65535:
            raise ValueError("Undesignated command code must be 16bit word. 0 <= COMMAND <= 65535")
        return command

    def set_extended_frame(self, extended_frame):
        """Store the optional extended-frame payload.

        BUGFIX: the attribute is now always defined; previously it was never
        set when extended_frame was None, so convert2bytes() on an
        "extended" command could fail with AttributeError.
        """
        if extended_frame is not None:
            self._extended_frame = CommandFrame._extended2int(extended_frame)
        else:
            self._extended_frame = None

    @staticmethod
    def _extended2int(extended_frame):
        """Validate the extended-frame payload length."""
        if len(extended_frame) > 65518:
            raise ValueError("Extended frame size to large. len(EXTENDED_FRAME) < 65518")
        return extended_frame

    def convert2bytes(self):
        """Serialize the command (plus extended payload for code 8) and
        delegate framing to the base class."""
        if self._command == 8:  # "extended" carries a user-defined payload
            cmd_b = self._command.to_bytes(2, "big") + self._extended_frame
        else:
            cmd_b = self._command.to_bytes(2, "big")
        return super().convert2bytes(cmd_b)

    @staticmethod
    def convert2frame(byte_data):
        """Deserialize *byte_data* into a CommandFrame.

        Raises FrameError when the CRC check or any field decode fails.
        """
        try:
            if not CommonFrame._check_crc(byte_data):
                raise FrameError("CRC failed. Command frame not valid.")
            pmu_code = int.from_bytes(byte_data[4:6], byteorder="big", signed=False)
            soc = int.from_bytes(byte_data[6:10], byteorder="big", signed=False)
            frasec = CommonFrame._int2frasec(int.from_bytes(byte_data[10:14], byteorder="big", signed=False))
            command_int = int.from_bytes(byte_data[14:16], byteorder="big", signed=False)
            # Designated codes map back to their word; anything else is a
            # user-defined command and stays numeric.
            command = CommandFrame.COMMAND_WORDS.get(command_int, command_int)
            # Check if extended frame
            if command == "extended":
                extended_frame = byte_data[16:-2]
            else:
                extended_frame = None
            return CommandFrame(pmu_code, command, extended_frame, soc, frasec)
        except Exception as error:
            raise FrameError("Error while creating Command frame: " + str(error))
class HeaderFrame(CommonFrame):
    """Header frame: carries a human-readable text message."""

    def __init__(self, pmu_id_code, header, soc=None, frasec=None):
        """Build a header frame with text *header* for *pmu_id_code*."""
        super().__init__("header", pmu_id_code, soc, frasec)
        self.set_header(header)

    def set_header(self, header):
        """Store the header text (a str; encoded on serialization)."""
        self._header = header

    def get_header(self):
        """Return the stored header text."""
        return self._header

    def convert2bytes(self):
        """Encode the header text (UTF-8, str.encode default) and delegate
        framing to the base class."""
        header_b = str.encode(self._header)
        return super().convert2bytes(header_b)

    @staticmethod
    def convert2frame(byte_data):
        """Deserialize *byte_data* into a HeaderFrame.

        Raises FrameError when the CRC check or any field decode fails.
        """
        try:
            if not CommonFrame._check_crc(byte_data):
                raise FrameError("CRC failed. Header frame not valid.")
            pmu_code = int.from_bytes(byte_data[4:6], byteorder="big", signed=False)
            soc = int.from_bytes(byte_data[6:10], byteorder="big", signed=False)
            frasec = CommonFrame._int2frasec(int.from_bytes(byte_data[10:14], byteorder="big", signed=False))
            # BUGFIX: decode the payload instead of str()-ing the bytes:
            # str(b"abc") yields "b'abc'", so round-tripping through
            # convert2bytes() would corrupt the header text.
            header_message = byte_data[14:-2].decode("utf-8")
            return HeaderFrame(pmu_code, header_message, soc, frasec)
        except Exception as error:
            raise FrameError("Error while creating Header frame: " + str(error))
class FrameError(BaseException):
    """Raised when a synchrophasor frame cannot be built or parsed."""
| 36.662167 | 121 | 0.559983 |
a93aa38f88e7f4a3756cf0005fa3dc39e99d3f5d | 7,275 | py | Python | argo/workflows/client/models/v1alpha1_cluster_workflow_template.py | argentumcode/argo-client-python | 31c1519056379d3f046d4b522f37af87243fdbb4 | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_cluster_workflow_template.py | argentumcode/argo-client-python | 31c1519056379d3f046d4b522f37af87243fdbb4 | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_cluster_workflow_template.py | argentumcode/argo-client-python | 31c1519056379d3f046d4b522f37af87243fdbb4 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v3.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1ClusterWorkflowTemplate(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each attribute; non-builtin strings name
    # other generated model classes.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1alpha1WorkflowTemplateSpec'
    }
    # Python attribute name -> key used in the serialized JSON document.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec'
    }
    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha1ClusterWorkflowTemplate - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default Configuration so the client_side_validation
        # flag consulted by the setters is always available.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self.discriminator = None
        # api_version and kind are optional; metadata and spec always go
        # through their setters, which reject None when validation is on.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        self.metadata = metadata
        self.spec = spec
    @property
    def api_version(self):
        """Gets the api_version of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :return: The api_version of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha1ClusterWorkflowTemplate.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :param api_version: The api_version of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :type: str
        """
        self._api_version = api_version
    @property
    def kind(self):
        """Gets the kind of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :return: The kind of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha1ClusterWorkflowTemplate.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.io.k8s.community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :param kind: The kind of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :return: The metadata of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha1ClusterWorkflowTemplate.
        :param metadata: The metadata of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :type: V1ObjectMeta
        """
        # metadata is a required field: reject None when validation is enabled.
        if self.local_vars_configuration.client_side_validation and metadata is None:  # noqa: E501
            raise ValueError("Invalid value for `metadata`, must not be `None`")  # noqa: E501
        self._metadata = metadata
    @property
    def spec(self):
        """Gets the spec of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :return: The spec of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :rtype: V1alpha1WorkflowTemplateSpec
        """
        return self._spec
    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1alpha1ClusterWorkflowTemplate.
        :param spec: The spec of this V1alpha1ClusterWorkflowTemplate.  # noqa: E501
        :type: V1alpha1WorkflowTemplateSpec
        """
        # spec is a required field: reject None when validation is enabled.
        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501
        self._spec = spec
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models: anything exposing to_dict()
        # (directly, inside a list, or as a dict value) is expanded.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compare the serialized dict forms.
        if not isinstance(other, V1alpha1ClusterWorkflowTemplate):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha1ClusterWorkflowTemplate):
            return True
        return self.to_dict() != other.to_dict()
| 35.487805 | 312 | 0.642062 |
2ce133e947758702b600cbe91ea61fce4f9aa2d4 | 2,169 | py | Python | tools/MetaMaptoStandoff.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 20 | 2015-01-26T01:39:44.000Z | 2020-05-30T19:04:14.000Z | tools/MetaMaptoStandoff.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 7 | 2015-04-11T12:57:42.000Z | 2016-04-08T13:43:44.000Z | tools/MetaMaptoStandoff.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 13 | 2015-01-26T01:39:45.000Z | 2022-03-09T16:45:09.000Z | #!/usr/bin/env python
# Script to convert MetaMap "fielded" ("-N" argument) output into
# standoff with reference to the original text.
import sys
import re
import os
import codecs
# Regex for the "signature" of a metamap "fielded" output line:
# a numeric index followed by the "|" field separator.
FIELDED_OUTPUT_RE = re.compile(r'^\d+\|')
class taggedEntity:
    """One standoff textbound annotation: a typed (eType) span
    [startOff, endOff) with a numeric annotation id (idNum)."""

    def __init__(self, startOff, endOff, eType, idNum):
        # Attribute names mirror the constructor arguments; they are read
        # directly by __str__.
        self.startOff, self.endOff = startOff, endOff
        self.eType, self.idNum = eType, idNum

    def __str__(self):
        # Standoff textbound line: "T<id>\t<type> <start> <end>".
        return "T%d\t%s %d %d" % (self.idNum, self.eType, self.startOff, self.endOff)
def MetaMap_lines_to_standoff(metamap_lines, reftext=None):
    """Convert MetaMap "fielded" ("-N") output lines into taggedEntity
    standoff annotations.

    metamap_lines: iterable of raw output lines.
    reftext: accepted for interface compatibility; not used here.
    Returns a list of taggedEntity objects numbered from 1.

    Diagnostics go to stderr via sys.stderr.write rather than the
    Python-2-only `print >>` statement, so the function parses under
    both Python 2 and 3.
    """
    tagged = []
    idseq = 1
    for l in metamap_lines:
        l = l.rstrip('\n')
        # silently skip lines that don't match the expected format
        if not FIELDED_OUTPUT_RE.match(l):
            continue
        # format is pipe-separated ("|") fields, the ones of interest
        # are in the following indices:
        # 3: preferred text form
        # 4: CUI
        # 5: semantic type (MetaMap code)
        # 8: start offset and length of match
        fields = l.split('|')
        if len(fields) < 9:
            sys.stderr.write("Note: skipping unparseable MetaMap output line: %s\n" % l)
            continue
        ctext, CUI, semtype, offset = fields[3], fields[4], fields[5], fields[8]
        # strip surrounding brackets from semantic type
        semtype = semtype.replace('[', '').replace(']', '')
        # parse length; note that this will only pick the first of multiple
        # discontinuous spans if they occur (simple heuristic for the head)
        m = re.match(r'^(?:\d+:\d+,)*(\d+):(\d+)$', offset)
        if m is None:
            # BUGFIX: a malformed offset field previously crashed on
            # m.groups(); skip such lines with a diagnostic instead.
            sys.stderr.write("Note: skipping MetaMap output line with "
                             "unparseable offset field: %s\n" % l)
            continue
        start, length = m.groups()
        start, length = int(start), int(length)
        tagged.append(taggedEntity(start, start + length, semtype, idseq))
        idseq += 1
    sys.stderr.write("MetaMaptoStandoff: returning %s tagged spans\n" % len(tagged))
    return tagged
if __name__ == "__main__":
    # Read MetaMap fielded output from stdin and emit one standoff
    # annotation per line. sys.stdout.write keeps this runnable on both
    # Python 2 and 3 (the original `print s` statement is a SyntaxError
    # under Python 3).
    lines = [l for l in sys.stdin]
    standoff = MetaMap_lines_to_standoff(lines)
    for s in standoff:
        sys.stdout.write("%s\n" % s)
0c947e60037da287445dd61858383e88f6442963 | 20,370 | py | Python | create_db.py | deontologician/d-d-scripts | 08c673b56bede01d4a106a9512469218976bc18f | [
"MIT"
] | 1 | 2016-05-09T04:26:29.000Z | 2016-05-09T04:26:29.000Z | create_db.py | deontologician/d-d-scripts | 08c673b56bede01d4a106a9512469218976bc18f | [
"MIT"
] | null | null | null | create_db.py | deontologician/d-d-scripts | 08c673b56bede01d4a106a9512469218976bc18f | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import models as M
import logging
# Log every SQL statement SQLAlchemy's engine emits at INFO level.
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
# Attach a default root handler so the engine log output is displayed.
logging.basicConfig()
def setup_db(db_filename):
    '''Create the sqlite database file (if needed) and all model tables.

    Table creation is idempotent: existing tables are left untouched.
    Returns the SQLAlchemy engine bound to the database.
    '''
    url = "sqlite:///{}".format(db_filename)
    engine = create_engine(url)
    M.Base.metadata.create_all(bind=engine)
    return engine
def initialize_database(session):
'''Sets up all reference tables'''
session.add_all([
M.Dice('d4', sides=4),
M.Dice('d6', sides=6),
M.Dice('d8', sides=8),
M.Dice('d10', sides=10),
M.Dice('d12', sides=12),
M.Dice('d20', sides=20),
])
session.add_all([
M.Size('Tiny', space=0.5),
M.Size('Small', space=1),
M.Size('Medium', space=1),
M.Size('Large', space=2),
M.Size('Huge', space=3),
M.Size('Gargantuan', space=4),
])
session.add_all([
M.StatType('Language'),
M.StatType('Skill'),
M.StatType('Damage'),
M.StatType('Proficiency'),
])
session.add_all([
M.BonusType('Armor'),
M.BonusType('Enhancement'),
M.BonusType('Feat'),
M.BonusType('Item'),
M.BonusType('Power'),
M.BonusType('Proficiency'),
M.BonusType('Racial'),
M.BonusType('Untyped'),
])
session.add_all([
M.PowerSource('Martial',
'Represents military training or general '
'prowess with physical weaponry'),
M.PowerSource('Divine',
'represents that powers come due to a '
'connection with a deity or the spiritual realm'),
M.PowerSource('Arcane', 'Your powers come from magical sources'),
M.PowerSource('Psionic', 'Your powers are mental'),
M.PowerSource('Primal', 'Your powers come from a deep connection '
'to the world and the spirits'),
M.PowerSource('Shadow', 'Your powers come form opening a well of '
'energy from the Shadowfell')
])
session.add_all([
M.DamageType('Acid',
'Corrosive liquid'),
M.DamageType('Cold',
'Ice crystals, arctic air, or frigid liquid'),
M.DamageType('Fire',
'Explosive bursts, fiery rays, or simple ignition'),
M.DamageType('Force',
'Invisible energy formed into incredibly hard, '
'yet nonsolid shapes'),
M.DamageType('Lightning',
'Electrical energy'),
M.DamageType('Necrotic',
'Purple-black energy that deadens flesh '
'and wounds the soul'),
M.DamageType('Poison',
"Toxins that reduce a creature's hit points"),
M.DamageType('Psychic',
'Effects that target the mind'),
M.DamageType('Radiant',
'Searing white light or shimmering colors'),
M.DamageType('Thunder',
'Shock waves and deafening sounds'),
])
session.add_all([
M.Alignment('Lawful Good', good=True, lawful=True),
M.Alignment('Good', good=True),
M.Alignment('Unaligned'),
M.Alignment('Evil', evil=True),
M.Alignment('Chaotic Evil', evil=True, chaotic=True),
])
session.add_all([
M.WeaponGroup('Axe'),
M.WeaponGroup('Bow'),
M.WeaponGroup('CrossBow'),
M.WeaponGroup('Flail'),
M.WeaponGroup('Hammer'),
M.WeaponGroup('Heavy Blade'),
M.WeaponGroup('Light Blade'),
M.WeaponGroup('Mace'),
M.WeaponGroup('Pick'),
M.WeaponGroup('Polearm'),
M.WeaponGroup('Sling'),
M.WeaponGroup('Spear'),
M.WeaponGroup('Staff'),
M.WeaponGroup('Unarmed'),
])
session.add_all([
M.WeaponProperty('Heavy Thrown'),
M.WeaponProperty('High Crit'),
M.WeaponProperty('Light Thrown'),
M.WeaponProperty('Load Free'),
M.WeaponProperty('Load Minor'),
M.WeaponProperty('Off-Hand'),
M.WeaponProperty('Reach'),
M.WeaponProperty('Small'),
M.WeaponProperty('Versatile'),
])
session.add_all([
M.WeaponCategory('Simple Melee', simple=True, melee=True),
M.WeaponCategory('Military Melee', military=True, melee=True),
M.WeaponCategory('Superior Melee', superior=True, melee=True),
M.WeaponCategory('Improvised Melee', improvised=True, melee=True),
M.WeaponCategory('Simple Ranged', simple=True, ranged=True),
M.WeaponCategory('Military Ranged', military=True, ranged=True),
M.WeaponCategory('Superior Ranged', superior=True, ranged=True),
M.WeaponCategory('Improvised Ranged', improvised=True, ranged=True),
])
session.add_all([
M.ArmorType('Cloth', weight='Light'),
M.ArmorType('Leather', weight='Light'),
M.ArmorType('Hide', weight='Light'),
M.ArmorType('Chainmail', weight='Heavy'),
M.ArmorType('Scale', weight='Heavy'),
M.ArmorType('Plate', weight='Heavy'),
])
session.add_all([
M.Deity('Avandra',
alignmentname='Good',
domains=['Change',
'Luck',
'Travel'],
patron_of='Halfling'),
M.Deity('Bahamut',
alignmentname='Lawful Good',
domains=['Justice',
'Protection',
'Nobility'],
patron_of='Dragonborn'),
M.Deity('Moradin',
alignmentname='Lawful Good',
domains=['Family',
'Community',
'Creation'],
patron_of='Dwarf'),
M.Deity('Pelor',
alignmentname='Good',
domains=['Sun',
'Agriculture',
'Time'],
season='Summer'),
M.Deity('Corellon',
alignmentname='Unaligned',
domains=['Beauty',
'Art',
'Magic',
'The Fey'],
patron_of='Eladrin',
season='Spring'),
M.Deity('Erathis',
alignmentname='Unaligned',
domains=['Civilization',
'Inventions',
'Law']),
M.Deity('Ioun',
alignmentname='Unaligned',
domains=['Knowledge',
'Skill',
'Prophecy']),
M.Deity('Kord',
alignmentname='Unaligned',
domains=['Storms',
'Battle',
'Strength']),
M.Deity('Melora',
alignmentname='Unaligned',
domains=['Wilderness',
'Nature',
'Sea']),
M.Deity('Raven Queen',
alignmentname='Unaligned',
domains=['Death',
'Fate',
'Doom'],
season='Winter'),
M.Deity('Sehanine',
alignmentname='Unaligned',
domains=['Illusion',
'Love',
'The Moon'],
season='Autumn',
patron_of='Elf'),
M.Deity('Asmodeus',
alignmentname='Evil',
domains=['Tyranny',
'Domination']),
M.Deity('Bane',
alignmentname='Evil',
domains=['War', 'Conquest']),
M.Deity('Gruumsh',
alignmentname='Chaotic Evil',
domains=['Slaughter', 'Destruction']),
M.Deity('Lolth',
alignmentname='Chaotic Evil',
domains=['Shadow', 'Lies']),
M.Deity('Tharizdun'),
M.Deity('Tiamat',
alignmentname='Evil',
domains=['Greed', 'Envy']),
M.Deity('Torog',
alignmentname='Evil',
domains=['The Underdark']),
M.Deity('Vecna',
alignmentname='Evil',
domains=['The Undead', 'Necromancy']),
M.Deity('Zehir',
alignmentname='Evil',
domains=['Darkness', 'Poison']),
])
session.add_all([
M.Skill('Acrobatics', M.DEX, armor_penalty=True),
M.Skill('Arcana', M.INT),
M.Skill('Athletics', M.STR, armor_penalty=True),
M.Skill('Bluff', M.CHA),
M.Skill('Diplomacy', M.CHA),
M.Skill('Dungeoneering', M.WIS),
M.Skill('Endurance', M.CON, armor_penalty=True),
M.Skill('Heal', M.WIS),
M.Skill('History', M.INT),
M.Skill('Insight', M.WIS),
M.Skill('Intimidate', M.CHA),
M.Skill('Nature', M.WIS),
M.Skill('Perception', M.WIS),
M.Skill('Religion', M.INT),
M.Skill('Stealth', M.DEX, armor_penalty=True),
M.Skill('Streetwise', M.CHA),
M.Skill('Thievery', M.DEX, armor_penalty=True),
])
session.add_all([
M.Language('Common', script='Common'),
M.Language('Deep Speech', script='Rellanic'),
M.Language('Draconic', script='Iokharic'),
M.Language('Dwarven', script='Davek'),
M.Language('Elven', script='Rellanic'),
M.Language('Giant', script='Davek'),
M.Language('Goblin', script='Common'),
M.Language('Primordial', script='Barazhad'),
M.Language('Supernal', script='Supernal'),
M.Language('Abyssal', script='Barazhad'),
])
deva = M.Race(
'Deva',
effect=M.Effect(
intelligence=+2,
wisdom=+2,
speed=6,
vision='Normal',
stats=[
M.SkillStat('History', +2),
M.SkillStat('Religion', +2),
M.LanguageStat('Common'),
]))
dragonborn = M.Race(
'Dragonborn',
effect=M.Effect(
strength=+2,
charisma=+2,
vision='Normal',
speed=6,
stats=[
M.SkillStat('History', +2),
M.SkillStat('Intimidate', +2),
M.LanguageStat('Common'),
M.LanguageStat('Draconic'),
]))
dwarf = M.Race(
'Dwarf',
effect=M.Effect(
constitution=+2,
wisdom=+2,
vision='Low-Light',
speed=5,
stats=[
M.SkillStat('Dungeoneering', +2),
M.SkillStat('Endurance', +2),
M.LanguageStat('Common'),
M.LanguageStat('Dwarven'),
]))
eladrin = M.Race(
'Eladrin',
effect=M.Effect(
dexterity=+2,
intelligence=+2,
vision='Low-Light',
speed=6,
stats=[
M.SkillStat('History', +2),
M.SkillStat('Arcana', +2),
M.LanguageStat('Common'),
M.LanguageStat('Elven'),
]))
elf = M.Race(
'Elf',
effect=M.Effect(
dexterity=+2,
wisdom=+2,
vision='Low-Light',
speed=7,
stats=[
M.SkillStat('Nature', +2),
M.SkillStat('Perception', +2),
M.LanguageStat('Common'),
M.LanguageStat('Elven'),
]))
half_elf = M.Race(
'Half-Elf',
effect=M.Effect(
constitution=+2,
charisma=+2,
vision='Low-Light',
speed=6,
stats=[
M.SkillStat('Diplomacy', +2),
M.SkillStat('Insight', +2),
M.LanguageStat('Common'),
M.LanguageStat('Elven'),
]))
halfling = M.Race(
'Halfling',
sizename='Small',
effect=M.Effect(
dexterity=+2,
charisma=+2,
vision='Normal',
speed=6,
stats=[
M.SkillStat('Acrobatics', +2),
M.SkillStat('Thievery', +2),
M.LanguageStat('Common'),
]))
human = M.Race(
'Human',
effect=M.Effect(
vision='Normal',
speed=6,
fortitude=+1,
reflex=+1,
will=+1,
stats=[
M.LanguageStat('Common'),
]))
tiefling = M.Race(
'Tiefling',
effect=M.Effect(
intelligence=+2,
charisma=+2,
vision='Low-Light',
speed=6,
stats=[
M.SkillStat('Bluff', +2),
M.SkillStat('Stealth', +2),
M.LanguageStat('Common'),
]))
gnome = M.Race(
'Gnome',
sizename='Small',
effect=M.Effect(
intelligence=+2,
charisma=+2,
vision='Low-Light',
speed=5,
stats=[
M.SkillStat('Arcana', +2),
M.SkillStat('Stealth', +2),
M.LanguageStat('Common'),
M.LanguageStat('Elven'),
]))
goliath = M.Race(
'Goliath',
effect=M.Effect(
strength=+2,
constitution=+2,
vision='Normal',
speed=6,
stats=[
M.SkillStat('Athletics', +2),
M.SkillStat('Nature', +2),
M.LanguageStat('Common'),
]))
half_orc = M.Race(
'Half-Orc',
effect=M.Effect(
strength=+2,
dexterity=+2,
vision='Low-Light',
speed=6,
will=+1,
stats=[
M.SkillStat('Endurance', +2),
M.SkillStat('Intimidate', +2),
M.LanguageStat('Common'),
M.LanguageStat('Giant'),
]))
longtooth_shifter = M.Race(
'Longtooth Shifter',
effect=M.Effect(
strength=+2,
wisdom=+2,
vision='Low-Light',
speed=6,
stats=[
M.SkillStat('Athletics', +2),
M.SkillStat('Endurance', +2),
M.LanguageStat('Common'),
]))
razorclaw_shifter = M.Race(
'Razorclaw Shifter',
effect=M.Effect(
strength=+2,
wisdom=+2,
vision='Low-Light',
speed=6,
stats=[
M.SkillStat('Acrobatics', +2),
M.SkillStat('Stealth', +2),
M.LanguageStat('Common'),
]))
session.add_all([deva, dragonborn, dwarf, eladrin, elf, half_elf, halfling,
human, tiefling, gnome, goliath, half_orc,
longtooth_shifter, razorclaw_shifter])
cleric = M.Class(
'Cleric',
powersource_name='Divine',
role='Leader',
effects=[
M.Effect(
'Cleric Class Benefits',
healing_surges=7,
stats=[
M.ProficiencyStat(M.WeaponCategory('Simple Melee')),
M.ProficiencyStat(M.WeaponCategory('Simple Ranged')),
M.ProficiencyStat(M.ArmorType('Light')),
M.ProficiencyStat(M.ArmorType('Cloth')),
]
)
])
fighter = M.Class(
'Fighter',
powersource_name='Martial',
role='Defender',
effects=[
M.Effect(
'Fighter Class Benefits',
healing_surges=9
)
])
paladin = M.Class(
'Paladin',
powersource_name='Divine',
role='Defender',
effects=[
M.Effect(
'Paladin Class Benefits',
healing_surges=10
)
])
ranger = M.Class(
'Ranger',
powersource_name='Martial',
role='Striker',
effects=[
M.Effect(
'Ranger Class Benefits',
healing_surges=6
)
])
rogue = M.Class(
'Rogue',
powersource_name='Martial',
role='Striker',
effects=[
M.Effect(
'Rogue Class Benefits',
healing_surges=6
)
])
warlock = M.Class(
'Warlock',
powersource_name='Arcane',
role='Striker',
effects=[
M.Effect(
'Warlock Class Benefits',
healing_surges=6
)
])
warlord = M.Class(
'Warlord',
powersource_name='Martial',
role='Leader',
effects=[
M.Effect(
'Warlord Class Benefits',
healing_surges=7
)
])
wizard = M.Class(
'Wizard',
powersource_name='Arcane',
role='Controller',
effects=[
M.Effect(
'Wizard Class Benefits',
healing_surges=6
)
])
avenger = M.Class(
'Avenger',
powersource_name='Divine',
role='Striker',
effects=[
M.Effect(
'Avenger Class Benefits',
healing_surges=7
)
])
barbarian = M.Class(
'Barbarian',
powersource_name='Primal',
role='Striker',
effects=[
M.Effect(
'Barbarian Class Benefits',
healing_surges=8
)
])
bard = M.Class(
'Bard',
powersource_name='Arcane',
role='Leader',
effects=[
M.Effect(
'Bard Class Benefits',
healing_surges=7
)
])
druid = M.Class(
'Druid',
powersource_name='Primal',
role='Controller',
effects=[
M.Effect(
'Druid Class Benefits',
healing_surges=7
)
])
invoker = M.Class(
'Invoker',
powersource_name='Divine',
role='Controller',
effects=[
M.Effect(
'Invoker Class Benefits',
healing_surges=6
)
])
shaman = M.Class(
'Shaman',
powersource_name='Primal',
role='Leader',
effects=[
M.Effect(
'Shaman Class Benefits',
healing_surges=7
)
])
sorcerer = M.Class(
'Sorcerer',
powersource_name='Arcane',
role='Striker',
effects=[
M.Effect(
'Sorcerer Class Benefits',
healing_surges=6
)
])
warden = M.Class(
'Warden',
powersource_name='Primal',
role='Defender',
effects=[
M.Effect(
'Warden Class Benefits',
healing_surges=9
)
])
session.add_all([cleric, fighter, paladin, ranger, rogue, warlock, warlord,
wizard, avenger, barbarian, bard, druid, invoker, shaman,
sorcerer, warden])
session.add_all([
M.Armor('Cloth Armor (basic clothing)', typename='Cloth', weight=4),
M.Armor('Feyweave Armor', typename='Cloth', weight=5, ac_bonus=+1),
M.Armor('Starweave Armor', typename='Cloth', weight=3, ac_bonus=+2),
M.Armor('Leather Armor', typename='Leather', weight=15, ac_bonus=+2),
M.Armor('Feyleather Armor',
typename='Leather', weight=15, ac_bonus=+3),
M.Armor('Starleather Armor',
typename='Leather', weight=15, ac_bonus=+4),
M.Armor('Hide Armor',
typename='Hide', weight=25, check=-1, ac_bonus=+3),
])
def get_session(db_name):
    """Create and return a fresh SQLAlchemy session bound to *db_name*."""
    engine = setup_db(db_name)
    session_factory = sessionmaker(bind=engine)
    return session_factory()
def main(filename):
    """Populate the database file *filename* with the initial fixture data."""
    session = get_session(filename)
    initialize_database(session)
    session.commit()


if __name__ == '__main__':
    main('test1.db')
| 31.630435 | 79 | 0.469219 |
d579269c7047fc1b611c2042aeae36de75946013 | 12,034 | py | Python | back/datasetgenerator.py | camilodoa/ai-melts-ice | 80f06d7e4030fdcae25325ab6f291ea723edae56 | [
"MIT"
] | 2 | 2020-06-01T18:27:58.000Z | 2020-06-02T04:35:32.000Z | back/datasetgenerator.py | camilodoa/ai-melts-ice | 80f06d7e4030fdcae25325ab6f291ea723edae56 | [
"MIT"
] | 25 | 2020-03-24T18:25:39.000Z | 2022-01-19T16:54:20.000Z | back/datasetgenerator.py | camilodoa/ai-melts-ice | 80f06d7e4030fdcae25325ab6f291ea723edae56 | [
"MIT"
] | null | null | null | from geopy.geocoders import Nominatim
import pandas as pd
import numpy as np
import pickle
import time
import requests
import urllib.request as urllib
class Syracuse():
    '''
    Object used to query Syracuse deportation database
    Website: https://trac.syr.edu/phptools/immigration/arrest/
    '''
    def __init__(self):
        # Number of distinct cities/counties indexed by the TRAC tool.
        self.cities = 385
        self.counties = 1972

    def query(self, county):
        """Fetch the arrest timeline for *county* (a TRAC county index,
        passed as a string) and return the decoded JSON payload."""
        # FIX: the query string previously read "count×cale=fymon" -- an
        # HTML-entity mangling of "count&timescale=fymon" ("&times" was
        # decoded to the multiplication sign).  Restore the proper
        # "&timescale" parameter so the endpoint receives valid arguments.
        url = (
            'https://trac.syr.edu/phptools/immigration/arrest/graph.php?stat='
            'count&timescale=fymon&county=[COUNTY]&timeunit=number'
        ).replace('[COUNTY]', county)
        headers = {
            'content-type': 'application/json'
        }
        return requests.get(url, headers=headers).json()
class Census():
    '''
    Object used to query US Gov't census dataset
    Website: https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/
    '''
    def __init__(self):
        # Direct link to the 2010-2019 county population estimates CSV.
        self.link = 'https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/co-est2019-alldata.csv'

    def download_population(self):
        """Download the county population estimates CSV into the CWD."""
        # `urllib` is `urllib.request` (module-level alias); close the
        # HTTP response deterministically once the payload is read.
        with urllib.urlopen(self.link) as response:
            data = response.read()
        with open('./population2010-2019.csv', 'wb') as f:
            f.write(data)

    def save_state_codes(self):
        """Persist and return the state-name -> two-letter-code mapping."""
        mapping = {
            'Armed Forces America' : 'AA',
            'Armed Forces' : 'AE',
            'Alaska' : 'AK',
            'Alabama' : 'AL',
            'Armed Forces Pacific' : 'AP',
            'Arkansas' : 'AR',
            'Arizona' : 'AZ',
            'California' : 'CA',
            'Colorado' : 'CO',
            'Connecticut' : 'CT',
            'Washington DC' : 'DC',
            'Delaware' : 'DE',
            'Florida' : 'FL',
            'Georgia' : 'GA',
            'Guam' : 'GU',
            'Hawaii' : 'HI',
            'Iowa' : 'IA',
            'Idaho' : 'ID',
            'Illinois' : 'IL',
            'Indiana' : 'IN',
            'Kansas' : 'KS',
            'Kentucky' : 'KY',
            'Louisiana' : 'LA',
            'Massachusetts' : 'MA',
            'Maryland' : 'MD',
            'Maine' : 'ME',
            'Michigan' : 'MI',
            'Minnesota' : 'MN',
            'Missouri' : 'MO',
            'Mississippi' : 'MS',
            'Montana' : 'MT',
            'North Carolina' : 'NC',
            'North Dakota' : 'ND',
            'Nebraska' : 'NE',
            'New Hampshire' : 'NH',
            'New Jersey' : 'NJ',
            'New Mexico' : 'NM',
            'Nevada' : 'NV',
            'New York' : 'NY',
            'Ohio' : 'OH',
            'Oklahoma' : 'OK',
            'Oregon' : 'OR',
            'Pennsylvania' : 'PA',
            'Puerto Rico' : 'PR',
            'Rhode Island' : 'RI',
            'South Carolina' : 'SC',
            'South Dakota' : 'SD',
            'Tennessee' : 'TN',
            'Texas' : 'TX',
            'Utah' : 'UT',
            'Virginia' : 'VA',
            'Virgin Islands' : 'VI',
            'Vermont' : 'VT',
            'Washington' : 'WA',
            'Wisconsin' : 'WI',
            'West Virginia' : 'WV',
            'Wyoming' : 'WY'
        }
        # FIX: pickle.dump(mapping, open(...)) left the file handle open
        # (and potentially unflushed on non-refcounting interpreters);
        # use a context manager instead.
        with open('states.dict', 'wb') as f:
            pickle.dump(mapping, f)
        return mapping

    def load_state_codes(self):
        """Load the mapping previously written by save_state_codes()."""
        # FIX: same unclosed-handle issue as above for the read side.
        with open('states.dict', 'rb') as f:
            return pickle.load(f)
class Generator():
    '''
    Generator class. Used to initialize() an ICE arrests dataset by querying
    the Syracuse TRAC web API.

    Produces three artifacts in the working directory:
    arrests2014-2018.csv, coordinates.dict and dataset.csv.
    '''
    def __init__(self, reinit = False, reinit_locations = False):
        # Reinitialize dataset (T/F)
        self.reinit = reinit
        self.reinit_locations = reinit_locations
        # API clients for arrests (TRAC) and population (census) data.
        self.s = Syracuse()
        self.c = Census()
        # NOTE: initialize() prompts on stdin when reinit is True.
        self.initialize()

    def initialize(self):
        '''
        Initializes and saves Syracuse data to file (arrests2014-2018.csv)

        Interactive: asks Y/N on stdin before each (slow) download step.
        No-op unless the instance was constructed with reinit=True.
        '''
        if not self.reinit: return
        ok = ''
        # Reinit ICE data?
        while ok != 'y' and ok != 'n':
            ok = input('Download arrests2014-2018.csv? Y/N: ').lower().strip()
        if ok.lower() == 'n': pass
        elif ok.lower() == 'y': self.download_arrests().to_csv('arrests2014-2018.csv', index = False)
        # Reinit county to location data
        # This is a costly operation, so there is a seperate check for it
        if self.reinit_locations: self.download_locations()
        # Reinit population data?
        ok = ''
        while ok != 'y' and ok != 'n':
            ok = input('Download population2010-2019.csv? Y/N: ').lower().strip()
        if ok.lower() == 'n': pass
        elif ok.lower() == 'y':
            c = Census()
            c.download_population()
        # Reinit dataset?
        ok = ''
        while ok != 'y' and ok != 'n':
            ok = input('Create dataset.csv? Y/N: ').lower().strip()
        if ok.lower() == 'n': pass
        elif ok.lower() == 'y':
            self.create_dataset().to_csv('dataset.csv', index = False)
            # NOTE(review): save_segmented_dataset is not defined anywhere
            # on this class, so this branch raises AttributeError at
            # runtime -- confirm whether the method was removed/renamed.
            self.save_segmented_dataset()

    def fetch_arrests(self, county):
        '''
        Queries Syracuse DB for data on a particular county, based on #
        (the integer county index used by the TRAC API).
        '''
        return self.s.query(str(county))

    def download_arrests(self):
        '''
        Fills out empty Pandas DF with Syracuse data

        Rows are dates (fiscal-year months), columns are county names,
        values are arrest counts; missing values become 0.
        '''
        df = pd.DataFrame()
        # Create dictionary to make the transition to dataset easier
        # NOTE(review): `dict` and `json` below shadow builtins.
        dict = {}
        for county in range(self.s.counties):
            json = self.fetch_arrests(county)
            print(json['title'])
            # Empty title means the index maps to no county; skip it.
            if json['title'] == '': continue
            for point in json['timeline']:
                date = pd.to_datetime(point['fymon'])
                county = json['title']
                if dict.get(date) == None: dict[date] = {'Date' : date}
                dict[date][county] = int(point['number'])
        # Transfer dictionary layout to dataset
        # NOTE(review): DataFrame.append was removed in pandas 2.0 --
        # this requires an older pandas; confirm the pinned version.
        for data in dict.values():
            df = df.append(data, ignore_index = True, sort = False)
        df = df.fillna(0)
        df['Date'] = pd.to_datetime(df['Date'])
        df = df.sort_values(by = ['Date'])
        return df

    def download_locations(self):
        '''
        Make and save a dictionary for translating county name to coordinate

        Geocodes every county column via Nominatim (1 request/second) and
        pickles the {county: [lat, lon]} mapping to coordinates.dict.
        '''
        df = self.load_arrests().drop(['Date'], axis = 1)
        geolocator, mapping = Nominatim(user_agent='ai-melts-ice', timeout=None), {}
        for i, county in enumerate(df.columns):
            location = geolocator.geocode(county)
            # Stored as [latitude, longitude] (printed reversed below).
            mapping.update({county : [location.latitude, location.longitude]})
            print(i, {county : [location.longitude, location.latitude]})
            # Don't spam their servers too much
            time.sleep(1)
        pickle.dump(mapping,open( 'coordinates.dict', 'wb' ))
        return mapping

    def create_dataset(self):
        '''
        Used to create a dataset with arrest, population, and location data
        for training

        Joins the arrests table with census population estimates and the
        geocoded coordinates; output has 4 columns per county plus Date.
        '''
        population = self.load_population()
        states = self.c.load_state_codes()
        locations = self.load_locations()
        arrests = self.load_arrests()
        # Load population and dates that we care about
        population = population.filter(
            items=['CTYNAME', 'POPESTIMATE2014', 'POPESTIMATE2015',
                   'POPESTIMATE2016', 'POPESTIMATE2017', 'POPESTIMATE2018',
                   'POPESTIMATE2019']
        )
        # Match population counties to dataset counties
        curr_code = None
        for i in population.index:
            # Current county
            curr = population.loc[i, 'CTYNAME']
            # County name attempted to be converted to state code
            state = states.get(curr)
            # Special case
            if curr == 'District of Columbia':
                population.loc[i, 'CTYNAME'] = 'District of Columbia, DC'
                continue
            # If the current county name is a state name, that will be the state
            # the next counties are in
            if state != None:
                # Set the current code to the state code
                curr_code = state
            # If the current county is not a state, rename it using the
            # last found state code
            elif curr_code != None:
                population.loc[i, 'CTYNAME'] = curr + ', ' + curr_code
        # Find values in ICE dataset that aren't in population dataset
        missing = []
        for key, value in locations.items():
            if not (population['CTYNAME'] == key).any():
                missing.append(key)
        # Filter rows based on whether they're in ICE dataset
        population = population[population['CTYNAME'].map(lambda x : locations.get(x) is not None)]
        # Drop values in ICE dataset that aren't in population dataset
        arrests = arrests.drop(columns=missing, axis = 1)
        # Create new dataset for ML
        no_date = arrests.drop(['Date'], axis=1)
        labels = ['{0} - arrests', '{0} - population', '{0} - longitude', '{0} - latitude']
        new_columns = [label.format(county) for county in no_date.columns for label in labels]
        new_columns.append('Date')
        dataset = pd.DataFrame(index=arrests.index, columns=new_columns)
        # Populate dataset
        for i in arrests.index:
            # Last two digits of the fiscal-year date select the matching
            # POPESTIMATE20XX column.
            year = arrests.loc[i, 'Date'][-2:]
            dataset.loc[i, 'Date'] = arrests.loc[i, 'Date']
            for j in population.index:
                # Retrieve data
                county = population.loc[j, 'CTYNAME']
                pop = population.loc[j, 'POPESTIMATE20' + year]
                arr = arrests.loc[i, county]
                # NOTE(review): locations stores [latitude, longitude]
                # (see download_locations), so index 0 here is actually
                # latitude despite being written to the longitude column
                # -- confirm intended orientation.
                longitude = locations.get(county)[0]
                latitude = locations.get(county)[1]
                # Fill in new dataset
                dataset.loc[i, labels[0].format(county)] = arr
                dataset.loc[i, labels[1].format(county)] = pop
                dataset.loc[i, labels[2].format(county)] = longitude
                dataset.loc[i, labels[3].format(county)] = latitude
        return dataset

    def load_locations(self):
        # Unpickle the {county: [lat, lon]} mapping.
        return pickle.load(open('coordinates.dict','rb'))

    def load_arrests(self):
        # Cached arrests table written by initialize().
        return pd.read_csv('arrests2014-2018.csv', encoding = 'utf8')

    def load_population(self):
        # Census file is Latin-1 encoded.
        return pd.read_csv('population2010-2019.csv', encoding = 'ISO-8859-1')

    def load_dataset(self, parse_dates = False):
        # Optionally parse the Date column into datetimes.
        return pd.read_csv('dataset.csv', infer_datetime_format=True, parse_dates=['Date']) if parse_dates else pd.read_csv('dataset.csv', encoding = 'utf8')

    def split(self, df, n_steps):
        '''
        Splits data frame into X Y pairs for network.
        n_steps is the number of months in each X.

        X windows contain every column; Y targets contain only the
        per-county arrest columns at the step after each window.
        '''
        n = len(df.values)
        # sequences_x = df.values
        # sequences_x = df.values.reshape(n, 1960, 4)
        x_columns = [col for col in df.columns]
        sequences_x = df[x_columns].values
        # Arrange our output to only contain arrests per county
        y_columns = [col for col in df.columns if 'arrests' in col]
        sequences_y = df[y_columns].values
        X, Y = [], []
        for k in range(n):
            # End of pattern
            end_ix = k + n_steps
            # Check for out of bounds exceptions
            if end_ix > len(sequences_x) - 1: break
            # Input and output parts of the pattern
            seq_x, seq_y = sequences_x[k : end_ix, :], sequences_y[end_ix, :]
            X.append(seq_x)
            Y.append(seq_y)
        return np.array(X), np.array(Y)

    def convert(self, df, n_steps, lo, hi):
        '''
        Converts table tuple into X input for network

        Returns a single window of rows [lo:hi] (or [lo:] when hi == 0)
        wrapped in a leading batch dimension.
        '''
        sequences = df.values
        X = []
        if hi == 0: X.append(sequences[lo:, :])
        else: X.append(sequences[lo:hi, :])
        return np.array(X)
if __name__ == '__main__':
    # Usage example: build a Generator without re-downloading anything.
    g = Generator(reinit = False)
| 35.922388 | 157 | 0.533572 |
21de040d794a4c015a327ae6ef79ba25fe13e094 | 7,007 | py | Python | openstack_dashboard/contrib/sahara/content/data_processing/clusters/tabs.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 1 | 2021-01-20T00:14:15.000Z | 2021-01-20T00:14:15.000Z | openstack_dashboard/contrib/sahara/content/data_processing/clusters/tabs.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 1 | 2019-10-27T15:57:25.000Z | 2019-10-27T15:57:25.000Z | openstack_dashboard/contrib/sahara/content/data_processing/clusters/tabs.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 15 | 2017-01-12T10:40:00.000Z | 2019-04-19T08:28:05.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from openstack_dashboard.contrib.sahara.content. \
data_processing.utils import workflow_helpers as helpers
from openstack_dashboard.api import glance
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.sahara.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
    """Details tab showing general information about one Sahara cluster."""

    name = _("General Info")
    slug = "cluster_details_tab"
    template_name = "project/data_processing.clusters/_details.html"

    def get_context_data(self, request):
        # Cluster id is injected by the enclosing tab group's URL kwargs.
        cluster_id = self.tab_group.kwargs['cluster_id']
        cluster_info = {}
        try:
            sahara = saharaclient.client(request)
            cluster = sahara.clusters.get(cluster_id)
            # Turn any http(s) values in the cluster's info dict into
            # clickable anchors for the template.
            for info_key, info_val in cluster.info.items():
                for key, val in info_val.items():
                    if str(val).startswith(('http://', 'https://')):
                        cluster.info[info_key][key] = build_link(val)
            base_image = glance.image_get(request,
                                          cluster.default_image_id)
            # Template and network are optional on older clusters.
            if getattr(cluster, 'cluster_template_id', None):
                cluster_template = saharaclient.safe_call(
                    sahara.cluster_templates.get,
                    cluster.cluster_template_id)
            else:
                cluster_template = None
            if getattr(cluster, 'neutron_management_network', None):
                net_id = cluster.neutron_management_network
                network = neutron.network_get(request, net_id)
                net_name = network.name_or_id
            else:
                net_name = None
            cluster_info.update({"cluster": cluster,
                                 "base_image": base_image,
                                 "cluster_template": cluster_template,
                                 "network": net_name})
        except Exception as e:
            # Best-effort: log and render an empty context on API failure.
            LOG.error("Unable to fetch cluster details: %s" % str(e))
        return cluster_info
def build_link(url):
    """Wrap *url* in an HTML anchor that opens in a new browser tab."""
    anchor_template = "<a href='{0}' target=\"_blank\">{0}</a>"
    return anchor_template.format(url)
class NodeGroupsTab(tabs.Tab):
    """Details tab listing the cluster's node groups with resolved names."""

    name = _("Node Groups")
    slug = "cluster_nodegroups_tab"
    template_name = (
        "project/data_processing.clusters/_nodegroups_details.html")

    def get_context_data(self, request):
        cluster_id = self.tab_group.kwargs['cluster_id']
        try:
            sahara = saharaclient.client(request)
            cluster = sahara.clusters.get(cluster_id)
            # Enrich each node group dict in place with human-readable
            # names resolved from nova/neutron/sahara.
            for ng in cluster.node_groups:
                if ng["flavor_id"]:
                    ng["flavor_name"] = (
                        nova.flavor_get(request, ng["flavor_id"]).name)
                if ng["floating_ip_pool"]:
                    ng["floating_ip_pool_name"] = (
                        self._get_floating_ip_pool_name(
                            request, ng["floating_ip_pool"]))
                if ng.get("node_group_template_id", None):
                    ng["node_group_template"] = saharaclient.safe_call(
                        sahara.node_group_templates.get,
                        ng["node_group_template_id"])
                ng["security_groups_full"] = helpers.get_security_groups(
                    request, ng["security_groups"])
        except Exception:
            # Render an empty cluster and surface the error to the user.
            cluster = {}
            exceptions.handle(request,
                              _("Unable to get node group details."))
        return {"cluster": cluster}

    def _get_floating_ip_pool_name(self, request, pool_id):
        # Fall back to the raw id when the pool can no longer be found.
        pools = [pool for pool in network.floating_ip_pools_list(
            request) if pool.id == pool_id]
        return pools[0].name if pools else pool_id
class Instance(object):
    """Lightweight record describing one cluster instance for the table."""

    def __init__(self, name=None, id=None, internal_ip=None,
                 management_ip=None):
        # Plain attributes so InstancesTable columns can read them directly.
        self.id = id
        self.name = name
        self.management_ip = management_ip
        self.internal_ip = internal_ip
class InstancesTable(tables.DataTable):
    """Horizon table rendering the instances of a cluster."""

    # Instance name links through to the standard instance detail view.
    name = tables.Column("name",
                         link="horizon:project:instances:detail",
                         verbose_name=_("Name"))
    internal_ip = tables.Column("internal_ip",
                                verbose_name=_("Internal IP"))
    management_ip = tables.Column("management_ip",
                                  verbose_name=_("Management IP"))

    class Meta(object):
        name = "cluster_instances"
        verbose_name = _("Cluster Instances")
class InstancesTab(tabs.TableTab):
    """Details tab that feeds InstancesTable with the cluster's instances."""

    name = _("Instances")
    slug = "cluster_instances_tab"
    template_name = "project/data_processing.clusters/_instances_details.html"
    table_classes = (InstancesTable, )

    def get_cluster_instances_data(self):
        # Data source method named after InstancesTable's Meta.name.
        cluster_id = self.tab_group.kwargs['cluster_id']
        try:
            sahara = saharaclient.client(self.request)
            cluster = sahara.clusters.get(cluster_id)
            # Flatten every node group's instances into table rows.
            instances = []
            for ng in cluster.node_groups:
                for instance in ng["instances"]:
                    instances.append(Instance(
                        name=instance["instance_name"],
                        id=instance["instance_id"],
                        internal_ip=instance.get("internal_ip",
                                                 "Not assigned"),
                        management_ip=instance.get("management_ip",
                                                   "Not assigned")))
        except Exception:
            # Show an empty table and surface the error to the user.
            instances = []
            exceptions.handle(self.request,
                              _("Unable to fetch instance details."))
        return instances
class EventLogTab(tabs.Tab):
    """Details tab rendering the cluster provisioning event log."""

    name = _("Cluster Events")
    slug = "cluster_event_log"
    template_name = "project/data_processing.clusters/_event_log.html"

    def get_context_data(self, request, **kwargs):
        # Expose the cluster id plus the URL the event-log template polls
        # for live updates.
        kwargs.update({
            "cluster_id": self.tab_group.kwargs['cluster_id'],
            "data_update_url": request.get_full_path(),
        })
        return kwargs
class ClusterDetailsTabs(tabs.TabGroup):
    """Tab group composing the cluster details page."""

    slug = "cluster_details"
    tabs = (GeneralTab, NodeGroupsTab, InstancesTab, EventLogTab)
    # Remember the last selected tab across page loads.
    sticky = True
| 35.75 | 78 | 0.601256 |
63f567723eeb891b38109f2cd18a56390bc1815a | 516 | py | Python | python/151. Reverse Words in a String.py | DragonYong/Happy-Algorithm | 7ca585d5990f5c4587ab4e22178ecb88e5d57d91 | [
"Apache-2.0"
] | null | null | null | python/151. Reverse Words in a String.py | DragonYong/Happy-Algorithm | 7ca585d5990f5c4587ab4e22178ecb88e5d57d91 | [
"Apache-2.0"
] | 1 | 2020-08-31T09:32:56.000Z | 2020-08-31T09:32:56.000Z | python/151. Reverse Words in a String.py | DragonYong/Happy-Algorithm | 7ca585d5990f5c4587ab4e22178ecb88e5d57d91 | [
"Apache-2.0"
] | null | null | null | class Solution:
def reverseWords(self, s: str) -> str:
if not s or s = "":
return None
s = s.strip()
temp = s.split(" ")
for i in temp:
print("${}$".format(i))
stack = [i for i in temp if i != ""]
return " ".join(stack[::-1])
if __name__ == "__main__":
solution = Solution()
s = "the sky is blue"
s = "a good example"
print(solution.reverseWords(s))
# 这道通主要是注意细节,中间空格可能会有跟多个,需要吧多余的空格全部删除,还有就是临界的情况,队医空的输入没,需要异常捕获
| 23.454545 | 62 | 0.52907 |
873ad9d7ac2070f9a11b0486adca37d29ee8b0fb | 884 | py | Python | setup.py | skrushinsky/openspy | e6c3aed4e77a4c7d068b900e6a9e3b0c57318471 | [
"MIT"
] | 2 | 2021-07-03T18:15:22.000Z | 2021-07-03T20:25:19.000Z | setup.py | skrushinsky/openspy | e6c3aed4e77a4c7d068b900e6a9e3b0c57318471 | [
"MIT"
] | null | null | null | setup.py | skrushinsky/openspy | e6c3aed4e77a4c7d068b900e6a9e3b0c57318471 | [
"MIT"
] | 1 | 2018-02-16T11:44:38.000Z | 2018-02-16T11:44:38.000Z | from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='openspy',
version=version,
description="Search planes in given range",
long_description="""\
Query [OpenSky](https://opensky-network.org/apidoc/index.html) service
and outputs crafts in a given range (450km from Paris by default).
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='opensky',
author='Sergey Krushinsky',
author_email='krushinsky@gmail.com',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'requests',
'geopy',
],
tests_require=['nose>=1.0'],
entry_points="""
# -*- Entry points: -*-
""",
)
| 28.516129 | 95 | 0.623303 |
33f42490db6411c1eedd6b9e99cfa48741e51322 | 1,536 | py | Python | common/sendRequests.py | sinnosong/ApiAutoTest | a8f98507e996f9ae2e88cb275527cab44aa3a7d4 | [
"MIT"
] | 5 | 2020-08-04T04:31:30.000Z | 2021-07-14T01:31:39.000Z | common/sendRequests.py | sinnosong/ApiAutoTest | a8f98507e996f9ae2e88cb275527cab44aa3a7d4 | [
"MIT"
] | null | null | null | common/sendRequests.py | sinnosong/ApiAutoTest | a8f98507e996f9ae2e88cb275527cab44aa3a7d4 | [
"MIT"
] | 1 | 2020-08-04T07:01:11.000Z | 2020-08-04T07:01:11.000Z | import os,sys,json
import configparser
from config import setting
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
class SendRequests():
    """Send an HTTP request described by one row of test data."""

    def sendRequests(self, session, apiData):
        """Issue the request described by *apiData* through *session*.

        apiData is a mapping (e.g. a spreadsheet row) with the keys
        "method", "url", "params", "headers", "body" and "type"; an empty
        string means "not supplied".  Returns the response object, or
        None (implicitly) when building/sending the request fails.
        """
        # Read the host/port of the system under test from the config file.
        con = configparser.ConfigParser()
        con.read(setting.TEST_CONFIG, encoding="utf-8")
        test_host = con.get("test_host", "host")
        test_port = con.get("test_host", "port")
        try:
            method = apiData["method"]
            url = "http://" + test_host + ":" + test_port + apiData["url"]
            # SECURITY NOTE: these cells are parsed with eval(), i.e. the
            # spreadsheet contents are executed as Python.  Only feed this
            # trusted test data (ast.literal_eval would be safer).
            par = self._parse_field(apiData["params"])
            h = self._parse_field(apiData["headers"])
            body_data = self._parse_field(apiData["body"])
            # "json" rows are serialized before sending; "data" rows (and
            # any other type) are passed through unchanged.
            if apiData["type"] == "json":
                body = json.dumps(body_data)
            else:
                body = body_data
            # verify=False: certificate checks are deliberately disabled
            # for test environments.
            return session.request(method=method, url=url, headers=h,
                                   params=par, data=body, verify=False)
        except Exception as e:
            # Preserve the original best-effort behaviour: report the
            # problem and fall through (returns None).
            print(e)

    @staticmethod
    def _parse_field(raw):
        """Return None for an empty cell, otherwise eval() the cell text."""
        if raw == "":
            return None
        return eval(raw)
39e7e8d5d1e601dbd65a4b1c8d775171e3d137ce | 2,268 | py | Python | 03-opencv-lab/example08_classify_shapes.py | iproduct/course-robotics-npmg | 0feb2ded46007ba87b8128f1f2e039036ef274bd | [
"Apache-2.0"
] | null | null | null | 03-opencv-lab/example08_classify_shapes.py | iproduct/course-robotics-npmg | 0feb2ded46007ba87b8128f1f2e039036ef274bd | [
"Apache-2.0"
] | null | null | null | 03-opencv-lab/example08_classify_shapes.py | iproduct/course-robotics-npmg | 0feb2ded46007ba87b8128f1f2e039036ef274bd | [
"Apache-2.0"
] | 1 | 2021-03-17T09:08:02.000Z | 2021-03-17T09:08:02.000Z | import cv2
import numpy as np
import example06_imgstacking as stacking
def empty(arg):
    """No-op callback required by cv2.createTrackbar's signature."""
    return None
def classify_figure(contour, w, h):
    """Classify an approximated contour by vertex count and aspect ratio.

    contour -- the cv2.approxPolyDP() vertex list (only its length is used)
    w, h    -- width/height of the contour's bounding rectangle

    Returns one of "Triangle", "Square", "Rectangle", "Circle", "Ellipse",
    or "unrecognized".
    """
    num_vertices = len(contour)
    # FIX: a degenerate bounding box (h == 0) previously raised
    # ZeroDivisionError before the vertex count was even checked; treat
    # such contours as unrecognizable instead.
    if num_vertices < 3 or h == 0:
        return "unrecognized"
    ratio = w / float(h)
    # A bounding box within ~5% of square separates square/circle from
    # rectangle/ellipse.
    is_squarish = 0.95 < ratio < 1.05
    if num_vertices == 3:
        return "Triangle"
    if num_vertices == 4:
        return "Square" if is_squarish else "Rectangle"
    # More than four vertices: assume a smooth curve.
    return "Circle" if is_squarish else "Ellipse"
def find_contours(img):
    """Find external contours in the edge image and annotate them.

    Draws outlines, bounding boxes and the classified shape name onto
    the module-level ``img_copy`` canvas.
    """
    # NOTE(review): this draws on the global `img_copy` assigned in the
    # __main__ loop instead of taking a canvas parameter -- it can only
    # be called from that loop.
    contours, hierachy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        area = cv2.contourArea(contour)
        # Ignore tiny contours, which are typically Canny noise.
        if area > 50:
            print("area = ", area)
            cv2.drawContours(img_copy, contour, -1, (0,0,255), 2)
            perimeter = cv2.arcLength(contour, True)
            print("perimeter = ", perimeter)
            # Approximate with a coarser polygon (2% of the perimeter)
            # so the vertex count is usable for classification.
            approx = cv2.approxPolyDP(contour, 0.02*perimeter, True)
            print("Vertices:", len(approx))
            x, y, w, h = cv2.boundingRect(approx)
            cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0,255,0), 2)
            figure = classify_figure(approx, w, h)
            # Label the shape at the left edge, vertically centered.
            cv2.putText(img_copy, f'{figure}', (x, y + h//2), cv2.QT_FONT_NORMAL, 1, (0, 0, 255), 2, cv2.LINE_AA )
if __name__ == '__main__':
    # Trackbar window for tuning the two Canny thresholds interactively.
    cv2.namedWindow("Trackbars")
    cv2.resizeWindow("Trackbars", 480, 100)
    cv2.createTrackbar("Treshold 1", "Trackbars", 50, 255, empty)
    cv2.createTrackbar("Treshold 2", "Trackbars", 50, 255, empty)
    img = cv2.imread("resources/geometric-shapes.png")
    # Blur before edge detection to suppress noise.
    blurred = cv2.GaussianBlur(img,(7,7), 1)
    blank = np.zeros_like(img)
    while True:
        # Re-read thresholds each frame so slider changes apply live.
        t1 = cv2.getTrackbarPos("Treshold 1", "Trackbars")
        t2 = cv2.getTrackbarPos("Treshold 2", "Trackbars")
        canny = cv2.Canny(blurred, t1, t2)
        # Fresh canvas every frame; find_contours() draws on it (global).
        img_copy = img.copy();
        find_contours(canny);
        img_stack = stacking.stack_images(((img, blurred, canny),
                                           (img_copy, blank, blank)), 0.7)
        cv2.imshow("Shapes", img_stack)
        # Exit on ESC.
        if cv2.waitKey(10) & 0xFF == 27:
            break
    cv2.destroyAllWindows()
f5087825ee567645c2d88251ea0f39e6007de37c | 32,486 | py | Python | pycln/utils/scan.py | vantage-ola/pycln | 492fd68898827b5bbee0d971e2eab5249bfae272 | [
"MIT"
] | 1 | 2021-07-08T04:57:03.000Z | 2021-07-08T04:57:03.000Z | pycln/utils/scan.py | vantage-ola/pycln | 492fd68898827b5bbee0d971e2eab5249bfae272 | [
"MIT"
] | null | null | null | pycln/utils/scan.py | vantage-ola/pycln | 492fd68898827b5bbee0d971e2eab5249bfae272 | [
"MIT"
] | null | null | null | """Pycln source code AST analysis utility."""
import ast
import os
import sys
from dataclasses import dataclass
from enum import Enum, unique
from functools import wraps
from pathlib import Path
from typing import Any, Callable, List, Optional, Set, Tuple, TypeVar, Union, cast
from . import _nodes, iou, pathu
from ._exceptions import ReadPermissionError, UnexpandableImportStar, UnparsableFile
# Constants.
# Interpreter feature gates (ast differences between Python versions).
PY38_PLUS = sys.version_info >= (3, 8)
PY39_PLUS = sys.version_info >= (3, 9)
# Exception names whose handlers typically guard optional imports.
IMPORT_EXCEPTIONS = {"ImportError", "ImportWarning", "ModuleNotFoundError"}
# Name of the module export list attribute.
__ALL__ = "__all__"
# Module-level dunder names that are never treated as used symbols.
NAMES_TO_SKIP = frozenset(
    {
        "__name__",
        "__doc__",
        "__package__",
        "__loader__",
        "__spec__",
        "__build_class__",
        "__import__",
        __ALL__,
    }
)
# Typing constructs whose subscript may contain semi-string type hints,
# e.g. List["Import"] (see SourceAnalyzer.visit_Subscript).
SUBSCRIPT_TYPE_VARIABLE = frozenset(
    {
        "AbstractSet",
        "AsyncContextManager",
        "AsyncGenerator",
        "AsyncIterable",
        "AsyncIterator",
        "Awaitable",
        "ByteString",
        "Callable",
        "ChainMap",
        "ClassVar",
        "Collection",
        "Container",
        "ContextManager",
        "Coroutine",
        "Counter",
        "DefaultDict",
        "Deque",
        "Dict",
        "FrozenSet",
        "Generator",
        "IO",
        "ItemsView",
        "Iterable",
        "Iterator",
        "KeysView",
        "List",
        "Mapping",
        "MappingView",
        "Match",
        "MutableMapping",
        "MutableSequence",
        "MutableSet",
        "Optional",
        "Pattern",
        "Reversible",
        "Sequence",
        "Set",
        "SupportsRound",
        "Tuple",
        "Type",
        "Union",
        "ValuesView",
        # Python >=3.7:
        "Literal",
        # Python >=3.8:
        "OrderedDict",
        # Python >=3.9:
        "tuple",
        "list",
        "dict",
        "set",
        "frozenset",
        "type",
    }
)
# Custom types.
# Generic bound for decorated callables (used by `recursive`).
FunctionT = TypeVar("FunctionT", bound=Callable[..., Any])
# Union of sync/async function-definition AST nodes.
FunctionDefT = TypeVar(
    "FunctionDefT", bound=Union[ast.FunctionDef, ast.AsyncFunctionDef]
)
def recursive(func: FunctionT) -> FunctionT:
    """Decorator that makes an `ast.NodeVisitor` visit method recursive.

    After the wrapped ``visit_*`` method runs, ``generic_visit`` is
    invoked on the same node so child nodes are traversed as well.

    :param func: `ast.NodeVisitor.visit_*` function.
    """

    @wraps(func)
    def recursive_visit(self, *args, **kwargs):
        # Run the original visitor first, then descend into the children.
        func(self, *args, **kwargs)
        self.generic_visit(*args)

    return cast(FunctionT, recursive_visit)
@dataclass
class ImportStats:
    """Import statements statistics."""

    #: Plain ``import x`` statements found in the module.
    import_: Set[_nodes.Import]
    #: ``from x import y`` statements found in the module.
    from_: Set[_nodes.ImportFrom]

    def __iter__(self):
        # Support ``import_stats, from_stats = tuple(stats)`` unpacking.
        return iter([self.import_, self.from_])
@dataclass
class SourceStats:
    """Source code (`ast.Name`, `ast.Attribute`) statistics."""

    #: Included on `__iter__`.
    #: Identifiers used via `ast.Name` / attribute names via `ast.Attribute`.
    name_: Set[str]
    attr_: Set[str]

    #: Not included on `__iter__`.
    names_to_skip: Set[str]

    def __iter__(self):
        # Only the used-name sets participate in unpacking.
        return iter([self.name_, self.attr_])
class SourceAnalyzer(ast.NodeVisitor):
    """AST source code analyzer.

    Walks a parsed module and collects two kinds of statistics:
    import statements (`ImportStats`) and used names/attributes
    (`SourceStats`), including names referenced only inside strings
    (string annotations, type comments, `typing.cast` targets).

    >>> import ast
    >>> source = "source.py"
    >>> with open(source, "r") as sourcef:
    >>>     source_lines = sourcef.readlines()
    >>>     tree = ast.parse("".join(source_lines))
    >>> analyzer = SourceAnalyzer(source_lines)
    >>> analyzer.visit(tree)
    >>> source_stats, import_stats = analyzer.get_stats()

    :param source_lines: source code as string lines,
        required only when Python < 3.8 (used to recover `end_lineno`).
    :raises ValueError: when Python < 3.8 and no source code lines provided.
    """
    def __init__(self, source_lines: Optional[List[str]] = None):
        if not PY38_PLUS and source_lines is None:
            # Bad class usage.
            raise ValueError("Please provide source lines for Python < 3.8.")
        self._lines = source_lines
        self._import_stats = ImportStats(set(), set())
        # Imports guarded by try/except-import-error blocks (see `visit_Try`);
        # these must not be recorded as removable imports.
        self._imports_to_skip: Set[Union[_nodes.Import, _nodes.ImportFrom]] = set()
        self._source_stats = SourceStats(set(), set(), set())
    @recursive
    def visit_Import(self, node: ast.Import):
        """Record a plain `import x` statement."""
        if node not in self._imports_to_skip:
            py38_node = self._get_py38_import_node(node)
            self._import_stats.import_.add(py38_node)
    @recursive
    def visit_ImportFrom(self, node: ast.ImportFrom):
        """Record a `from x import y` statement (dunder modules excluded)."""
        if node not in self._imports_to_skip:
            py38_node = self._get_py38_import_from_node(node)
            if not str(py38_node.module).startswith("__"):
                self._import_stats.from_.add(py38_node)
    @recursive
    def visit_Name(self, node: ast.Name):
        """Record a used name."""
        self._source_stats.name_.add(node.id)
    @recursive
    def visit_Attribute(self, node: ast.Attribute):
        """Record a used attribute."""
        self._source_stats.attr_.add(node.attr)
    @recursive
    def visit_Call(self, node: ast.Call):
        """Record names hidden inside a `cast("X", ...)` first argument."""
        #: Support casting case.
        #: >>> from typing import cast
        #: >>> import xxx, yyy
        #: >>> zzz = cast("xxx", yyy)
        #: Issue: https://github.com/hadialqattan/pycln/issues/26
        func = node.func
        if getattr(func, "id", "") == "cast" or (
            getattr(func, "attr", "") == "cast"
            and getattr(func.value, "id", "") == "typing"  # type: ignore
        ):
            self._parse_string(node.args[0])  # type: ignore
    @recursive
    def visit_Subscript(self, node: ast.Subscript) -> None:
        """Record names hidden inside semi-string hints like `List["X"]`."""
        #: Support semi string type hints.
        #: >>> from ast import Import
        #: >>> from typing import List
        #: >>> def foo(bar: List["Import"]):
        #: >>>     pass
        #: Issue: https://github.com/hadialqattan/pycln/issues/32
        value = getattr(node, "value", "")
        if getattr(value, "id", "") in SUBSCRIPT_TYPE_VARIABLE or (
            hasattr(value, "value") and getattr(value.value, "id", "") == "typing"
        ):
            if PY39_PLUS:
                # Python >= 3.9: the slice is the expression itself.
                s_val = node.slice  # type: ignore
            else:
                # Python < 3.9: the expression is wrapped (`node.slice.value`).
                s_val = node.slice.value  # type: ignore
            for elt in getattr(s_val, "elts", ()) or (s_val,):
                try:
                    self._parse_string(elt)  # type: ignore
                except UnparsableFile:
                    #: Ignore errors when parsing Literal
                    #: that are not valid identifiers.
                    #:
                    #: >>> from typing import Literal
                    #: >>> L: Literal[" "] = " "
                    #:
                    #: Issue: https://github.com/hadialqattan/pycln/issues/41
                    pass
    @recursive
    def visit_Try(self, node: ast.Try):
        """Support any try/except block that has import error(s).
        Add any import that placed on try/except block that has import error
        to `self._imports_to_skip`.
        exp:- two of these imports would not be used and should not be removed:
        >>> try:
        >>>     import foo2
        >>> except ModuleNotFoundError:
        >>>     import foo3
        >>> else:
        >>>     import foo38
        supported exceptions (`IMPORT_EXCEPTIONS`):
        - ModuleNotFoundError
        - ImportError.
        - ImportWarning.
        supported blocks:
        - try.
        - except.
        - else.
        """
        is_skip_case = False
        def add_imports_to_skip(body: List[ast.stmt]) -> None:
            """Add all try/except/else blocks body import children to
            `self._imports_to_skip`.
            :param body: ast.List to iterate over.
            """
            for child in body:
                if hasattr(child, "names"):
                    self._imports_to_skip.add(child)  # type: ignore
        for handler in node.handlers:
            # `except (ImportError, ...):` — a tuple of exception types.
            if hasattr(handler.type, "elts"):
                for name in getattr(handler.type, "elts", []):
                    if hasattr(name, "id") and name.id in IMPORT_EXCEPTIONS:
                        is_skip_case = True
                        break
            # `except ImportError:` — a single exception type.
            elif hasattr(handler.type, "id"):
                if getattr(handler.type, "id", "") in IMPORT_EXCEPTIONS:
                    is_skip_case = True
            if is_skip_case:
                add_imports_to_skip(handler.body)
        if is_skip_case:
            for body in (node.body, node.orelse):
                add_imports_to_skip(body)
    @recursive
    def visit_AnnAssign(self, node: ast.AnnAssign):
        #: Support string type annotations.
        #: >>> from typing import List
        #: >>> foo: "List[str]" = []
        self._visit_string_type_annotation(node)
    @recursive
    def visit_arg(self, node: ast.arg):
        # Support Python ^3.8 type comments.
        self._visit_type_comment(node)
        #: Support arg string type annotations.
        #: >>> from typing import Tuple
        #: >>> def foo(bar: "Tuple[str, int]"):
        #: ...     pass
        self._visit_string_type_annotation(node)
    @recursive
    def visit_FunctionDef(self, node: FunctionDefT):
        # Support Python ^3.8 type comments.
        self._visit_type_comment(node)
        #: Support string type annotations.
        #: >>> from typing import List
        #: >>> def foo() -> 'List[str]':
        #: >>>     pass
        self._visit_string_type_annotation(node)
    # Support `ast.AsyncFunctionDef`.
    visit_AsyncFunctionDef = visit_FunctionDef
    @recursive
    def visit_Assign(self, node: ast.Assign):
        """Record names that `__all__` (and similar dunders) keep alive."""
        # Support Python ^3.8 type comments.
        self._visit_type_comment(node)
        id_ = getattr(node.targets[0], "id", None)
        # These names will be skipped on import `*` case.
        if id_ in NAMES_TO_SKIP:
            self._source_stats.names_to_skip.add(id_)
        # Support `__all__` dunder overriding cases.
        if id_ == __ALL__:
            if isinstance(node.value, (ast.List, ast.Tuple, ast.Set)):
                #: Support normal `__all__` dunder overriding:
                #:
                #: >>> import x, y, z
                #: >>>
                #: >>> __all__ = ["x", "y", "z"]
                self._add_list_names(node.value.elts)
            elif isinstance(node.value, ast.BinOp):
                #: Support `__all__` dunder overriding with
                #: add (`+`) binary operator (concatenation):
                #:
                #: >>> import x, y, z, i, j
                #: >>>
                #: >>> __all__ = ["x"] + ["y", "z"] + ["i", "j"]
                #:
                #: Issue: https://github.com/hadialqattan/pycln/issues/28
                self._add_concatenated_list_names(node.value)
    @recursive
    def visit_AugAssign(self, node: ast.AugAssign):
        """Record names added to `__all__` via `+=`."""
        id_ = getattr(node.target, "id", None)
        # Support `__all__` with `+=` operator case.
        if id_ == __ALL__:
            if isinstance(node.value, (ast.List, ast.Tuple, ast.Set)):
                #: Support `__all__` dunder overriding with
                #: only `+=` operator:
                #:
                #: >>> import x, y, z
                #: >>>
                #: >>> __all__ += ["x", "y", "z"]
                self._add_list_names(node.value.elts)
            elif isinstance(node.value, ast.BinOp):
                #: Support `__all__` dunder overriding with
                #: both `+=` and `+` operators:
                #:
                #: >>> import x, y, z
                #: >>>
                #: >>> __all__ += ["x", "y"] + ["z"]
                self._add_concatenated_list_names(node.value)
    @recursive
    def visit_Expr(self, node: ast.Expr):
        """Record names added via `__all__.append(...)` / `__all__.extend(...)`."""
        #: Support `__all__` dunder overriding with
        #: `append` and `extend` operations:
        #:
        #: >>> import x, y, z
        #: >>>
        #: >>> __all__ = ["x"]
        #: >>> __all__.append("y")
        #: >>> __all__.extend(["z"])
        #:
        #: Issue: https://github.com/hadialqattan/pycln/issues/29
        node_value = node.value
        if (
            isinstance(node_value, ast.Call)
            and isinstance(node_value.func, ast.Attribute)
            and isinstance(node_value.func.value, ast.Name)
            and node_value.func.value.id == __ALL__
        ):
            func_attr = node_value.func.attr
            if func_attr == "append":
                self._add_list_names(node_value.args)
            elif func_attr == "extend":
                for arg in node_value.args:
                    if isinstance(arg, ast.List):
                        self._add_list_names(arg.elts)
    def _visit_string_type_annotation(
        self, node: Union[ast.AnnAssign, ast.arg, FunctionDefT]
    ) -> None:
        # Support string type annotations: parse the annotation (or the
        # return annotation for function defs) as a string expression.
        if isinstance(node, (ast.AnnAssign, ast.arg)):
            annotation = node.annotation
        else:
            annotation = node.returns
        self._parse_string(annotation)  # type: ignore
    def _visit_type_comment(
        self, node: Union[ast.Assign, ast.arg, FunctionDefT]
    ) -> None:
        #: Support Python ^3.8 type comments.
        #:
        #: This feature is only available for Python ^3.8.
        #: PEP 526 -- Syntax for Variable Annotations.
        #: For more information:
        #:     - https://www.python.org/dev/peps/pep-0526/
        #:     - https://docs.python.org/3.8/library/ast.html#ast.parse
        type_comment = getattr(node, "type_comment", None)
        if type_comment:
            # Assign/arg comments are plain expressions; function defs use
            # the dedicated `func_type` parsing mode.
            if isinstance(node, (ast.Assign, ast.arg)):
                mode = "eval"
            else:
                mode = "func_type"
            try:
                tree = parse_ast(type_comment, mode=mode)
                self._add_name_attr(tree)
            except UnparsableFile:
                #: Ignore errors when it's not a valid type comment.
                #:
                #: Sometimes we find nodes (comments)
                #: satisfy PIP-526 type comment rules, but they're not valid.
                #:
                #: Issue: https://github.com/hadialqattan/pycln/issues/58
                pass
    def _parse_string(self, node: Union[ast.Constant, ast.Str]) -> None:
        # Parse string names/attrs: evaluate the string content as an
        # expression AST and record any names/attributes it mentions.
        if isinstance(node, (ast.Constant, ast.Str)):
            val = getattr(node, "value", "") or getattr(node, "s", "")
            if val and isinstance(val, str):
                tree = parse_ast(val, mode="eval")
                self._add_name_attr(tree)
    def _add_concatenated_list_names(self, node: ast.BinOp) -> None:
        #: Safely add `["x", "y"] + ["i", "j"]`
        #: `const/str` names to `self._source_stats.name_`.
        #: Recurses leftwards, since `a + b + c` parses as `(a + b) + c`.
        if isinstance(node.right, (ast.List, ast.Tuple, ast.Set)):
            self._add_list_names(node.right.elts)
        if isinstance(node.left, (ast.List, ast.Tuple, ast.Set)):
            self._add_list_names(node.left.elts)
        elif isinstance(node.left, ast.BinOp):
            self._add_concatenated_list_names(node.left)
    def _add_list_names(self, node: List[ast.expr]) -> None:
        # Safely add list `const/str` names to `self._source_stats.name_`.
        for item in node:
            if isinstance(item, (ast.Constant, ast.Str)):
                # `ast.Str.s` (deprecated) vs `ast.Constant.value`.
                key = "s" if hasattr(item, "s") else "value"
                value = getattr(item, key, "")
                if value and isinstance(value, str):
                    self._source_stats.name_.add(value)
    def _add_name_attr(self, tree: ast.AST):
        # Add any `ast.Name` or `ast.Attribute`
        # child to `self._source_stats`.
        for node in ast.walk(tree):
            if isinstance(node, ast.Name):
                self._source_stats.name_.add(node.id)
            elif isinstance(node, ast.Attribute):
                self._source_stats.attr_.add(node.attr)
    def _get_py38_import_node(self, node: ast.Import) -> _nodes.Import:
        # Convert any Python < 3.8 `ast.Import`
        # to `_nodes.Import` in order to support `end_lineno`.
        if hasattr(node, "end_lineno"):
            end_lineno = node.end_lineno
        else:
            # Pre-3.8: infer the end line from the raw source text.
            line = self._lines[node.lineno - 1]
            multiline = SourceAnalyzer._is_parentheses(line) is not None
            end_lineno = node.lineno + (1 if multiline else 0)
        location = _nodes.NodeLocation((node.lineno, node.col_offset), end_lineno)
        return _nodes.Import(location=location, names=node.names)
    def _get_py38_import_from_node(self, node: ast.ImportFrom) -> _nodes.ImportFrom:
        # Convert any Python < 3.8 `ast.ImportFrom`
        # to `_nodes.ImportFrom` in order to support `end_lineno`.
        if hasattr(node, "end_lineno"):
            end_lineno = node.end_lineno
        else:
            # Pre-3.8: scan the raw source to find where the statement ends.
            line = self._lines[node.lineno - 1]
            is_parentheses = SourceAnalyzer._is_parentheses(line)
            multiline = is_parentheses is not None
            end_lineno = (
                node.lineno
                if not multiline
                else self._get_end_lineno(node.lineno, is_parentheses)
            )
        location = _nodes.NodeLocation((node.lineno, node.col_offset), end_lineno)
        return _nodes.ImportFrom(
            location=location,
            names=node.names,
            module=node.module,
            level=node.level,
        )
    @staticmethod
    def _is_parentheses(import_from_line: str) -> Optional[bool]:
        # Return importFrom multi-line type:
        # ('(' => True), ('\\' => False) else None (single line).
        if "(" in import_from_line:
            return True
        elif "\\" in import_from_line:
            return False
        else:
            return None
    def _get_end_lineno(self, lineno: int, is_parentheses: bool) -> int:
        # Get `ast.ImportFrom` `end_lineno` of the given `lineno`:
        # walk forward until the closing paren (parenthesized form) or the
        # first line without a trailing backslash (continuation form).
        lines_len = len(self._lines)
        for end_lineno in range(lineno, lines_len):
            if is_parentheses:
                if ")" in self._lines[end_lineno]:
                    end_lineno += 1
                    break
            else:
                if "\\" not in self._lines[end_lineno]:
                    end_lineno += 1
                    break
        return end_lineno
    def get_stats(self) -> Tuple[SourceStats, ImportStats]:
        """Get source analyzer results.

        :returns: tuple of (`SourceStats`, `ImportStats`) — in that order.
        """
        return self._source_stats, self._import_stats
class ImportablesAnalyzer(ast.NodeVisitor):
    """Get set of all importable names from given `ast.Module`.

    >>> import ast
    >>> source = "source.py"
    >>> with open(source, "r") as sourcef:
    >>>     tree = ast.parse(sourcef.read())
    >>> analyzer = ImportablesAnalyzer(source)
    >>> analyzer.visit(tree)
    >>> importable_names = analyzer.get_stats()

    When the module overrides `__all__`, only the `__all__` names count as
    importable (see the overridden `generic_visit`).

    :param path: a file path that belongs to the given `ast.Module`.
    """
    def __init__(self, path: Path):
        # Names defined in nested scopes; these are NOT importable.
        self._not_importables: Set[Union[ast.Name, str]] = set()
        self._importables: Set[str] = set()
        # Becomes True once an `__all__` assignment is seen.
        self._has_all = False
        self._path = path
    @recursive
    def visit_Assign(self, node: ast.Assign):
        """Handle `__all__ = [...]` overriding."""
        id_ = getattr(node.targets[0], "id", None)
        # Support `__all__` dunder overriding cases.
        if id_ == __ALL__:
            self._has_all = True
            # `__all__ = ...` replaces everything gathered so far.
            self._importables.clear()
            if isinstance(node.value, (ast.List, ast.Tuple, ast.Set)):
                #: Support normal `__all__` dunder overriding:
                #:
                #: >>> import x, y, z
                #: >>>
                #: >>> __all__ = ["x", "y", "z"]
                self._add_list_names(node.value.elts)
            elif isinstance(node.value, ast.BinOp):
                #: Support `__all__` dunder overriding with
                #: add (`+`) binary operator (concatenation):
                #:
                #: >>> import x, y, z, i, j
                #: >>>
                #: >>> __all__ = ["x"] + ["y", "z"] + ["i", "j"]
                #:
                #: Issue: https://github.com/hadialqattan/pycln/issues/28
                self._add_concatenated_list_names(node.value)
    @recursive
    def visit_AugAssign(self, node: ast.AugAssign):
        """Handle `__all__ += [...]` extension."""
        id_ = getattr(node.target, "id", None)
        # Support `__all__` with `+=` operator case.
        if id_ == __ALL__:
            self._has_all = True
            if isinstance(node.value, (ast.List, ast.Tuple, ast.Set)):
                #: Support `__all__` dunder overriding with
                #: only `+=` operator:
                #:
                #: >>> import x, y, z
                #: >>>
                #: >>> __all__ += ["x", "y", "z"]
                self._add_list_names(node.value.elts)
            elif isinstance(node.value, ast.BinOp):
                #: Support `__all__` dunder overriding with
                #: both `+=` and `+` operators:
                #:
                #: >>> import x, y, z
                #: >>>
                #: >>> __all__ += ["x", "y"] + ["z"]
                self._add_concatenated_list_names(node.value)
    @recursive
    def visit_Expr(self, node: ast.Expr):
        """Handle `__all__.append(...)` / `__all__.extend(...)`."""
        #: Support `__all__` dunder overriding with
        #: `append` and `extend` operations:
        #:
        #: >>> import x, y, z
        #: >>>
        #: >>> __all__ = ["x"]
        #: >>> __all__.append("y")
        #: >>> __all__.extend(["z"])
        #:
        #: Issue: https://github.com/hadialqattan/pycln/issues/29
        node_value = node.value
        if (
            isinstance(node_value, ast.Call)
            and isinstance(node_value.func, ast.Attribute)
            and isinstance(node_value.func.value, ast.Name)
            and node_value.func.value.id == __ALL__
        ):
            func_attr = node_value.func.attr
            if func_attr == "append":
                self._add_list_names(node_value.args)
            elif func_attr == "extend":
                for arg in node_value.args:
                    if isinstance(arg, ast.List):
                        self._add_list_names(arg.elts)
    @recursive
    def visit_Import(self, node: ast.Import):
        # Analyze each import statement: imported (or aliased) names
        # become importable from this module.
        for alias in node.names:
            name = alias.asname if alias.asname else alias.name
            self._importables.add(name)
    @recursive
    def visit_ImportFrom(self, node: ast.ImportFrom):
        # Analyze each importFrom statement.
        try:
            if node.names[0].name == "*":
                # Expand import star if possible.
                node = cast(ast.ImportFrom, expand_import_star(node, self._path))
            for alias in node.names:
                name = alias.asname if alias.asname else alias.name
                self._importables.add(name)
        except UnexpandableImportStar:  # pragma: no cover
            # * We shouldn't do anything because it's not importable.
            pass  # pragma: no cover
    @recursive
    def visit_FunctionDef(self, node: FunctionDefT):
        # Add function name as importable name.
        if node.name not in self._not_importables:
            self._importables.add(node.name)
        self._compute_not_importables(node)
    # Support `ast.AsyncFunctionDef`.
    visit_AsyncFunctionDef = visit_FunctionDef
    @recursive
    def visit_ClassDef(self, node: ast.ClassDef):
        # Add class name as importable name.
        if node.name not in self._not_importables:
            self._importables.add(node.name)
        self._compute_not_importables(node)
    @recursive
    def visit_Name(self, node: ast.Name):
        # Any assigned (stored) name at module level is importable.
        if isinstance(node.ctx, ast.Store):
            # Except not-importables.
            if node not in self._not_importables:
                self._importables.add(node.id)
    def _add_concatenated_list_names(self, node: ast.BinOp) -> None:
        #: Safely add `["x", "y"] + ["i", "j"]`
        #: `const/str` names to `self._importables`.
        #: Recurses leftwards, since `a + b + c` parses as `(a + b) + c`.
        if isinstance(node.right, (ast.List, ast.Tuple, ast.Set)):
            self._add_list_names(node.right.elts)
        if isinstance(node.left, (ast.List, ast.Tuple, ast.Set)):
            self._add_list_names(node.left.elts)
        elif isinstance(node.left, ast.BinOp):
            self._add_concatenated_list_names(node.left)
    def _add_list_names(self, node: List[ast.expr]) -> None:
        # Safely add list `const/str` names to `self._importables`.
        for item in node:
            if isinstance(item, (ast.Constant, ast.Str)):
                # `ast.Str.s` (deprecated) vs `ast.Constant.value`.
                key = "s" if hasattr(item, "s") else "value"
                value = getattr(item, key, "")
                if value and isinstance(value, str):
                    self._importables.add(value)
    def _compute_not_importables(self, node: Union[FunctionDefT, ast.ClassDef]):
        # Compute class/function not-importables: names defined directly
        # inside a class/function body belong to that scope, not the module.
        for node_ in ast.iter_child_nodes(node):
            if isinstance(node_, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                self._not_importables.add(cast(str, node_.name))
            if isinstance(node_, ast.Assign):
                for target in node_.targets:
                    self._not_importables.add(cast(ast.Name, target))
    def get_stats(self) -> Set[str]:
        """Return the set of importable names.

        For a package `__init__.py`, sibling modules and subpackages are
        importable too, so their (extension-less) names are added here.
        """
        if self._path.name == "__init__.py":
            for path in os.listdir(self._path.parent):
                file_path = self._path.parent.joinpath(path)
                if file_path.is_dir() or path.endswith(".py"):
                    self._importables.add(path.split(".")[0])
        return self._importables
    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node
        (override)."""
        # Continue visiting only if `__all__` has not been overridden
        # (except for `AugAssign`, which may extend `__all__` further down).
        if (not self._has_all) or isinstance(node, ast.AugAssign):
            for _, value in ast.iter_fields(node):
                if isinstance(value, list):
                    for item in value:
                        if isinstance(item, ast.AST):
                            self.visit(item)
                elif isinstance(value, ast.AST):
                    self.visit(value)
@unique
class HasSideEffects(Enum):
    """SideEffects values.

    Graded answer to "does importing this module run side effects?".
    """
    YES = 1
    MAYBE = 0.5
    NO = 0
    #: Some names aren't modules at all.
    NOT_MODULE = -1
    #: Just in case an exception has been raised
    #: while parsing a file.
    NOT_KNOWN = -2
class SideEffectsAnalyzer(ast.NodeVisitor):
    """Check if the given `ast.Module` has side effects or not.

    A module has side effects when it runs top-level calls or imports
    modules known (or suspected) to run code on import.

    >>> import ast
    >>> source = "source.py"
    >>> with open(source, "r") as sourcef:
    >>>     tree = ast.parse(sourcef.read())
    >>> analyzer = SideEffectsAnalyzer()
    >>> analyzer.visit(tree)
    >>> has_side_effects = analyzer.has_side_effects()
    """
    def __init__(self):
        # Calls that live inside function/class bodies — they don't run
        # at import time, so they are not side effects.
        self._not_side_effects: Set[ast.Call] = set()
        self._has_side_effects = HasSideEffects.NO
    @recursive
    def visit_FunctionDef(self, node: FunctionDefT):
        # Mark any call inside a function as not-side-effect.
        self._compute_not_side_effects(node)
    # Support `ast.AsyncFunctionDef`.
    visit_AsyncFunctionDef = visit_FunctionDef
    @recursive
    def visit_ClassDef(self, node: ast.ClassDef):
        # Mark any call inside a class as not-side-effect.
        self._compute_not_side_effects(node)
    def _compute_not_side_effects(
        self, node: Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]
    ) -> None:
        # Mark any call inside the given `node` as not-side-effect.
        for node_ in ast.iter_child_nodes(node):
            if isinstance(node_, ast.Expr):
                if isinstance(node_.value, ast.Call):
                    self._not_side_effects.add(node_.value)
    @recursive
    def visit_Call(self, node: ast.Call):
        # Any call not excluded above runs at import time => side effect.
        if node not in self._not_side_effects:
            self._has_side_effects = HasSideEffects.YES
    @recursive
    def visit_Import(self, node: ast.Import):
        self._has_side_effects = SideEffectsAnalyzer._check_names(node.names)
    @recursive
    def visit_ImportFrom(self, node: ast.ImportFrom):
        # Check the dotted module path first, then the imported names.
        packages = node.module.split(".") if node.module else []
        packages_aliases = [ast.alias(name=name, asname=None) for name in packages]
        self._has_side_effects = SideEffectsAnalyzer._check_names(packages_aliases)
        if self._has_side_effects is HasSideEffects.NO:
            self._has_side_effects = SideEffectsAnalyzer._check_names(node.names)
    @staticmethod
    def _check_names(names: List[ast.alias]) -> HasSideEffects:
        # Check if imported names have side effects or not.
        for alias in names:
            # Standard lib modules don't have side effects
            # except `pathu.IMPORTS_WITH_SIDE_EFFECTS`.
            if alias.name in pathu.get_standard_lib_names():
                continue
            # Known side effects.
            if alias.name in pathu.IMPORTS_WITH_SIDE_EFFECTS:
                return HasSideEffects.YES
            # [Here, instead of returning MAYBE, the analyzer could recurse
            # into each unknown import.
            return HasSideEffects.MAYBE
        # I chose this way because it's almost 100% certain we would end up
        # with a file that has side effects :-) ].
        return HasSideEffects.NO
    def has_side_effects(self) -> HasSideEffects:
        """Return the verdict accumulated so far."""
        return self._has_side_effects
    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node
        (override)."""
        # Continue visiting only if there are no known side effects yet.
        if self._has_side_effects is HasSideEffects.NO:
            for _, value in ast.iter_fields(node):
                if isinstance(value, list):
                    for item in value:
                        if isinstance(item, ast.AST):
                            self.visit(item)
                elif isinstance(value, ast.AST):
                    self.visit(value)
def expand_import_star(
    node: Union[ast.ImportFrom, _nodes.ImportFrom], path: Path
) -> Union[ast.ImportFrom, _nodes.ImportFrom]:
    """Expand import star statement, replace the `*` with a list of ast.alias.

    :param node: `_nodes/ast.ImportFrom` node that has a '*' as `alias.name`.
    :param path: where the node has imported.
    :returns: expanded `_nodes/ast.ImportFrom` (same input node type),
        mutated in place.
    :raises UnexpandableImportStar: when `ReadPermissionError`,
        `UnparsableFile` or `ModuleNotFoundError` raised.
    """
    mpath = pathu.get_import_from_path(path, "*", node.module, node.level)
    importables: Set[str] = set()
    try:
        if mpath:
            # Parse the target module and collect its importable names.
            content, _, _ = iou.safe_read(mpath, permissions=(os.R_OK,))
            tree = parse_ast(content, mpath)
            analyzer = ImportablesAnalyzer(mpath)
            analyzer.visit(tree)
            importables = analyzer.get_stats()
        else:
            # No path resolved — rebuild the dotted name for the error.
            name = ("." * node.level) + (node.module if node.module else "")
            raise ModuleNotFoundError(name=name)
    except (ReadPermissionError, UnparsableFile, ModuleNotFoundError) as err:
        msg = (
            err
            if not isinstance(err, ModuleNotFoundError)
            else f"{err.name!r} module not found or it's a C wrapped module!"
        )
        if hasattr(node, "location"):
            location = node.location  # type: ignore # pragma: nocover.
        else:
            location = _nodes.NodeLocation(
                (node.lineno, node.col_offset), 0  # type: ignore
            )
        raise UnexpandableImportStar(path, location, str(msg)) from err
    # Create `ast.alias` for each name.
    # NOTE(review): `importables` is a set, so the resulting alias order is
    # unspecified — confirm callers don't rely on a deterministic order.
    node.names.clear()
    for name in importables:
        node.names.append(ast.alias(name=name, asname=None))
    return node
def parse_ast(source_code: str, path: Path = Path(""), mode: str = "exec") -> ast.AST:
    """Parse the given `source_code` AST.

    :param source_code: python source code.
    :param path: `source_code` file path (used only for error reporting).
    :param mode: `ast.parse` mode.
    :returns: `ast.AST` (source code AST).
    :raises UnparsableFile: if the compiled source is invalid,
        or the source contains null bytes.
    """
    # `type_comments` is only accepted on Python >= 3.8, so pass it
    # conditionally via kwargs instead of duplicating the call.
    # For more information https://www.python.org/dev/peps/pep-0526/ .
    extra_kwargs = {"type_comments": True} if PY38_PLUS else {}
    try:
        return ast.parse(source_code, mode=mode, **extra_kwargs)
    except (SyntaxError, IndentationError, ValueError) as err:
        raise UnparsableFile(path, err) from err
| 36.015521 | 88 | 0.566367 |
2aa9fbba0ad35824355e4404c8911cac5257b303 | 39,970 | py | Python | catboost/python-package/ut/medium/canondata/test.test_export_to_python_with_cat_features_CPU-40_/model.py | karina-usmanova/catboost | 7eff3b7e2e9e8793ab27ea21b2d39a9238f1ba02 | [
"Apache-2.0"
] | 1 | 2019-07-08T09:14:39.000Z | 2019-07-08T09:14:39.000Z | catboost/python-package/ut/medium/canondata/test.test_export_to_python_with_cat_features_CPU-40_/model.py | karina-usmanova/catboost | 7eff3b7e2e9e8793ab27ea21b2d39a9238f1ba02 | [
"Apache-2.0"
] | null | null | null | catboost/python-package/ut/medium/canondata/test.test_export_to_python_with_cat_features_CPU-40_/model.py | karina-usmanova/catboost | 7eff3b7e2e9e8793ab27ea21b2d39a9238f1ba02 | [
"Apache-2.0"
] | null | null | null | ### Types to hold CTR's data
class catboost_model_ctr(object):
def __init__(self, base_hash, base_ctr_type, target_border_idx, prior_num, prior_denom, shift, scale):
self.base_hash = base_hash
self.base_ctr_type = base_ctr_type
self.target_border_idx = target_border_idx
self.prior_num = prior_num
self.prior_denom = prior_denom
self.shift = shift
self.scale = scale
def calc(self, count_in_class, total_count):
ctr = (count_in_class + self.prior_num) / float(total_count + self.prior_denom)
return (ctr + self.shift) * self.scale
class catboost_bin_feature_index_value(object):
def __init__(self, bin_index, check_value_equal, value):
self.bin_index = bin_index
self.check_value_equal = check_value_equal
self.value = value
class catboost_ctr_mean_history(object):
def __init__(self, sum, count):
self.sum = sum
self.count = count
class catboost_ctr_value_table(object):
def __init__(self, index_hash_viewer, target_classes_count, counter_denominator, ctr_mean_history, ctr_total):
self.index_hash_viewer = index_hash_viewer
self.target_classes_count = target_classes_count
self.counter_denominator = counter_denominator
self.ctr_mean_history = ctr_mean_history
self.ctr_total = ctr_total
def resolve_hash_index(self, hash):
try:
return self.index_hash_viewer[hash]
except KeyError:
return None
class catboost_ctr_data(object):
def __init__(self, learn_ctrs):
self.learn_ctrs = learn_ctrs
class catboost_projection(object):
def __init__(self, transposed_cat_feature_indexes, binarized_indexes):
self.transposed_cat_feature_indexes = transposed_cat_feature_indexes
self.binarized_indexes = binarized_indexes
class catboost_compressed_model_ctr(object):
def __init__(self, projection, model_ctrs):
self.projection = projection
self.model_ctrs = model_ctrs
class catboost_model_ctrs_container(object):
def __init__(self, used_model_ctrs_count, compressed_model_ctrs, ctr_data):
self.used_model_ctrs_count = used_model_ctrs_count
self.compressed_model_ctrs = compressed_model_ctrs
self.ctr_data = ctr_data
### Model data
class catboost_model(object):
    """Exported CatBoost GBDT, baked in as class attributes: float-feature
    binarization borders, oblivious-tree structure (per-split border/feature/
    xor-mask), leaf values, one-hot tables and learned CTR tables.
    All values are generated by the CatBoost exporter; do not edit by hand."""
    float_features_index = [
        0, 1, 2, 3, 4, 5,
    ]
    float_feature_count = 6
    cat_feature_count = 11
    binary_feature_count = 24
    tree_count = 40
    float_feature_borders = [
        [18.5, 19.5, 34.5, 68.5, 71],
        [126119, 200721, 215061, 231641.5, 281044.5, 337225.5, 553548.5],
        [5.5, 6.5, 9.5, 12.5, 13.5, 14.5],
        [1087, 3280, 5842, 7493, 11356, 17537.5],
        [808.5, 1622.5, 1738, 1862, 1881.5, 1944.5, 2396],
        [36.5, 42, 70]
    ]
    # Per-tree depth, then flattened per-split (border, feature, xor-mask) triplets.
    tree_depth = [3, 6, 6, 6, 3, 3, 4, 0, 0, 0, 4, 2, 1, 5, 5, 6, 0, 3, 4, 5, 1, 1, 4, 6, 1, 6, 1, 0, 0, 2, 2, 2, 2, 1, 1, 0, 1, 0, 1, 1]
    tree_split_border = [5, 4, 6, 1, 1, 4, 8, 7, 2, 4, 3, 1, 6, 1, 1, 2, 6, 1, 6, 5, 1, 4, 2, 2, 7, 4, 2, 5, 3, 4, 5, 6, 1, 4, 1, 3, 1, 2, 3, 3, 2, 255, 3, 4, 1, 7, 5, 6, 7, 3, 2, 2, 8, 6, 2, 1, 1, 2, 1, 1, 3, 2, 3, 1, 1, 1, 2, 7, 1, 4, 3, 5, 1, 6, 2, 3, 4, 1, 8, 4, 5, 3, 1, 1, 1, 1, 3, 9, 5, 1, 2, 2, 4, 3, 5, 2, 7, 4, 5]
    tree_split_feature_index = [3, 4, 7, 8, 0, 1, 13, 7, 10, 13, 1, 4, 4, 3, 5, 12, 1, 14, 2, 1, 21, 14, 7, 3, 4, 0, 13, 3, 11, 12, 2, 3, 22, 7, 10, 0, 11, 20, 20, 5, 18, 6, 7, 2, 18, 4, 7, 13, 1, 2, 21, 2, 13, 3, 1, 14, 15, 11, 7, 21, 14, 5, 3, 13, 19, 12, 8, 13, 4, 13, 13, 3, 17, 3, 0, 14, 3, 23, 13, 3, 0, 4, 16, 9, 1, 20, 14, 13, 7, 2, 4, 14, 2, 12, 13, 17, 13, 14, 4]
    tree_split_xor_mask = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    cat_features_index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    one_hot_cat_feature_index = [9]
    one_hot_hash_values = [
        [-2114564283]
    ]
    ctr_feature_borders = [
        [0.999998987, 1.99999905, 3.99999905, 6.99999905, 8.99999905, 10.999999, 12.999999],
        [2.99999905, 6.99999905],
        [8.99999905],
        [6.99999905, 8.99999905],
        [0.999998987, 7.99999905, 12.999999],
        [6.99999905, 8.99999905, 10.999999, 12.999999],
        [6.99999905, 7.99999905, 8.99999905, 9.99999905, 10.999999, 11.999999, 12.999999, 13.999999, 14.999999],
        [0.999998987, 1.99999905, 12.999999, 13.999999],
        [6.99999905],
        [11.999999],
        [12.999999, 13.999999],
        [2.99999905, 4.99999905],
        [10.999999],
        [10.999999, 11.999999, 14.999999],
        [0.999998987, 1.99999905],
        [12.999999],
        [5.99999905]
    ]
    ## Aggregated array of leaf values for trees. Each tree is represented by a separate line:
    leaf_values = [
        0.01999999955296516, 0, 0.004999999888241291, 0, 0.0247826081417177, 0, 0.00599999986588955, 0,
        0, 0, -0.0001499999932944775, 0.003470380363463545, 0, 0, -0.0001499999932944775, 0.01156760844676067, 0, 0.007314130275453562, 0, 0.02304875726047787, 0, 0.007314130275453562, 0, 0.0117026084407257, 0, 0, 0, -4.499999798834327e-05, 0, 0, 0, -0.0001858695569083744, 0, 0, 0, 0.01462826055090712, 0, 0, 0, 0, 0, 0, -3.749999832361938e-05, 0.01346434753710809, 0, 0, 0, 0.01247329165524577, 0, 0, 0, 0.02416112479182612, 0, 0, 0, 0.02413580509986337, 0, 0, 0, 0.009504347624726918, 0, 0, 0, 0.007314130275453562, 0, 0.007314130275453562, 0, 0.02149234735704245, 0, 0.007314130275453562, 0, 0.007314130275453562,
        0, 0.02203329959438511, 0.01445557903672388, 0.02090051891739597, 0, 0.00729380723279871, 0, 0.007259274299613786, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0001611926015748829, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01202224912856518, 0.02519934044647189, 0.009296993208359132, 0.02221987841480803, -0.000205247925650846, 0, 0, 0.007220580590130218, 0, 0, 0, 0, -8.059727904824544e-05, 0, 0, 0.007375742772626784, -0.0002868521611795479, 0.003876930736150383, -0.0002571521625005358, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0.01131705219977818, 0.009131405919731996, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0003064845022101746, 0, 0, 0.00705971162972376, 0, 0, 0, 0, 0, 0, 0, 0, 0.00694392679444102, 0.006976014856599559, 0, 0, 0.01327883733386207, 0.02343944511749118, 0, 0, 0, 0, 0, 0, -0.0003242219493700392, 0, 0, 0, 0, 0, 0, 0, 0.004397972187707204, 0.01173991380755557, 0.01624559285904699, 0.02105239301807314, 0, 0, 0, 0, 0, 0, 0, 0.007320424703068537,
        0.01713816872353237, -0.00101015110586199, 0.02594558349416469, 0.01660501053748116, 0, -0.0004294048680491817, 0, -0.0008169148706745303,
        0.01079041022645152, 0, 0, 0, 0.0225548573429452, -0.000185029282570031, 0.005030479022316899, 0,
        0.01822020916108734, -0.0006713420243358168, 0, 0, 0.01880095670709041, 0, 0.02380558714888851, 0, 0.001016917894030677, -0.0003968415563199083, 0, 0, 0.006448589981753908, 0, 0.01031087564815217, 0,
        0.01898745826649682,
        0.01843426598513581,
        0.01789719074776575,
        0.003626164206099096, 0, 0.004123476059396431, 0, 0.01456064847424626, 0, 0.01391736939318713, 0, 0.007175222476750585, 0, 0, 0, 0.01974902256669395, 0, 0.01185448186361029, -0.0007772380101248539,
        0.01185945775793138, 0.01195241593405586, 0.02004545284761915, 0.01234895501775566,
        0.01527028129781319, 0.01721777215972029,
        0, 0.005793334092104274, 0, 0, -0.003267594832604089, 0, 0, 0, 0, 0.005832489972446285, 0, 0, 0.007280922462546561, 0.005710425653552725, 0, 0, 0, 0.005621030028584721, 0, 0, 0.01494774093086465, 0.01422939839545966, -0.0009225094290911208, 0, 0, 0.005592167556776282, 0, 0, 0.01925234309298835, 0.01503979076092118, 0, 0,
        0, 0, 0.0003863817991255305, 0.0007474773296608814, 0, 0, 0, 0, 0, 0, 0.01311749469323431, -0.0007109599610244532, 0, 0, 0, -0.0009155906085275851, 0.01139257112470219, 0, 0.01389686556604557, 0.01032920541091394, 0, 0, 0, 0, 0, 0.005578872304312634, 0.01982624716374476, 0.01629702279451762, 0, 0, 0, 0,
        0, 0, 0, 0, 0.005612829102526628, 0, 0, 0, 0.005669068941309504, 0, -0.002606908836528224, 0, 0.01061311920064227, 0, 0.004480053186381886, 0, 0.00896671715823245, 0, 0, 0, 0.01498684932222408, 0, 0, 0, 0.005464782019512482, 0, 0.005297583763136753, 0, 0.01783260039940101, 0.008305039985800648, 0.01933907115132087, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.001222107327861213, 0,
        0.0148392595791752,
        0, 0, 0.004105107418691836, 0, 0.008163211477585839, 0, 0.0151995635234467, 0.01444822488487993,
        0.005410760471687652, 0, 0.0083333543295834, 0.00503256071756729, 0.008415827235429293, 0.008226817711936436, 0.01272815382610973, 0.01839492637265765, 0, 0, 0.002841451847506272, -0.001809399723544643, -0.003171913592586823, -0.003547323033035719, 0.008179852565058206, 0.01042743776402821,
        0.005796650788699248, 0.002692987287766075, 0, 0, 0, 0.005493297219148043, -0.001813419916642705, 0, 0.002869765075874072, 0, 0, 0, 0.008596719468130386, 0, 0, 0, 0, 0, 0, 0, 0.01723908915402649, 0.006092324227009916, 0, 0, 0, 0, 0, 0, 0.01549819505598307, 0.006372982395348424, -0.003046820126714731, -0.001364968436561024,
        0.001096351010433316, 0.014152553874154,
        0.005603526527597412, 0.01722920563235135,
        0.005196358775439976, -0.003340520693045765, 0, 0, 0.007089145630978554, -0.001441788993578166, 0.01610700788538342, 0.01137615551467872, 0, 0, 0, 0, 0, 0, -0.003011475138625672, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.001382394190892858, 0, 0, 0, 0, 0.01143028454967799, 0, 0, 0.004443048908259466, 0.01654964369981381, 0, 0, 0, 0, 0, 0, 0.006519587515475378, 0.001982080052685937, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.002041951231623278, -0.001377552766169704, 0, -0.001689507561771382,
        0.005107116137383578, 0.01585163020787507,
        -0.00142395170697102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00881514938107701, -0.001405524490828592, 0, 0, -0.00183492725007898, 0, 0, 0, 0.01212449211578207, 0, 0.004336758318876998, 0, 0, 0, 0, 0, -0.001607402132489359, -0.002064939967902643, 0, 0, 0, 0, 0, 0, 0.007098120247080991, 0, 0, 0, 0, 0, 0, 0, 0.01004718114540055, -0.002564842467288145, 0, 0, 0.002477421733919167, 0, 0, 0, 0.01491703120771285, 0, 0, 0, 0, 0, 0, 0,
        0.01145660192587347, 0.01076895500564115,
        0.01090754352091524,
        0.01058975644274552,
        0, 0.003856719423080162, 0.01443359608522935, 0.001705163252909523,
        0.008127758385080027, 0.009034179229127411, 0.004592230418904769, 0.01066678917785803,
        0.0109106895535775, 0.003980974553640618, 0.009769372557730071, 0.0005892245494980584,
        0.007979979713054886, -0.002690460576320364, 0.01288174062248397, 0.0111351686241983,
        0.002966763715415119, 0.01235475487698945,
        0.003872195066324391, 0.01178306226981817,
        0.008673887277552682,
        0.002668892048570584, 0.01189125063927196,
        0.008181141734920867,
        0.01162672140433912, 0.002337625654666836,
        0.008095517950130804, -0.001003802595541243
    ]
    # CTR (categorical target statistics) configuration plus the learned tables.
    model_ctrs = catboost_model_ctrs_container(
        used_model_ctrs_count = 17,
        compressed_model_ctrs = [
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [3],
                    binarized_indexes = []
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 14216163332699387099, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 14216163332699387099, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0.5, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [3],
                    binarized_indexes = [
                        catboost_bin_feature_index_value(bin_index = 3, check_value_equal = 0, value = 4),
                        catboost_bin_feature_index_value(bin_index = 0, check_value_equal = 0, value = 5),
                        catboost_bin_feature_index_value(bin_index = 4, check_value_equal = 0, value = 3)
                    ]
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 425524955817535461, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 1, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [4],
                    binarized_indexes = []
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 16890222057671696979, base_ctr_type = "Counter", target_border_idx = 0, prior_num = 0, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [5],
                    binarized_indexes = []
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 14216163332699387101, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 14216163332699387101, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0.5, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 14216163332699387101, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 1, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 16890222057671696978, base_ctr_type = "Counter", target_border_idx = 0, prior_num = 0, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [5],
                    binarized_indexes = [
                        catboost_bin_feature_index_value(bin_index = 1, check_value_equal = 0, value = 2)
                    ]
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 692698791827290762, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 1, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [7],
                    binarized_indexes = []
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 14216163332699387103, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 14216163332699387103, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 1, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [8],
                    binarized_indexes = []
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 14216163332699387072, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 14216163332699387072, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0.5, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 14216163332699387072, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 1, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [10],
                    binarized_indexes = []
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 14216163332699387074, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0, prior_denom = 1, shift = -0, scale = 15),
                    catboost_model_ctr(base_hash = 14216163332699387074, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0.5, prior_denom = 1, shift = -0, scale = 15)
                ]
            ),
            catboost_compressed_model_ctr(
                projection = catboost_projection(
                    transposed_cat_feature_indexes = [10],
                    binarized_indexes = [
                        catboost_bin_feature_index_value(bin_index = 3, check_value_equal = 0, value = 6),
                        catboost_bin_feature_index_value(bin_index = 0, check_value_equal = 0, value = 2),
                        catboost_bin_feature_index_value(bin_index = 3, check_value_equal = 0, value = 4)
                    ]
                ),
                model_ctrs = [
                    catboost_model_ctr(base_hash = 13033542383760369867, base_ctr_type = "Borders", target_border_idx = 0, prior_num = 0.5, prior_denom = 1, shift = -0, scale = 15)
                ]
            )
        ],
        ctr_data = catboost_ctr_data(
            learn_ctrs = {
                425524955817535461 :
                catboost_ctr_value_table(
                    index_hash_viewer = {18446744073709551615 : 0, 12653652018840522049 : 4, 15085635429984554305 : 8, 14668331998361267939 : 0, 6371181315837451172 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 2365187170603376679 : 2, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 6366972096803066445 : 11, 7160913711096172174 : 9, 725004896848258735 : 7, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 12915814276774330580 : 3, 12850586852799469109 : 13, 14440651936148345046 : 1, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 8119489818390003804 : 10, 8913431432683109533 : 12, 952684959061181628 : 6, 18446744073709551615 : 0},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 8.40779e-45, count = 57), catboost_ctr_mean_history(sum = 0, count = 5), catboost_ctr_mean_history(sum = 1.4013e-45, count = 5), catboost_ctr_mean_history(sum = 2.8026e-45, count = 0), catboost_ctr_mean_history(sum = 4.2039e-45, count = 6), catboost_ctr_mean_history(sum = 0, count = 3), catboost_ctr_mean_history(sum = 2.8026e-45, count = 1), catboost_ctr_mean_history(sum = 1.4013e-45, count = 1), catboost_ctr_mean_history(sum = 0, count = 1), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0), catboost_ctr_mean_history(sum = 4.2039e-45, count = 0), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0)],
                    ctr_total = [6, 57, 0, 5, 1, 5, 2, 0, 3, 6, 0, 3, 2, 1, 1, 1, 0, 1, 1, 0, 3, 0, 1, 0, 1, 0, 1, 0]
                ),
                692698791827290762 :
                catboost_ctr_value_table(
                    index_hash_viewer = {18446744073709551615 : 0, 3581428127016485793 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 14455983217430950149 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 13125720576600207402 : 8, 5967870314491345259 : 6, 9724886183021484844 : 1, 2436149079269713547 : 7, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 1236773280081879954 : 2, 16151796118569799858 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 8312525161425951098 : 3, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 13605281311626526238 : 9, 18446744073709551615 : 0},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 0, count = 18), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 1.68156e-44, count = 8), catboost_ctr_mean_history(sum = 0, count = 7), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 6), catboost_ctr_mean_history(sum = 1.4013e-44, count = 12), catboost_ctr_mean_history(sum = 0, count = 19), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 3)],
                    ctr_total = [0, 18, 0, 2, 12, 8, 0, 7, 0, 2, 0, 6, 10, 12, 0, 19, 0, 2, 0, 3]
                ),
                13033542383760369867 :
                catboost_ctr_value_table(
                    index_hash_viewer = {8127566760675494400 : 3, 16133203970344820352 : 6, 18446744073709551615 : 0, 18446744073709551615 : 0, 17493291581550525284 : 2, 11079641284750479812 : 8, 3155078433189509382 : 7, 1373856113935573863 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 15470940414085713834 : 1, 8124029294275766379 : 9, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 3126373835522511222 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 16842020874822597533 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 2.38221e-44, count = 67), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 6), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0), catboost_ctr_mean_history(sum = 2.8026e-45, count = 0), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0), catboost_ctr_mean_history(sum = 0, count = 1), catboost_ctr_mean_history(sum = 0, count = 1), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0)],
                    ctr_total = [17, 67, 0, 2, 0, 2, 0, 6, 1, 0, 2, 0, 1, 0, 0, 1, 0, 1, 1, 0]
                ),
                14216163332699387072 :
                catboost_ctr_value_table(
                    index_hash_viewer = {18446744073709551615 : 0, 18446744073709551615 : 0, 8473802870189803490 : 2, 7071392469244395075 : 1, 18446744073709551615 : 0, 8806438445905145973 : 3, 619730330622847022 : 0, 18446744073709551615 : 0},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 0, count = 12), catboost_ctr_mean_history(sum = 1.4013e-45, count = 5), catboost_ctr_mean_history(sum = 2.94273e-44, count = 61), catboost_ctr_mean_history(sum = 0, count = 1)],
                    ctr_total = [0, 12, 1, 5, 21, 61, 0, 1]
                ),
                14216163332699387074 :
                catboost_ctr_value_table(
                    index_hash_viewer = {2136296385601851904 : 0, 7428730412605434673 : 1, 9959754109938180626 : 3, 14256903225472974739 : 5, 8056048104805248435 : 2, 18446744073709551615 : 0, 12130603730978457510 : 6, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 10789443546307262781 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 2.8026e-44, count = 73), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0), catboost_ctr_mean_history(sum = 0, count = 1), catboost_ctr_mean_history(sum = 0, count = 1), catboost_ctr_mean_history(sum = 1.4013e-45, count = 0)],
                    ctr_total = [20, 73, 0, 2, 0, 2, 1, 0, 0, 1, 0, 1, 1, 0]
                ),
                14216163332699387099 :
                catboost_ctr_value_table(
                    index_hash_viewer = {18446744073709551615 : 0, 15379737126276794113 : 5, 18446744073709551615 : 0, 14256903225472974739 : 2, 18048946643763804916 : 4, 2051959227349154549 : 3, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 7024059537692152076 : 6, 18446744073709551615 : 0, 15472181234288693070 : 1, 8864790892067322495 : 0},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 1.4013e-44, count = 58), catboost_ctr_mean_history(sum = 1.4013e-45, count = 6), catboost_ctr_mean_history(sum = 1.4013e-45, count = 5), catboost_ctr_mean_history(sum = 4.2039e-45, count = 6), catboost_ctr_mean_history(sum = 0, count = 4), catboost_ctr_mean_history(sum = 2.8026e-45, count = 0), catboost_ctr_mean_history(sum = 7.00649e-45, count = 0)],
                    ctr_total = [10, 58, 1, 6, 1, 5, 3, 6, 0, 4, 2, 0, 5, 0]
                ),
                14216163332699387101 :
                catboost_ctr_value_table(
                    index_hash_viewer = {18446744073709551615 : 0, 18446744073709551615 : 0, 13987540656699198946 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18089724839685297862 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 10353740403438739754 : 2, 3922001124998993866 : 0, 13686716744772876732 : 1, 18293943161539901837 : 3, 18446744073709551615 : 0, 18446744073709551615 : 0},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 0, count = 37), catboost_ctr_mean_history(sum = 0, count = 4), catboost_ctr_mean_history(sum = 3.08286e-44, count = 20), catboost_ctr_mean_history(sum = 0, count = 13), catboost_ctr_mean_history(sum = 0, count = 2), catboost_ctr_mean_history(sum = 0, count = 3)],
                    ctr_total = [0, 37, 0, 4, 22, 20, 0, 13, 0, 2, 0, 3]
                ),
                14216163332699387103 :
                catboost_ctr_value_table(
                    index_hash_viewer = {3607388709394294015 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18356215166324018775 : 0, 18365206492781874408 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 14559146096844143499 : 1, 18446744073709551615 : 0, 18446744073709551615 : 0, 11416626865500250542 : 3, 5549384008678792175 : 2},
                    target_classes_count = 2,
                    counter_denominator = 0,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 0, count = 14), catboost_ctr_mean_history(sum = 0, count = 22), catboost_ctr_mean_history(sum = 0, count = 22), catboost_ctr_mean_history(sum = 2.66247e-44, count = 17), catboost_ctr_mean_history(sum = 2.8026e-45, count = 3), catboost_ctr_mean_history(sum = 1.4013e-45, count = 1)],
                    ctr_total = [0, 14, 0, 22, 0, 22, 19, 17, 2, 3, 1, 1]
                ),
                16890222057671696978 :
                catboost_ctr_value_table(
                    index_hash_viewer = {18446744073709551615 : 0, 18446744073709551615 : 0, 13987540656699198946 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18089724839685297862 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 10353740403438739754 : 2, 3922001124998993866 : 0, 13686716744772876732 : 1, 18293943161539901837 : 3, 18446744073709551615 : 0, 18446744073709551615 : 0},
                    target_classes_count = 0,
                    counter_denominator = 42,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 5.1848e-44, count = 4), catboost_ctr_mean_history(sum = 5.88545e-44, count = 13), catboost_ctr_mean_history(sum = 2.8026e-45, count = 3)],
                    ctr_total = [37, 4, 42, 13, 2, 3]
                ),
                16890222057671696979 :
                catboost_ctr_value_table(
                    index_hash_viewer = {7537614347373541888 : 4, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 5903587924673389870 : 1, 18278593470046426063 : 6, 10490918088663114479 : 8, 18446744073709551615 : 0, 407784798908322194 : 10, 5726141494028968211 : 3, 1663272627194921140 : 0, 8118089682304925684 : 5, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 18446744073709551615 : 0, 15431483020081801594 : 9, 18446744073709551615 : 0, 18446744073709551615 : 0, 1403990565605003389 : 2, 3699047549849816830 : 11, 14914630290137473119 : 7},
                    target_classes_count = 0,
                    counter_denominator = 28,
                    ctr_mean_history = [catboost_ctr_mean_history(sum = 4.2039e-45, count = 28), catboost_ctr_mean_history(sum = 2.66247e-44, count = 4), catboost_ctr_mean_history(sum = 2.8026e-44, count = 2), catboost_ctr_mean_history(sum = 5.60519e-45, count = 10), catboost_ctr_mean_history(sum = 4.2039e-45, count = 1), catboost_ctr_mean_history(sum = 7.00649e-45, count = 2)],
                    ctr_total = [3, 28, 19, 4, 20, 2, 4, 10, 3, 1, 5, 2]
                )
            }
        )
    )
### Routines to compute CTRs
def calc_hash(a, b):
    """Combine two 64-bit values with CatBoost's multiplicative hash,
    keeping every intermediate result reduced modulo 2**64."""
    mask64 = 0xffFFffFFffFFffFF
    multiplier = 0x4906ba494954cb65
    combined = (a + multiplier * b) & mask64
    return (multiplier * combined) & mask64
def calc_hashes(binarized_features, hashed_cat_features, transposed_cat_feature_indexes, binarized_feature_indexes):
    """Fold the selected categorical hashes and binary-feature comparison
    bits into a single projection hash via repeated calc_hash calls."""
    acc = 0
    for cat_idx in transposed_cat_feature_indexes:
        acc = calc_hash(acc, hashed_cat_features[cat_idx])
    for split in binarized_feature_indexes:
        feature_value = binarized_features[split.bin_index]
        if split.check_value_equal:
            bit = 1 if feature_value == split.value else 0
        else:
            bit = 1 if feature_value >= split.value else 0
        acc = calc_hash(acc, bit)
    return acc
def calc_ctrs(model_ctrs, binarized_features, hashed_cat_features, result):
    """Fill `result` in place with one float per used model CTR.

    For each compressed CTR the projection hash is computed once, then every
    attached CTR resolves its bucket in the learned table and converts the
    stored counts to a value via `ctr.calc`.
    """
    ctr_hash = 0
    result_index = 0
    for i in range(len(model_ctrs.compressed_model_ctrs)):
        proj = model_ctrs.compressed_model_ctrs[i].projection
        ctr_hash = calc_hashes(binarized_features, hashed_cat_features, proj.transposed_cat_feature_indexes, proj.binarized_indexes)
        for j in range(len(model_ctrs.compressed_model_ctrs[i].model_ctrs)):
            ctr = model_ctrs.compressed_model_ctrs[i].model_ctrs[j]
            learn_ctr = model_ctrs.ctr_data.learn_ctrs[ctr.base_hash]
            ctr_type = ctr.base_ctr_type
            bucket = learn_ctr.resolve_hash_index(ctr_hash)
            if bucket is None:
                # Unseen category combination: fall back to the pure prior.
                result[result_index] = ctr.calc(0, 0)
            else:
                if ctr_type == "BinarizedTargetMeanValue" or ctr_type == "FloatTargetMeanValue":
                    ctr_mean_history = learn_ctr.ctr_mean_history[bucket]
                    result[result_index] = ctr.calc(ctr_mean_history.sum, ctr_mean_history.count)
                elif ctr_type == "Counter" or ctr_type == "FeatureFreq":
                    ctr_total = learn_ctr.ctr_total
                    denominator = learn_ctr.counter_denominator
                    result[result_index] = ctr.calc(ctr_total[bucket], denominator)
                elif ctr_type == "Buckets":
                    # Numerator: count of the single class at target_border_idx.
                    ctr_history = learn_ctr.ctr_total
                    target_classes_count = learn_ctr.target_classes_count
                    total_count = 0
                    good_count = ctr_history[bucket * target_classes_count + ctr.target_border_idx];
                    for class_id in range(target_classes_count):
                        total_count += ctr_history[bucket * target_classes_count + class_id]
                    result[result_index] = ctr.calc(good_count, total_count)
                else:
                    # "Borders": numerator is every class strictly above target_border_idx.
                    ctr_history = learn_ctr.ctr_total;
                    target_classes_count = learn_ctr.target_classes_count;
                    if target_classes_count > 2:
                        good_count = 0
                        total_count = 0
                        for class_id in range(ctr.target_border_idx + 1):
                            total_count += ctr_history[bucket * target_classes_count + class_id]
                        for class_id in range(ctr.target_border_idx + 1, target_classes_count):
                            good_count += ctr_history[bucket * target_classes_count + class_id]
                        total_count += good_count;
                        result[result_index] = ctr.calc(good_count, total_count);
                    else:
                        # Binary-target fast path: ctr_total stores (class0, class1) pairs.
                        result[result_index] = ctr.calc(ctr_history[bucket * 2 + 1], ctr_history[bucket * 2] + ctr_history[bucket * 2 + 1])
            result_index += 1
# Precomputed CatBoost hash for every categorical string value seen during
# training; hash_uint64 falls back to 0x7fFFffFF for anything not listed here.
cat_features_hashes = {
    "Female": -2114564283,
    "Protective-serv": -2075156126,
    "Assoc-voc": -2029370604,
    "Married-civ-spouse": -2019910086,
    "Federal-gov": -1993066135,
    "Transport-moving": -1903253868,
    "Farming-fishing": -1888947309,
    "Prof-school": -1742589394,
    "Self-emp-inc": -1732053524,
    "?": -1576664757,
    "Handlers-cleaners": -1555793520,
    "0": -1438285038,
    "Philippines": -1437257447,
    "Male": -1291328762,
    "11th": -1209300766,
    "Unmarried": -1158645841,
    "Local-gov": -1105932163,
    "Divorced": -993514283,
    "Some-college": -870577664,
    "Asian-Pac-Islander": -787966085,
    "Sales": -760428919,
    "Self-emp-not-inc": -661998850,
    "Widowed": -651660490,
    "Masters": -453513993,
    "State-gov": -447941100,
    "Doctorate": -434936054,
    "White": -218697806,
    "Own-child": -189887997,
    "Amer-Indian-Eskimo": -86031875,
    "Exec-managerial": -26537793,
    "Husband": 60472414,
    "Italy": 117615621,
    "Not-in-family": 143014663,
    "n": 239748506,
    "Married-spouse-absent": 261588508,
    "Prof-specialty": 369959660,
    "Assoc-acdm": 475479755,
    "Adm-clerical": 495735304,
    "Bachelors": 556725573,
    "HS-grad": 580496350,
    "Craft-repair": 709691013,
    "Other-relative": 739168919,
    "Other-service": 786213683,
    "9th": 840896980,
    "Separated": 887350706,
    "10th": 888723975,
    "Mexico": 972041323,
    "Hong": 995245846,
    "1": 1121341681,
    "Tech-support": 1150039955,
    "Black": 1161225950,
    "Canada": 1510821218,
    "Wife": 1708186408,
    "United-States": 1736516096,
    "Never-married": 1959200218,
    "Machine-op-inspct": 2039859473,
    "7th-8th": 2066982375,
    "Private": 2084267031,
}
def hash_uint64(string):
    """Map a categorical value to its trained hash; unknown values map to
    the sentinel 0x7fFFffFF (same fallback as dict.get in the exporter)."""
    key = str(string)
    if key in cat_features_hashes:
        return cat_features_hashes[key]
    return 0x7fFFffFF
### Applicator for the CatBoost model
def apply_catboost_model(float_features, cat_features=[], ntree_start=0, ntree_end=catboost_model.tree_count):
    """
    Applies the model built by CatBoost.
    Parameters
    ----------
    float_features : list of float features
    cat_features : list of categorical features
        You need to pass float and categorical features separately in the same order they appeared in train dataset.
        For example if you had features f1,f2,f3,f4, where f2 and f4 were considered categorical, you need to pass here float_features=f1,f3, cat_features=f2,f4
    Returns
    -------
    prediction : formula value for the model and the features
    """
    # NOTE(review): `cat_features=[]` is a mutable default; it is never mutated
    # here so it is harmless, but a None sentinel would be safer.
    if ntree_end == 0:
        ntree_end = catboost_model.tree_count
    else:
        ntree_end = min(ntree_end, catboost_model.tree_count)
    model = catboost_model
    assert len(float_features) >= model.float_feature_count
    assert len(cat_features) >= model.cat_feature_count
    # Binarise features
    # Each slot counts how many borders the raw value exceeds (strictly >).
    binary_features = [0] * model.binary_feature_count
    binary_feature_index = 0
    for i in range(len(model.float_feature_borders)):
        for border in model.float_feature_borders[i]:
            binary_features[binary_feature_index] += 1 if (float_features[model.float_features_index[i]] > border) else 0
        binary_feature_index += 1
    # Hash every categorical value once.
    transposed_hash = [0] * model.cat_feature_count
    for i in range(model.cat_feature_count):
        transposed_hash[i] = hash_uint64(cat_features[i])
    # One-hot encode the designated categorical features by matching hashes.
    if len(model.one_hot_cat_feature_index) > 0:
        cat_feature_packed_indexes = {}
        for i in range(model.cat_feature_count):
            cat_feature_packed_indexes[model.cat_features_index[i]] = i
        for i in range(len(model.one_hot_cat_feature_index)):
            cat_idx = cat_feature_packed_indexes[model.one_hot_cat_feature_index[i]]
            hash = transposed_hash[cat_idx]
            for border_idx in range(len(model.one_hot_hash_values[i])):
                binary_features[binary_feature_index] |= (1 if hash == model.one_hot_hash_values[i][border_idx] else 0) * (border_idx + 1)
            binary_feature_index += 1
    # Compute CTR features and binarize them against their own borders.
    if hasattr(model, 'model_ctrs') and model.model_ctrs.used_model_ctrs_count > 0:
        ctrs = [0.] * model.model_ctrs.used_model_ctrs_count;
        calc_ctrs(model.model_ctrs, binary_features, transposed_hash, ctrs)
        for i in range(len(model.ctr_feature_borders)):
            for border in model.ctr_feature_borders[i]:
                binary_features[binary_feature_index] += 1 if ctrs[i] > border else 0
            binary_feature_index += 1
    # Extract and sum values from trees
    # Oblivious trees: each depth level contributes one bit of the leaf index.
    result = 0.
    tree_splits_index = 0
    current_tree_leaf_values_index = 0
    for tree_id in range(ntree_start, ntree_end):
        current_tree_depth = model.tree_depth[tree_id]
        index = 0
        for depth in range(current_tree_depth):
            border_val = model.tree_split_border[tree_splits_index + depth]
            feature_index = model.tree_split_feature_index[tree_splits_index + depth]
            xor_mask = model.tree_split_xor_mask[tree_splits_index + depth]
            index |= ((binary_features[feature_index] ^ xor_mask) >= border_val) << depth
        result += model.leaf_values[current_tree_leaf_values_index + index]
        tree_splits_index += current_tree_depth
        current_tree_leaf_values_index += (1 << current_tree_depth)
    return result
| 70.743363 | 868 | 0.649687 |
8f6ba7dfcd7821d2b2e41fa9923925ad77d9d4b3 | 25,624 | py | Python | matdeeplearn/process/process.py | usccolumbia/deeperGATGNN | 24ee53b23d1559040b7aab971768434753b582ff | [
"MIT"
] | 19 | 2021-09-29T20:32:50.000Z | 2022-02-22T11:32:18.000Z | matdeeplearn/process/process.py | usccolumbia/deeperGATGNN | 24ee53b23d1559040b7aab971768434753b582ff | [
"MIT"
] | 1 | 2021-11-23T07:37:50.000Z | 2021-11-23T07:37:50.000Z | matdeeplearn/process/process.py | usccolumbia/deeperGATGNN | 24ee53b23d1559040b7aab971768434753b582ff | [
"MIT"
] | 2 | 2021-09-30T10:18:37.000Z | 2022-01-30T01:08:05.000Z | import os
import sys
import time
import csv
import json
import warnings
import numpy as np
import ase
import glob
from ase import io
from scipy.stats import rankdata
from scipy import interpolate
##torch imports
import torch
import torch.nn.functional as F
from torch_geometric.data import DataLoader, Dataset, Data, InMemoryDataset
from torch_geometric.utils import dense_to_sparse, degree, add_self_loops
import torch_geometric.transforms as T
from torch_geometric.utils import degree
################################################################################
# Data splitting
################################################################################
##basic train, val, test split
def split_data(
    dataset,
    train_ratio,
    val_ratio,
    test_ratio,
    seed=None,
    save=False,
):
    """Randomly split a dataset into train / validation / test subsets.

    Args:
        dataset: any object supporting ``len()`` and indexing
            (e.g. a torch Dataset or a list).
        train_ratio, val_ratio, test_ratio: fractions of the dataset; their
            sum must be <= 1 (any remainder is discarded as "unused").
        seed: RNG seed for the split. ``None`` (default) draws a fresh random
            seed on every call. NOTE: the original code used
            ``seed=np.random.randint(1, 1e6)`` as the default, which is
            evaluated only once at import time, so every call silently reused
            the same "random" seed.
        save: unused; kept for interface compatibility.

    Returns:
        (train_dataset, val_dataset, test_dataset) torch Subset objects.

    Raises:
        ValueError: if the ratios sum to more than 1 (the original printed
            "invalid ratios" and implicitly returned None, which crashed
            callers on unpacking anyway).
    """
    if seed is None:
        # Draw the seed at call time so repeated calls really are random.
        seed = np.random.randint(1, 1e6)
    dataset_size = len(dataset)
    if (train_ratio + val_ratio + test_ratio) > 1:
        raise ValueError("invalid ratios: train + val + test must be <= 1")
    train_length = int(dataset_size * train_ratio)
    val_length = int(dataset_size * val_ratio)
    test_length = int(dataset_size * test_ratio)
    unused_length = dataset_size - train_length - val_length - test_length
    (
        train_dataset,
        val_dataset,
        test_dataset,
        unused_dataset,
    ) = torch.utils.data.random_split(
        dataset,
        [train_length, val_length, test_length, unused_length],
        generator=torch.Generator().manual_seed(seed),
    )
    print(
        "train length:",
        train_length,
        "val length:",
        val_length,
        "test length:",
        test_length,
        "unused length:",
        unused_length,
        "seed :",
        seed,
    )
    return train_dataset, val_dataset, test_dataset
##Basic CV split
def split_data_CV(dataset, num_folds=5, seed=None, save=False):
    """Randomly split a dataset into ``num_folds`` equal folds for CV.

    Args:
        dataset: any object supporting ``len()`` and indexing.
        num_folds: number of cross-validation folds.
        seed: RNG seed. ``None`` (default) draws a fresh seed per call; the
            original default ``np.random.randint(1, 1e6)`` was evaluated once
            at import time and therefore reused across calls.
        save: unused; kept for interface compatibility.

    Returns:
        List of ``num_folds`` torch Subset objects, each of size
        ``len(dataset) // num_folds`` (the remainder is discarded).
    """
    if seed is None:
        seed = np.random.randint(1, 1e6)
    dataset_size = len(dataset)
    # Integer division: every fold has the same size; leftovers are unused.
    fold_length = dataset_size // num_folds
    unused_length = dataset_size - fold_length * num_folds
    folds = [fold_length for i in range(num_folds)]
    folds.append(unused_length)
    cv_dataset = torch.utils.data.random_split(
        dataset, folds, generator=torch.Generator().manual_seed(seed)
    )
    print("fold length :", fold_length, "unused length:", unused_length, "seed", seed)
    return cv_dataset[0:num_folds]
################################################################################
# Pytorch datasets
################################################################################
##Fetch dataset; processes the raw data if specified
def get_dataset(data_path, target_index, reprocess="False", processing_args=None):
    """Fetch a processed dataset, (re)processing the raw data if needed.

    Args:
        data_path: directory containing the raw data.
        target_index: column of data.y to use as the prediction target
            (passed to the GetY transform).
        reprocess: string flag ("True"/"False") — deletes and rebuilds the
            processed directory when "True".
        processing_args: dict of processing options; None uses defaults.

    Returns:
        A StructureDataset (in-memory) or StructureDataset_large (on-disk)
        depending on which processed file layout is found.
    """
    if processing_args == None:
        processed_path = "processed"
    else:
        processed_path = processing_args.get("processed_path", "processed")

    transforms = GetY(index=target_index)

    if os.path.exists(data_path) == False:
        print("Data not found in:", data_path)
        sys.exit()

    # Force a rebuild of the processed cache.
    if reprocess == "True":
        os.system("rm -rf " + os.path.join(data_path, processed_path))
        process_data(data_path, processed_path, processing_args)

    # Dispatch on which serialized layout exists: a single collated
    # "data.pt" (in-memory) vs per-sample files (large dataset).
    # NOTE(review): process_data saves per-sample files as "data_{i}.pt"
    # (e.g. "data_0.pt"), but this checks for "data0.pt" — confirm the
    # large-dataset branch is ever taken.
    if os.path.exists(os.path.join(data_path, processed_path, "data.pt")) == True:
        dataset = StructureDataset(
            data_path,
            processed_path,
            transforms,
        )
    elif os.path.exists(os.path.join(data_path, processed_path, "data0.pt")) == True:
        dataset = StructureDataset_large(
            data_path,
            processed_path,
            transforms,
        )
    else:
        # No processed cache found: build it, then retry the same dispatch.
        process_data(data_path, processed_path, processing_args)
        if os.path.exists(os.path.join(data_path, processed_path, "data.pt")) == True:
            dataset = StructureDataset(
                data_path,
                processed_path,
                transforms,
            )
        elif os.path.exists(os.path.join(data_path, processed_path, "data0.pt")) == True:
            dataset = StructureDataset_large(
                data_path,
                processed_path,
                transforms,
            )
    return dataset
##Dataset class from pytorch/pytorch geometric; inmemory case
##Dataset class from pytorch/pytorch geometric; inmemory case
class StructureDataset(InMemoryDataset):
    """In-memory PyTorch Geometric dataset backed by a single collated
    ``data.pt`` file under ``<data_path>/<processed_path>``."""

    def __init__(
        self, data_path, processed_path="processed", transform=None, pre_transform=None
    ):
        self.data_path = data_path
        self.processed_path = processed_path
        super(StructureDataset, self).__init__(data_path, transform, pre_transform)
        # Load the collated (data, slices) pair produced by
        # InMemoryDataset.collate in process_data.
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self):
        # No raw-file tracking: processing is handled externally.
        return []

    @property
    def processed_dir(self):
        return os.path.join(self.data_path, self.processed_path)

    @property
    def processed_file_names(self):
        file_names = ["data.pt"]
        return file_names
##Dataset class from pytorch/pytorch geometric
##Dataset class from pytorch/pytorch geometric
class StructureDataset_large(Dataset):
    """On-disk PyTorch Geometric dataset: one ``data_{i}.pt`` file per
    sample, loaded lazily in ``get`` (for datasets too big for memory)."""

    def __init__(
        self, data_path, processed_path="processed", transform=None, pre_transform=None
    ):
        self.data_path = data_path
        self.processed_path = processed_path
        super(StructureDataset_large, self).__init__(
            data_path, transform, pre_transform
        )

    @property
    def raw_file_names(self):
        # No raw-file tracking: processing is handled externally.
        return []

    @property
    def processed_dir(self):
        return os.path.join(self.data_path, self.processed_path)

    @property
    def processed_file_names(self):
        # file_names = ["data.pt"]
        # Enumerate every serialized per-sample file on disk.
        file_names = []
        for file_name in glob.glob(self.processed_dir + "/data*.pt"):
            file_names.append(os.path.basename(file_name))
        # print(file_names)
        return file_names

    def len(self):
        return len(self.processed_file_names)

    def get(self, idx):
        # Lazy load: only the requested sample is read from disk.
        data = torch.load(os.path.join(self.processed_dir, "data_{}.pt".format(idx)))
        return data
################################################################################
# Processing
################################################################################
def create_global_feat(atoms_index_arr):
    """Build a (1, 108) composition vector for a structure.

    Entry ``j`` holds the fraction of atoms whose atomic number is ``j``;
    all other entries stay zero.
    """
    composition = np.zeros(108)
    numbers, counts = np.unique(atoms_index_arr, return_counts=True)
    total = counts.sum()
    for number, count in zip(numbers, counts):
        composition[number] = count / total
    return composition.reshape(1, -1)
def process_data(data_path, processed_path, processing_args):
    """Convert raw crystal-structure files into serialized graph Data objects.

    Pipeline: load atom dictionary and targets -> read each structure with
    ase -> build a sparse distance graph -> attach targets / atomic numbers /
    global composition features -> optional descriptors (Voronoi, SOAP, SM)
    -> Gaussian edge features -> save to ``<data_path>/<processed_path>``.

    Note: most boolean options in ``processing_args`` are the *strings*
    "True"/"False", not Python booleans.
    """
    ##Begin processing data
    print("Processing data to: " + os.path.join(data_path, processed_path))
    assert os.path.exists(data_path), "Data path not found in " + data_path

    ##Load dictionary (maps atomic number string -> node feature vector)
    if processing_args["dictionary_source"] != "generated":
        if processing_args["dictionary_source"] == "default":
            print("Using default dictionary.")
            atom_dictionary = get_dictionary(
                os.path.join(
                    os.path.dirname(os.path.realpath(__file__)),
                    "dictionary_default.json",
                )
            )
        elif processing_args["dictionary_source"] == "blank":
            print(
                "Using blank dictionary. Warning: only do this if you know what you are doing"
            )
            atom_dictionary = get_dictionary(
                os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), "dictionary_blank.json"
                )
            )
        else:
            dictionary_file_path = os.path.join(
                data_path, processing_args["dictionary_path"]
            )
            if os.path.exists(dictionary_file_path) == False:
                print("Atom dictionary not found, exiting program...")
                sys.exit()
            else:
                print("Loading atom dictionary from file.")
                atom_dictionary = get_dictionary(dictionary_file_path)

    ##Load targets: CSV rows of [structure_id, target_0, target_1, ...]
    target_property_file = os.path.join(data_path, processing_args["target_path"])
    assert os.path.exists(target_property_file), (
        "targets not found in " + target_property_file
    )
    with open(target_property_file) as f:
        reader = csv.reader(f)
        target_data = [row for row in reader]

    ##Read db file if specified (all structures pre-loaded from an ase db)
    ase_crystal_list = []
    if processing_args["data_format"] == "db":
        db = ase.db.connect(os.path.join(data_path, "data.db"))
        row_count = 0
        # target_data=[]
        for row in db.select():
            # target_data.append([str(row_count), row.get('target')])
            ase_temp = row.toatoms()
            ase_crystal_list.append(ase_temp)
            row_count = row_count + 1
            if row_count % 500 == 0:
                print("db processed: ", row_count)

    ##Process structure files and create structure graphs
    data_list = []
    for index in range(0, len(target_data)):

        structure_id = target_data[index][0]
        data = Data()

        ##Read in structure file using ase
        if processing_args["data_format"] != "db":
            ase_crystal = ase.io.read(
                os.path.join(
                    data_path, structure_id + "." + processing_args["data_format"]
                )
            )
            data.ase = ase_crystal
        else:
            ase_crystal = ase_crystal_list[index]
            data.ase = ase_crystal

        ##Compile structure sizes (# of atoms) and elemental compositions
        if index == 0:
            length = [len(ase_crystal)]
            elements = [list(set(ase_crystal.get_chemical_symbols()))]
        else:
            length.append(len(ase_crystal))
            elements.append(list(set(ase_crystal.get_chemical_symbols())))

        ##Obtain distance matrix with ase (mic = minimum image convention)
        distance_matrix = ase_crystal.get_all_distances(mic=True)

        ##Create sparse graph from distance matrix: keep edges within
        ##graph_max_radius, at most graph_max_neighbors per node.
        distance_matrix_trimmed = threshold_sort(
            distance_matrix,
            processing_args["graph_max_radius"],
            processing_args["graph_max_neighbors"],
            adj=False,
        )
        distance_matrix_trimmed = torch.Tensor(distance_matrix_trimmed)
        out = dense_to_sparse(distance_matrix_trimmed)
        edge_index = out[0]
        edge_weight = out[1]

        self_loops = True
        if self_loops == True:
            # Self-loop edges get weight 0 (fill_value); the mask marks
            # which matrix entries correspond to retained edges.
            edge_index, edge_weight = add_self_loops(
                edge_index, edge_weight, num_nodes=len(ase_crystal), fill_value=0
            )
            data.edge_index = edge_index
            data.edge_weight = edge_weight
            distance_matrix_mask = (
                distance_matrix_trimmed.fill_diagonal_(1) != 0
            ).int()
        elif self_loops == False:
            data.edge_index = edge_index
            data.edge_weight = edge_weight
            distance_matrix_mask = (distance_matrix_trimmed != 0).int()

        data.edge_descriptor = {}
        data.edge_descriptor["distance"] = edge_weight
        data.edge_descriptor["mask"] = distance_matrix_mask

        # Targets: all CSV columns after the structure id.
        target = target_data[index][1:]
        y = torch.Tensor(np.array([target], dtype=np.float32))
        data.y = y

        # Global composition feature (GATGNN-style), repeated per atom.
        _atoms_index = ase_crystal.get_atomic_numbers()
        gatgnn_glob_feat = create_global_feat(_atoms_index)
        gatgnn_glob_feat = np.repeat(gatgnn_glob_feat, len(_atoms_index), axis=0)
        data.glob_feat = torch.Tensor(gatgnn_glob_feat).float()

        # pos = torch.Tensor(ase_crystal.get_positions())
        # data.pos = pos
        z = torch.LongTensor(ase_crystal.get_atomic_numbers())
        data.z = z

        ###placeholder for state feature
        u = np.zeros((3))
        u = torch.Tensor(u[np.newaxis, ...])
        data.u = u

        data.structure_id = [[structure_id] * len(data.y)]

        if processing_args["verbose"] == "True" and (
            (index + 1) % 500 == 0 or (index + 1) == len(target_data)
        ):
            print("Data processed: ", index + 1, "out of", len(target_data))

        # if index == 0:
        # print(data)
        # print(data.edge_weight, data.edge_attr[0])

        data_list.append(data)

    ##Dataset-level statistics gathered during the loop.
    n_atoms_max = max(length)
    species = list(set(sum(elements, [])))
    species.sort()
    num_species = len(species)
    if processing_args["verbose"] == "True":
        print(
            "Max structure size: ",
            n_atoms_max,
            "Max number of elements: ",
            num_species,
        )
        print("Unique species:", species)
    # NOTE(review): these two lines run after the loop, so they only tag the
    # *last* structure's Data object with its length — looks like leftover
    # code that belonged inside the loop; confirm intent.
    crystal_length = len(ase_crystal)
    data.length = torch.LongTensor([crystal_length])

    ##Generate node features
    if processing_args["dictionary_source"] != "generated":
        ##Atom features(node features) from atom dictionary file
        for index in range(0, len(data_list)):
            atom_fea = np.vstack(
                [
                    atom_dictionary[str(data_list[index].ase.get_atomic_numbers()[i])]
                    for i in range(len(data_list[index].ase))
                ]
            ).astype(float)
            # print([
            # atom_dictionary[data_list[index].ase.get_atomic_numbers()[i]]
            # for i in range(len(data_list[index].ase))
            # ]);
            # exit()
            data_list[index].x = torch.Tensor(atom_fea)
    elif processing_args["dictionary_source"] == "generated":
        ##Generates one-hot node features rather than using dict file
        from sklearn.preprocessing import LabelBinarizer

        lb = LabelBinarizer()
        lb.fit(species)
        for index in range(0, len(data_list)):
            data_list[index].x = torch.Tensor(
                lb.transform(data_list[index].ase.get_chemical_symbols())
            )

    ##Adds node degree to node features (appears to improve performance)
    for index in range(0, len(data_list)):
        data_list[index] = OneHotDegree(
            data_list[index], processing_args["graph_max_neighbors"] + 1
        )

    ##Get graphs based on voronoi connectivity; todo: also get voronoi features
    ##avoid use for the time being until a good approach is found
    # Hard-disabled: the branch below is currently dead code.
    processing_args["voronoi"] = "False"
    if processing_args["voronoi"] == "True":
        from pymatgen.core.structure import Structure
        from pymatgen.analysis.structure_analyzer import VoronoiConnectivity
        from pymatgen.io.ase import AseAtomsAdaptor

        Converter = AseAtomsAdaptor()
        for index in range(0, len(data_list)):
            pymatgen_crystal = Converter.get_structure(data_list[index].ase)
            # double check if cutoff distance does anything
            Voronoi = VoronoiConnectivity(
                pymatgen_crystal, cutoff=processing_args["graph_max_radius"]
            )
            connections = Voronoi.max_connectivity

            # reverse=True: higher connectivity = better rank.
            distance_matrix_voronoi = threshold_sort(
                connections,
                9999,
                processing_args["graph_max_neighbors"],
                reverse=True,
                adj=False,
            )
            distance_matrix_voronoi = torch.Tensor(distance_matrix_voronoi)

            out = dense_to_sparse(distance_matrix_voronoi)
            edge_index_voronoi = out[0]
            edge_weight_voronoi = out[1]

            # NOTE(review): distance_gaussian is only defined further down
            # (edge_features section); enabling voronoi as-is would raise
            # NameError here — confirm before re-enabling.
            edge_attr_voronoi = distance_gaussian(edge_weight_voronoi)
            edge_attr_voronoi = edge_attr_voronoi.float()

            data_list[index].edge_index_voronoi = edge_index_voronoi
            data_list[index].edge_weight_voronoi = edge_weight_voronoi
            data_list[index].edge_attr_voronoi = edge_attr_voronoi
            if index % 500 == 0:
                print("Voronoi data processed: ", index)

    ##makes SOAP and SM features from dscribe
    if processing_args["SOAP_descriptor"] == "True":
        # Periodic if any cell direction has periodic boundary conditions.
        if True in data_list[0].ase.pbc:
            periodicity = True
        else:
            periodicity = False

        from dscribe.descriptors import SOAP

        make_feature_SOAP = SOAP(
            species=species,
            rcut=processing_args["SOAP_rcut"],
            nmax=processing_args["SOAP_nmax"],
            lmax=processing_args["SOAP_lmax"],
            sigma=processing_args["SOAP_sigma"],
            periodic=periodicity,
            sparse=False,
            average="inner",
            rbf="gto",
            crossover=False,
        )
        for index in range(0, len(data_list)):
            features_SOAP = make_feature_SOAP.create(data_list[index].ase)
            data_list[index].extra_features_SOAP = torch.Tensor(features_SOAP)
            if processing_args["verbose"] == "True" and index % 500 == 0:
                if index == 0:
                    print(
                        "SOAP length: ",
                        features_SOAP.shape,
                    )
                print("SOAP descriptor processed: ", index)
    elif processing_args["SM_descriptor"] == "True":
        if True in data_list[0].ase.pbc:
            periodicity = True
        else:
            periodicity = False

        from dscribe.descriptors import SineMatrix, CoulombMatrix

        # Sine matrix for periodic systems, Coulomb matrix otherwise.
        if periodicity == True:
            make_feature_SM = SineMatrix(
                n_atoms_max=n_atoms_max,
                permutation="eigenspectrum",
                sparse=False,
                flatten=True,
            )
        else:
            make_feature_SM = CoulombMatrix(
                n_atoms_max=n_atoms_max,
                permutation="eigenspectrum",
                sparse=False,
                flatten=True,
            )

        for index in range(0, len(data_list)):
            features_SM = make_feature_SM.create(data_list[index].ase)
            data_list[index].extra_features_SM = torch.Tensor(features_SM)
            if processing_args["verbose"] == "True" and index % 500 == 0:
                if index == 0:
                    print(
                        "SM length: ",
                        features_SM.shape,
                    )
                print("SM descriptor processed: ", index)

    ##Generate edge features
    if processing_args["edge_features"] == "True":

        ##Distance descriptor using a Gaussian basis
        distance_gaussian = GaussianSmearing(
            0, 1, processing_args["graph_edge_length"], 0.2
        )
        # print(GetRanges(data_list, 'distance'))
        # Min-max normalize distances to [0, 1] before Gaussian expansion.
        NormalizeEdge(data_list, "distance")
        # print(GetRanges(data_list, 'distance'))
        for index in range(0, len(data_list)):
            data_list[index].edge_attr = distance_gaussian(
                data_list[index].edge_descriptor["distance"]
            )
            if processing_args["verbose"] == "True" and (
                (index + 1) % 500 == 0 or (index + 1) == len(target_data)
            ):
                print("Edge processed: ", index + 1, "out of", len(target_data))

    # Drop heavy/no-longer-needed attributes before serialization.
    Cleanup(data_list, ["ase", "edge_descriptor"])

    if os.path.isdir(os.path.join(data_path, processed_path)) == False:
        os.mkdir(os.path.join(data_path, processed_path))

    ##Save processed dataset to file
    if processing_args["dataset_type"] == "inmemory":
        data, slices = InMemoryDataset.collate(data_list)
        torch.save((data, slices), os.path.join(data_path, processed_path, "data.pt"))

    elif processing_args["dataset_type"] == "large":
        for i in range(0, len(data_list)):
            torch.save(
                data_list[i],
                os.path.join(
                    os.path.join(data_path, processed_path), "data_{}.pt".format(i)
                ),
            )
################################################################################
# Processing sub-functions
################################################################################
##Selects edges with distance threshold and limited number of neighbors
##Selects edges with distance threshold and limited number of neighbors
def threshold_sort(matrix, threshold, neighbors, reverse=False, adj=False):
    """Sparsify a pairwise matrix: drop entries above ``threshold`` and keep
    at most ``neighbors`` nearest entries per row.

    Args:
        matrix: square 2D numpy array of pairwise values (e.g. distances).
        threshold: entries strictly greater than this are masked out.
        neighbors: maximum number of retained entries per row.
        reverse: False keeps the smallest values per row (distances);
            True keeps the largest (e.g. connectivity strengths).
        adj: when True, additionally return padded adjacency-list arrays.

    Returns:
        The trimmed matrix (non-kept entries zeroed, kept entries holding
        their original values), plus ``(adj_list, adj_attr)`` if ``adj``.
    """
    mask = matrix > threshold
    distance_matrix_trimmed = np.ma.array(matrix, mask=mask)
    # Rank each row's surviving entries (1 = closest, or largest if reverse).
    if reverse == False:
        distance_matrix_trimmed = rankdata(
            distance_matrix_trimmed, method="ordinal", axis=1
        )
    elif reverse == True:
        distance_matrix_trimmed = rankdata(
            distance_matrix_trimmed * -1, method="ordinal", axis=1
        )
    # Re-zero the masked (over-threshold) entries, which rankdata still ranks.
    distance_matrix_trimmed = np.nan_to_num(
        np.where(mask, np.nan, distance_matrix_trimmed)
    )
    # Keep only the top neighbors+1 ranks (the +1 accounts for the diagonal
    # self-entry, which ranks first in each row).
    distance_matrix_trimmed[distance_matrix_trimmed > neighbors + 1] = 0

    if adj == False:
        # Replace surviving ranks with the original matrix values.
        distance_matrix_trimmed = np.where(
            distance_matrix_trimmed == 0, distance_matrix_trimmed, matrix
        )
        return distance_matrix_trimmed
    elif adj == True:
        adj_list = np.zeros((matrix.shape[0], neighbors + 1))
        adj_attr = np.zeros((matrix.shape[0], neighbors + 1))
        for i in range(0, matrix.shape[0]):
            # Column indices of kept entries, zero-padded to fixed width.
            # NOTE(review): padding with 0 is indistinguishable from a real
            # neighbor at column 0 — downstream must rely on adj_attr.
            temp = np.where(distance_matrix_trimmed[i] != 0)[0]
            adj_list[i, :] = np.pad(
                temp,
                pad_width=(0, neighbors + 1 - len(temp)),
                mode="constant",
                constant_values=0,
            )
            adj_attr[i, :] = matrix[i, adj_list[i, :].astype(int)]
        distance_matrix_trimmed = np.where(
            distance_matrix_trimmed == 0, distance_matrix_trimmed, matrix
        )
        return distance_matrix_trimmed, adj_list, adj_attr
##Slightly edited version from pytorch geometric to create edge from gaussian basis
##Slightly edited version from pytorch geometric to create edge from gaussian basis
class GaussianSmearing(torch.nn.Module):
    """Expand scalar distances into a fixed-size Gaussian radial basis.

    ``resolution`` Gaussian centers are spaced evenly on [start, stop]; each
    input value is mapped to its Gaussian response at every center, with a
    shared width of ``width * (stop - start)``.
    """

    def __init__(self, start=0.0, stop=5.0, resolution=50, width=0.05, **kwargs):
        super(GaussianSmearing, self).__init__()
        centers = torch.linspace(start, stop, resolution)
        # Shared exponent coefficient: -1 / (2 * sigma^2), sigma scaled to
        # the basis span rather than to the center spacing.
        self.coeff = -0.5 / ((stop - start) * width) ** 2
        # Registered as a buffer so it moves with the module across devices.
        self.register_buffer("offset", centers)

    def forward(self, dist):
        """Map a tensor of distances to shape ``(*dist.shape, resolution)``."""
        diff = dist.unsqueeze(-1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * torch.pow(diff, 2))
##Obtain node degree in one-hot representation
##Obtain node degree in one-hot representation
def OneHotDegree(data, max_degree, in_degree=False, cat=True):
    """Append (or set) a one-hot encoding of each node's degree to data.x.

    Args:
        data: a torch_geometric Data object with edge_index (and maybe x).
        max_degree: highest degree representable (one-hot has
            ``max_degree + 1`` classes).
        in_degree: count incoming edges (edge_index row 1) instead of
            outgoing (row 0).
        cat: concatenate to existing features; otherwise replace them.

    Returns:
        The same Data object, mutated in place.
    """
    idx, x = data.edge_index[1 if in_degree else 0], data.x
    deg = degree(idx, data.num_nodes, dtype=torch.long)
    deg = F.one_hot(deg, num_classes=max_degree + 1).to(torch.float)

    if x is not None and cat:
        # Promote 1D feature vectors to a column before concatenation.
        x = x.view(-1, 1) if x.dim() == 1 else x
        data.x = torch.cat([x, deg.to(x.dtype)], dim=-1)
    else:
        data.x = deg

    return data
##Obtain dictionary file for elemental features
##Obtain dictionary file for elemental features
def get_dictionary(dictionary_file):
    """Load an elemental-feature dictionary from a JSON file and return it."""
    with open(dictionary_file) as handle:
        return json.load(handle)
##Deletes unnecessary data due to slow dataloader
##Deletes unnecessary data due to slow dataloader
def Cleanup(data_list, entries):
    """Best-effort removal of the named attributes from every data object.

    Objects missing an attribute are skipped silently — deletion is
    intentionally tolerant because not every sample carries every field.
    """
    for entry in entries:
        for item in data_list:
            try:
                delattr(item, entry)
            except Exception:
                # Attribute absent (or read-only) on this object: ignore.
                pass
##Get min/max ranges for normalized edges
##Get min/max ranges for normalized edges
def GetRanges(dataset, descriptor_label):
    """Return (mean, std, min, max) of one edge descriptor across a dataset.

    Args:
        dataset: sequence of objects with an ``edge_descriptor`` dict of
            tensors.
        descriptor_label: key into ``edge_descriptor`` (e.g. "distance").

    Returns:
        Tuple ``(mean, std, feature_min, feature_max)``. Entries with zero
        edges are skipped; if *no* entry has edges, min/max are None.
        Note: mean/std are sums of per-entry statistics divided by
        ``len(dataset)`` (including empty entries), preserving the original
        behavior.

    Bug fixed: the original initialized feature_min/feature_max only when
    ``index == 0``, so a dataset whose *first* entry had zero edges raised
    NameError/UnboundLocalError on the first non-empty entry.
    """
    mean = 0.0
    std = 0.0
    feature_min = None
    feature_max = None
    for index in range(0, len(dataset)):
        values = dataset[index].edge_descriptor[descriptor_label]
        if len(values) > 0:
            if feature_max is None:
                # Initialize on the first *non-empty* entry, not index 0.
                feature_max = values.max()
                feature_min = values.min()
            mean += values.mean()
            std += values.std()
            if values.max() > feature_max:
                feature_max = values.max()
            if values.min() < feature_min:
                feature_min = values.min()
    mean = mean / len(dataset)
    std = std / len(dataset)
    return mean, std, feature_min, feature_max
##Normalizes edges
##Normalizes edges
def NormalizeEdge(dataset, descriptor_label):
    """Min-max normalize one edge descriptor in place across the dataset.

    After this call every ``edge_descriptor[descriptor_label]`` tensor is
    rescaled so the dataset-wide minimum maps to 0 and maximum to 1.
    """
    stats = GetRanges(dataset, descriptor_label)
    feature_min, feature_max = stats[2], stats[3]
    span = feature_max - feature_min
    for sample in dataset:
        shifted = sample.edge_descriptor[descriptor_label] - feature_min
        sample.edge_descriptor[descriptor_label] = shifted / span
# WIP
def SM_Edge(dataset):
from dscribe.descriptors import (
CoulombMatrix,
SOAP,
MBTR,
EwaldSumMatrix,
SineMatrix,
)
count = 0
for data in dataset:
n_atoms_max = len(data.ase)
make_feature_SM = SineMatrix(
n_atoms_max=n_atoms_max,
permutation="none",
sparse=False,
flatten=False,
)
features_SM = make_feature_SM.create(data.ase)
features_SM_trimmed = np.where(data.mask == 0, data.mask, features_SM)
features_SM_trimmed = torch.Tensor(features_SM_trimmed)
out = dense_to_sparse(features_SM_trimmed)
edge_index = out[0]
edge_weight = out[1]
data.edge_descriptor["SM"] = edge_weight
if count % 500 == 0:
print("SM data processed: ", count)
count = count + 1
return dataset
################################################################################
# Transforms
################################################################################
##Get specified y index from data.y
##Get specified y index from data.y
class GetY(object):
    """Transform that narrows ``data.y`` to a single target column.

    An index of -1 means "keep all targets" and leaves data unchanged.
    """

    def __init__(self, index=0):
        self.index = index

    def __call__(self, data):
        # Specify target.
        if self.index == -1:
            return data
        data.y = data.y[0][self.index]
        return data
| 35.539528 | 94 | 0.586169 |
3163143cc4b5bc0a36011b5a3944f83ff4870417 | 10,496 | py | Python | dpmModule/jobs/windbreaker.py | OniOniOn-/maplestory_dpm_calc | fbe824f01ab8e8210b174dd9db8295da80c267cd | [
"MIT"
] | 1 | 2020-09-24T06:20:00.000Z | 2020-09-24T06:20:00.000Z | dpmModule/jobs/windbreaker.py | OniOniOn-/maplestory_dpm_calc | fbe824f01ab8e8210b174dd9db8295da80c267cd | [
"MIT"
] | null | null | null | dpmModule/jobs/windbreaker.py | OniOniOn-/maplestory_dpm_calc | fbe824f01ab8e8210b174dd9db8295da80c267cd | [
"MIT"
] | 2 | 2020-09-24T06:20:02.000Z | 2021-08-01T01:34:15.000Z | from ..kernel import core
from ..character import characterKernel as ck
from ..status.ability import Ability_tool
from ..execution.rules import RuleSet, ConcurrentRunRule, ComplexConditionRule
from . import globalSkill
from .jobbranch import bowmen
from .jobclass import cygnus
from math import ceil
from typing import Any, Dict
class JobGenerator(ck.JobGenerator):
    """DPM generator for the Wind Breaker (윈드브레이커) job.

    Skill names are kept in Korean because they are runtime identifiers
    matched elsewhere in the engine; comments are in English.
    """

    def __init__(self):
        super(JobGenerator, self).__init__()
        self.vEnhanceNum = 10
        self.jobtype = "DEX"
        self.jobname = "윈드브레이커"
        self.ability_list = Ability_tool.get_ability_set(
            "boss_pdamage", "crit", "buff_rem"
        )

    def get_ruleset(self):
        """Build scheduling rules: tie Soul Contract to Critical Reinforce,
        and hold Howling Gale stacks when Critical Reinforce is near ready."""
        ruleset = RuleSet()
        # Cast Soul Contract only while Critical Reinforce is running.
        ruleset.add_rule(ConcurrentRunRule("소울 컨트랙트", "크리티컬 리인포스"), RuleSet.BASE)

        def howling_gale_rule(howling_gale, critical_reinforce):
            # Always allowed while Critical Reinforce is active; otherwise
            # hold the last stack if Critical Reinforce comes off cooldown
            # within 25s (save the burst for the buff window).
            if critical_reinforce.is_active():
                return True
            if howling_gale.judge(1, -1) and critical_reinforce.is_cooltime_left(
                25000, -1
            ):
                return False
            return True

        ruleset.add_rule(
            ComplexConditionRule("하울링 게일", ["크리티컬 리인포스"], howling_gale_rule),
            RuleSet.BASE,
        )
        return ruleset

    def get_modifier_optimization_hint(self):
        """Hint used by the gear/hyper-stat optimizer."""
        return core.CharacterModifier(pdamage=45, armor_ignore=15)

    def get_passive_skill_list(
        self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
    ):
        """Passive modifiers (always-on stats scaled by passive level)."""
        passive_level = chtr.get_base_modifier().passive_level + self.combat
        # Elemental Expert
        ElementalExpert = core.InformedCharacterModifier("엘리멘탈 엑스퍼트", patt=10)
        # Elemental Harmony: main stat scales with character level.
        ElementalHarmony = core.InformedCharacterModifier(
            "엘리멘탈 하모니", stat_main=chtr.level // 2
        )
        # Whisper of Wind
        WhisperOfWind = core.InformedCharacterModifier("위스퍼 오브 윈드", att=20)
        # Physical Training
        PhisicalTraining = core.InformedCharacterModifier(
            "피지컬 트레이닝", stat_main=30, stat_sub=30
        )
        # Wind Blessing (passive part)
        WindBlessingPassive = core.InformedCharacterModifier(
            "윈드 블레싱(패시브)",
            pstat_main=15 + passive_level // 3,
            patt=10 + ceil(passive_level / 3),
        )
        # Bow Expert
        BowExpert = core.InformedCharacterModifier(
            "보우 엑스퍼트",
            att=30 + passive_level,
            crit_damage=20 + passive_level // 2,
            pdamage_indep=25 + passive_level // 3,
            boss_pdamage=40 + passive_level,
        )
        return [
            ElementalExpert,
            ElementalHarmony,
            WhisperOfWind,
            PhisicalTraining,
            BowExpert,
            WindBlessingPassive,
        ]

    def get_not_implied_skill_list(
        self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
    ):
        """Modifiers not already implied by base stats: weapon constant
        and weapon mastery."""
        passive_level = chtr.get_base_modifier().passive_level + self.combat
        WeaponConstant = core.InformedCharacterModifier("무기상수", pdamage_indep=30)
        # Mastery: damage-floor penalty shrinking with passive level.
        Mastery = core.InformedCharacterModifier(
            "숙련도", pdamage_indep=-7.5 + 0.5 * ceil(passive_level / 2)
        )
        return [WeaponConstant, Mastery]

    def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
        """Build the full skill graph for the scheduler.

        Core enhancement order:
        Song of Heaven - Trifling Whim - Storm Bringer

        Hyper skill points:
        Trifling Whim - Reinforce, Enhance, Extra Chance
        Song of Heaven - Reinforce, Boss Killer

        Howling Gale ticks 58 times, Vortex Sphere ticks 17 times.
        Trifling Whim is modeled with its average damage/proc values.
        """
        base_modifier = chtr.get_base_modifier()
        passive_level = base_modifier.passive_level + self.combat
        # Buff skills
        # Element: Storm
        Storm = core.BuffSkill(
            "엘리멘트(스톰)", delay=0, remain=200 * 1000, pdamage=10, rem=True
        ).wrap(core.BuffSkillWrapper)
        # Sylph's Aid
        SylphsAid = core.BuffSkill(
            "실프스 에이드", delay=0, remain=200 * 1000, att=20, crit=10, rem=True
        ).wrap(core.BuffSkillWrapper)
        # Albatross Maximum
        Albatross = core.BuffSkill(
            "알바트로스 맥시멈",
            delay=0,
            remain=200 * 1000,
            att=50 + passive_level,
            pdamage=25 + 2 * (passive_level // 3),
            armor_ignore=15 + passive_level // 3,
            crit=25 + passive_level // 2,
            rem=True,
        ).wrap(core.BuffSkillWrapper)
        # Sharp Eyes
        SharpEyes = core.BuffSkill(
            "샤프 아이즈",
            delay=660,
            remain=(300 + 10 * self.combat) * 1000,
            crit=20 + ceil(self.combat / 2),
            crit_damage=15 + ceil(self.combat / 2),
            rem=True,
        ).wrap(core.BuffSkillWrapper)
        # Glory of Guardians
        GloryOfGuardians = core.BuffSkill(
            name="글로리 오브 가디언즈",
            delay=0,
            remain=60 * 1000,
            cooltime=120 * 1000,
            pdamage=10,
        ).wrap(core.BuffSkillWrapper)
        # Storm Bringer (buff placeholder).
        StormBringerDummy = core.BuffSkill(
            "스톰 브링어(버프)",
            delay=0,  # delay needs to be measured/confirmed
            remain=200 * 1000,
        ).wrap(core.BuffSkillWrapper)
        # Hyper: damage increase, +10% proc chance, extra hit.
        whim_proc = (50 + 10 + passive_level // 2) * 0.01
        advanced_proc = (20 + passive_level // 3) * 0.01
        # Trifling Whim: modeled as expected damage over the normal/advanced
        # proc mix, with expected hit count folded into `hit`.
        TriflingWhim = (
            core.DamageSkill(
                "트라이플링 윔",
                delay=0,
                damage=(290 + passive_level * 3) * (1 - advanced_proc)
                + (390 + passive_level * 3) * advanced_proc,
                hit=2 * whim_proc,
                modifier=core.CharacterModifier(pdamage=20),
            )
            .setV(vEhc, 1, 2, False)
            .wrap(core.DamageSkillWrapper)
        )
        # Storm Bringer proc (30% chance folded into hit).
        StormBringer = (
            core.DamageSkill("스톰 브링어", delay=0, damage=500, hit=0.3)
            .setV(vEhc, 2, 2, True)
            .wrap(core.DamageSkillWrapper)
        )
        # Pinpoint Pierce
        PinPointPierce = core.DamageSkill(
            "핀포인트 피어스",
            delay=690,
            damage=340,
            hit=2,
            cooltime=30 * 1000,
        ).wrap(core.DamageSkillWrapper)
        # Pinpoint Pierce debuff applied to the target after the hit.
        PinPointPierceDebuff = core.BuffSkill(
            "핀포인트 피어스(버프)",
            delay=0,
            remain=30 * 1000,
            cooltime=-1,
            pdamage=15,
            armor_ignore=15,
        ).wrap(core.BuffSkillWrapper)
        # Damage Skills
        # Hyper: damage increase, boss damage increase.
        target_pdamage = ((120 + self.combat // 2) / 100) ** 3 * 100 - 100
        # Song of Heaven (main attack).
        SongOfHeaven = (
            core.DamageSkill(
                "천공의 노래",
                delay=120,
                damage=345 + self.combat * 3,
                hit=1,
                modifier=core.CharacterModifier(
                    pdamage=target_pdamage + 20, boss_pdamage=30
                ),
            )
            .setV(vEhc, 0, 2, False)
            .wrap(core.DamageSkillWrapper)
        )
        CygnusPhalanx = cygnus.PhalanxChargeWrapper(vEhc, 0, 0)
        # Idle Whim (burst, averaged to 77.5% damage).
        IdleWhim = (
            core.DamageSkill(
                "아이들 윔",
                delay=600,
                damage=(500 + 20 * vEhc.getV(4, 4)) * 0.775,
                hit=10 * 3,
                cooltime=10 * 1000,
                red=True,
            )
            .isV(vEhc, 4, 4)
            .wrap(core.DamageSkillWrapper)
        )
        # Idle Whim damage-over-time component.
        MercilesswindDOT = core.DotSkill(
            "아이들 윔(도트)",
            summondelay=0,
            delay=1000,
            damage=500 + 20 * vEhc.getV(4, 4),
            hit=1,
            remain=9000,
            cooltime=-1,
        ).wrap(core.DotSkillWrapper)
        # Summon Skills
        GuidedArrow = bowmen.GuidedArrowWrapper(vEhc, 5, 5)
        MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
        # Howling Gale: stackable (max 2 charges).
        HowlingGail = core.StackableSummonSkillWrapper(
            core.SummonSkill(
                "하울링 게일",
                summondelay=630,
                delay=150,
                damage=250 + 10 * vEhc.getV(1, 1),
                hit=3,
                remain=150 * 58 - 1,  # 58 ticks
                cooltime=20 * 1000,
            ).isV(vEhc, 1, 1),
            max_stack=2,
        )
        # Wind Wall (damage halved via pdamage_indep=-50).
        WindWall = (
            core.SummonSkill(
                "윈드 월",
                summondelay=720,
                delay=2000,
                damage=550 + vEhc.getV(2, 2) * 22,
                hit=5 * 3,
                remain=45 * 1000,
                cooltime=90 * 1000,
                red=True,
                modifier=core.CharacterModifier(pdamage_indep=-50),
            )
            .isV(vEhc, 2, 2)
            .wrap(core.SummonSkillWrapper)
        )
        # Vortex Sphere.
        VortexSphere = (
            core.SummonSkill(
                "볼텍스 스피어",
                summondelay=720,
                delay=180,
                damage=400 + 16 * vEhc.getV(0, 0),
                hit=6,
                remain=180 * 17 - 1,
                cooltime=35000,
                red=True,
            )
            .isV(vEhc, 0, 0)
            .wrap(core.SummonSkillWrapper)
        )  # 17 ticks
        ###### Skill Wrapper #####
        # Crit sources: Sylph's Aid + Albatross Maximum + Sharp Eyes.
        CriticalReinforce = bowmen.CriticalReinforceWrapper(
            vEhc, chtr, 3, 3, 10 + 25 + passive_level // 2 + 20 + ceil(self.combat / 2)
        )
        # Damage
        SongOfHeaven.onAfters([TriflingWhim, StormBringer])
        PinPointPierce.onAfters([PinPointPierceDebuff, TriflingWhim, StormBringer])
        MirrorBreak.onAfters([TriflingWhim, StormBringer])
        # Summon
        CygnusPhalanx.onTicks([TriflingWhim, StormBringer])
        HowlingGail.onTicks([TriflingWhim, StormBringer])
        VortexSphere.onTicks([TriflingWhim, StormBringer])
        IdleWhim.onAfter(MercilesswindDOT)
        return (
            SongOfHeaven,
            [
                globalSkill.maple_heros(
                    chtr.level, name="시그너스 나이츠", combat_level=self.combat
                ),
                globalSkill.useful_combat_orders(),
                Storm,
                SylphsAid,
                Albatross,
                SharpEyes,
                StormBringerDummy,
                cygnus.CygnusBlessWrapper(vEhc, 0, 0, chtr.level),
                GloryOfGuardians,
                CriticalReinforce,
                globalSkill.soul_contract(),
            ]
            + [
                GuidedArrow,
                CygnusPhalanx,
                HowlingGail,
                VortexSphere,
                WindWall,
            ]
            + [
                MirrorBreak,
                IdleWhim,
                PinPointPierce,
            ]
            + [
                PinPointPierceDebuff,
                MirrorSpider,
                MercilesswindDOT,
            ]  # Not used from scheduler
            + [SongOfHeaven],
        )
611ccb0d1a3724ce9f9ff7bedbe377df297bbd37 | 7,063 | py | Python | jd_china-joy_card.py | honglt1/jdd | 9fd49a6b09af8e243367067d7c7cd2ae6d418466 | [
"MIT"
] | 1 | 2022-02-09T16:25:04.000Z | 2022-02-09T16:25:04.000Z | jd_china-joy_card.py | honglt1/jdd | 9fd49a6b09af8e243367067d7c7cd2ae6d418466 | [
"MIT"
] | null | null | null | jd_china-joy_card.py | honglt1/jdd | 9fd49a6b09af8e243367067d7c7cd2ae6d418466 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
cron: 10 10 * * *
new Env('萌虎摇摇乐卡片信息');
"""
import requests
import json
import time
import os
import re
import sys
import random
import string
import urllib
#以下部分参考Curtin的脚本:https://github.com/curtinlv/JD-Script
def randomuserAgent():
global uuid,addressid,iosVer,iosV,clientVersion,iPhone,ADID,area,lng,lat
uuid=''.join(random.sample(['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','0','1','2','3','4','5','6','7','8','9','a','b','c','z'], 40))
addressid = ''.join(random.sample('1234567898647', 10))
iosVer = ''.join(random.sample(["15.1.1","14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1"], 1))
iosV = iosVer.replace('.', '_')
clientVersion=''.join(random.sample(["10.3.0", "10.2.7", "10.2.4"], 1))
iPhone = ''.join(random.sample(["8", "9", "10", "11", "12", "13"], 1))
ADID = ''.join(random.sample('0987654321ABCDEF', 8)) + '-' + ''.join(random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(random.sample('0987654321ABCDEF', 12))
area=''.join(random.sample('0123456789', 2)) + '_' + ''.join(random.sample('0123456789', 4)) + '_' + ''.join(random.sample('0123456789', 5)) + '_' + ''.join(random.sample('0123456789', 4))
lng='119.31991256596'+str(random.randint(100,999))
lat='26.1187118976'+str(random.randint(100,999))
UserAgent=''
if not UserAgent:
return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/{ADID};model/iPhone{iPhone},1;addressid/{addressid};appBuild/167707;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS {iosV} like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/null;supportJDSHWK/1'
else:
return UserAgent
#以上部分参考Curtin的脚本:https://github.com/curtinlv/JD-Script
def printf(text):
    """Print a line and flush stdout immediately (so cron logs stream live)."""
    print(text, flush=True)
def load_send():
    """Try to load the notification helper ``send`` from a sibling
    sendNotify.py; on any failure set the global ``send`` to False so
    callers can test ``if send:`` before notifying."""
    global send
    cur_path = os.path.abspath(os.path.dirname(__file__))
    sys.path.append(cur_path)
    if os.path.exists(cur_path + "/sendNotify.py"):
        try:
            # Importing binds sendNotify.send into this module's global.
            from sendNotify import send
        except:
            send=False
            print("加载通知服务失败~")
    else:
        send=False
        print("加载通知服务失败~")
# Resolve the notifier once at import time.
load_send()
def get_remarkinfo():
    """Populate the global ``remarkinfos`` dict (pt_pin -> remark) from the
    local qinglong panel API.

    Reads the panel auth token from /ql/config/auth.json, fetches all
    environment variables, and records the remark of every JD_COOKIE entry
    keyed by its pt_pin. Any error (missing file, bad token, network) is
    swallowed and the step is skipped with a console message.
    """
    url='http://127.0.0.1:5700/api/envs'
    try:
        with open('/ql/config/auth.json', 'r') as f:
            token=json.loads(f.read())['token']
        headers={
            'Accept':'application/json',
            'authorization':'Bearer '+token,
        }
        response=requests.get(url=url,headers=headers)
        for i in range(len(json.loads(response.text)['data'])):
            if json.loads(response.text)['data'][i]['name']=='JD_COOKIE':
                try:
                    # Key: pt_pin extracted from "pt_key=...;pt_pin=...;".
                    # Remark format is either "remark=NAME" or
                    # "remark=NAME;@@extra" — take the part before "@@".
                    if json.loads(response.text)['data'][i]['remarks'].find('@@')==-1:
                        remarkinfos[json.loads(response.text)['data'][i]['value'].split(';')[1].replace('pt_pin=','')]=json.loads(response.text)['data'][i]['remarks'].replace('remark=','')
                    else:
                        remarkinfos[json.loads(response.text)['data'][i]['value'].split(';')[1].replace('pt_pin=','')]=json.loads(response.text)['data'][i]['remarks'].split("@@")[0].replace('remark=','').replace(';','')
                except:
                    # Malformed cookie/remark entry: skip it.
                    pass
    except:
        print('读取auth.json文件出错,跳过获取备注')
def getcardinfo(ck):
    """Query the activity API with cookie *ck* and print the card inventory.

    Uses the module-level ``UserAgent`` set in ``__main__``. The response is
    parsed once and card counts are collected into a dict; cards missing from
    the response now report 0 instead of crashing with an unbound local.
    """
    url = 'https://api.m.jd.com/api'
    headers = {
        'accept': 'application/json, text/plain, */*',
        'content-type': 'application/x-www-form-urlencoded',
        'origin': 'https://yearfestival.jd.com',
        'content-length': '139',
        'accept-language': 'zh-CN,zh-Hans;q=0.9',
        'user-agent': UserAgent,
        'referer': 'https://yearfestival.jd.com/',
        'accept-encoding': 'gzip, deflate, br',
        'cookie': ck
    }
    data = 'appid=china-joy&functionId=collect_bliss_cards_prod&body={"apiMapping":"/api/card/list"}&t=' + str(round(time.time() * 1000)) + '&loginType=2&loginWQBiz=rdcactivity'
    try:
        response = requests.post(url=url, headers=headers, data=data)
        # Fix: parse once -- the original re-parsed response.text on every access.
        card_list = json.loads(response.text)['data']['cardList']
        counts = {card['cardName']: card['count'] for card in card_list}
        names = ['万物更新卡', '肉肉转移卡', '升职加薪卡', '一键美颜卡', '无痕摸鱼卡',
                 '逢考必过卡', '宇宙旅行卡', '一秒脱单卡', '水逆退散卡', '时间暂停卡']
        # Fix: default missing cards to 0 -- the original raised NameError
        # (swallowed by the broad except) when any card was absent.
        wwgxk, rrzyk, szjxk, yjmyk, whmyk, fkbgk, yzlxk, ymtdk, sntsk, sjztk = (
            counts.get(name, 0) for name in names)
        printf('有' + str(wwgxk) + '张万物更新卡')
        printf('其他卡片分布情况如下\n')
        printf(str(rrzyk) + ' ' + str(szjxk) + ' ' + str(yjmyk))
        printf(str(whmyk) + ' ' + str(fkbgk) + ' ' + str(yzlxk))
        printf(str(ymtdk) + ' ' + str(sntsk) + ' ' + str(sjztk) + '\n\n')
    except:
        printf('获取卡片信息出错')
if __name__ == '__main__':
    # Entry point: print the activity URL, load per-account remarks, then
    # report the card inventory for every configured JD account cookie.
    printf('游戏入口:https://yearfestival.jd.com\n\n\n')
    remarkinfos={}
    get_remarkinfo()  # fetch per-account remarks from the qinglong panel
    try:
        cks = os.environ["JD_COOKIE"].split("&")  # cookies from the environment
    except:
        # Fall back to parsing cookies out of the legacy config file.
        f = open("/jd/config/config.sh", "r", encoding='utf-8')
        cks = re.findall(r'Cookie[0-9]*="(pt_key=.*?;pt_pin=.*?;)"', f.read())
        f.close()
    for ck in cks:
        ptpin = re.findall(r"pt_pin=(.*?);", ck)[0]
        try:
            if remarkinfos[ptpin]!='':
                printf("--账号:" + remarkinfos[ptpin] + "--")
            else:
                printf("--无备注账号:" + urllib.parse.unquote(ptpin) + "--")
        except:
            # No remark recorded for this pin -- show the decoded pin instead.
            printf("--账号:" + urllib.parse.unquote(ptpin) + "--")
        UserAgent=randomuserAgent()  # pick a fresh random User-Agent before each query
        getcardinfo(ck)
ab2b81789f63e420787c7b05a3737a3532e0b0fa | 4,768 | py | Python | docs/conf.py | lei-liu1/OCTIS | 1e8e6be5040b38cf3c458ece4327886dee8568ef | [
"MIT"
] | 340 | 2021-02-23T14:44:52.000Z | 2022-03-28T03:38:55.000Z | docs/conf.py | mpalenciaolivar/OCTIS | 1e8e6be5040b38cf3c458ece4327886dee8568ef | [
"MIT"
] | 50 | 2021-04-19T17:23:10.000Z | 2022-03-16T10:01:49.000Z | docs/conf.py | mpalenciaolivar/OCTIS | 1e8e6be5040b38cf3c458ece4327886dee8568ef | [
"MIT"
] | 30 | 2021-04-19T15:42:53.000Z | 2022-03-21T12:08:47.000Z | #!/usr/bin/env python
#
# octis documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import octis
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions: autodoc pulls docstrings, viewcode links to source.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'octis'
copyright = "2020, Silvia Terragni"
author = "Silvia Terragni"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = octis.__version__
# The full version, including alpha/beta/rc tags.
release = octis.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'octisdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'octis.tex',
     'octis Documentation',
     'Silvia Terragni', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'octis',
     'octis Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'octis',
     'octis Documentation',
     author,
     'octis',
     'One line description of project.',
     'Miscellaneous'),
]
| 29.251534 | 77 | 0.683305 |
3839c68febbeafc10edec062bbc095de6eb23da7 | 123 | py | Python | apps/User/tests/__init__.py | Eduardo-RFarias/DjangoReactBackend | b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad | [
"MIT"
] | null | null | null | apps/User/tests/__init__.py | Eduardo-RFarias/DjangoReactBackend | b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad | [
"MIT"
] | null | null | null | apps/User/tests/__init__.py | Eduardo-RFarias/DjangoReactBackend | b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad | [
"MIT"
] | null | null | null | from .BaseAuthenticatedTest import BaseAuthenticatedTest
from .UserTests import UserTests
from .AuthTests import AuthTests
| 30.75 | 56 | 0.878049 |
0fc60b6e16f9bc6695c3a6e21d55ad7897a5a5c7 | 5,551 | py | Python | tkkuih.py | mypapit/tkkuih | c9f2cab5b5aa2386523e415eca8ef4163a12f8b2 | [
"BSD-2-Clause"
] | 1 | 2020-06-04T07:47:58.000Z | 2020-06-04T07:47:58.000Z | tkkuih.py | mypapit/tkkuih | c9f2cab5b5aa2386523e415eca8ef4163a12f8b2 | [
"BSD-2-Clause"
] | null | null | null | tkkuih.py | mypapit/tkkuih | c9f2cab5b5aa2386523e415eca8ef4163a12f8b2 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 10:42:50 2020
Copyright (c) 2020, Mohammad Hafiz bin Ismail.
@author: Mohammad Hafiz bin Ismail (mypapit@gmail.com)
@Purpose: Desktop GUI demonstration for tensorflow
Trained using Tensorflow 1.14 with MobileNet 192
##See function "addOpenFile()" and "detectGate" for documentation
p/s: I wrote this under one hour during Movement Control Order (given by the
government to curb COVID-19 spread) in Alor Setar, Malaysia
At the time of this writing (30 March 2020), I haven't venture past my residential
area since 19 March 2020.
If you use this code in your works, please cite it as:
Ismail, M. H. (2020, March 30). tkkuih - Demo application for combining TKInter with Tensorflow 1 Model. Github Repository. https://github.com/mypapit/tkkuih
"""
import tkinter as tk
from tkinter import filedialog, Text,messagebox
import os
from PIL import ImageTk,Image
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import numpy as np
import tensorflow as tf
import time
imgfile=""  # path of the most recently selected image (module-level state)
root = tk.Tk()
root.title("Demo Tensorflow TKInter Classification by Mohammad Hafiz bin Ismail (mypapit@gmail.com)")
# Main drawing surface for the selected image and the result label.
canvas = tk.Canvas(root, height=600,width=800)
bottomFrame = tk.Frame(root)
bottomFrame.pack(side=tk.BOTTOM)
canvas.pack()
def load_graph(model_file):
    """Load a frozen TensorFlow 1.x GraphDef from *model_file* into a new Graph."""
    graph_def = tf.GraphDef()
    with open(model_file, "rb") as model_handle:
        serialized = model_handle.read()
    graph_def.ParseFromString(serialized)
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
def addOpenFile():
    """
    ##Tested on Inception and MobileNet##
    Ask the user for a JPEG, show it on the canvas, and classify it.

    Please edit model_file to suit your model pb file and label_file to specify your label text file
    Please see function "detectGate" for further customization.
    """
    # Fix: without this declaration the assignment below created a *local*
    # imgfile, so the module-level imgfile checked elsewhere was never updated.
    global imgfile
    model_file = "mobile_graph.pb"
    label_file = "mobile_labels.txt"
    graph = load_graph(model_file)
    filename = filedialog.askopenfilename(initialdir="/", title="Select File",
                                          filetypes=[("JPEG Files", ".jpeg .jpg")])
    if not filename:
        # User cancelled the dialog -- nothing to display or classify.
        return
    print("Selected file: %s" % filename)
    image = ImageTk.PhotoImage(Image.open(filename))
    canvas.create_image(50, 50, anchor=tk.NW, image=image)
    # Fix: keep a reference on the canvas. Tkinter does not hold one, so the
    # PhotoImage was garbage-collected when this function returned and the
    # canvas showed nothing.
    canvas.image = image
    imgfile = filename
    # Pass the model graph, the label file and the image to the classifier.
    detectGate(graph, label_file, filename)
def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    """Decode an image file into a normalized float tensor.

    Supports PNG/GIF/BMP by extension, defaulting to JPEG. The image is cast
    to float32, batched (leading dim of 1), resized bilinearly to
    (input_height, input_width) and normalized as (x - input_mean)/input_std.
    Returns the evaluated numpy array.
    """
    input_name = "file_reader"
    file_reader = tf.read_file(file_name, input_name)
    if file_name.endswith(".png"):
        image_reader = tf.image.decode_png(
            file_reader, channels=3, name="png_reader")
    elif file_name.endswith(".gif"):
        image_reader = tf.squeeze(
            tf.image.decode_gif(file_reader, name="gif_reader"))
    elif file_name.endswith(".bmp"):
        image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
    else:
        image_reader = tf.image.decode_jpeg(
            file_reader, channels=3, name="jpeg_reader")
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    # Fix: close the session when done -- the original leaked one tf.Session
    # (and its resources) on every call.
    with tf.Session() as sess:
        result = sess.run(normalized)
    return result
def load_labels(label_file):
    """Return the class labels from *label_file*, one label per line, in order.

    Trailing whitespace/newlines are stripped from each label.
    """
    # Plain builtin I/O is enough for a local text file; the original used
    # tf.gfile.GFile, which pulled in TensorFlow for no benefit here.
    with open(label_file) as f:
        return [line.rstrip() for line in f]
def detectGate(graph,label_file,file_name):
    """
    #Modify parameter ini untuk Inception
    input_height = 299
    input_width = 299
    #Modify parameter ini untuk MobileNet 224
    input_height = 224
    input_width = 224
    #Modify parameter ini untuk MobileNet compact 192
    input_height = 192
    input_width = 192
    """
    # Current settings target MobileNet compact 192 (see docstring above).
    input_height = 192
    input_width = 192
    input_mean = 0
    input_std = 255
    # Tensor names as exported by TF-Hub retraining scripts.
    input_layer = "Placeholder"
    output_layer = "final_result"
    t = read_tensor_from_image_file(
        file_name,
        input_height=input_height,
        input_width=input_width,
        input_mean=input_mean,
        input_std=input_std)
    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)
    # Run the frozen graph on the preprocessed image tensor.
    with tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0], {
            input_operation.outputs[0]: t
        })
    results = np.squeeze(results)
    # Indices of the five highest scores, best first; only the top-1 is shown.
    top_k = results.argsort()[-5:][::-1]
    labels = load_labels(label_file)
    #for i in top_k:
    #    print(labels[i], results[i])
    gresults = float("{:.4f}".format(results[top_k[0]]))
    # Show "<file> <label> - <score>" under the image on the canvas.
    labelandimage = "{0} \r\n {1} - {2}".format(file_name,labels[top_k[0]],gresults)
    label = tk.Label(canvas,text=labelandimage,bg="gray")
    label.pack()
    print (labels[top_k[0]], results[top_k[0]])
def classifyFunction():
    """Warn the user when no plausible image path has been selected yet."""
    if len(imgfile) < 2:
        print("%s file " % imgfile)
        messagebox.showinfo("Alert", "Please select valid file")
if __name__ == "__main__":
    # "Classify" button triggers the file picker + model run (addOpenFile).
    openFile = tk.Button(bottomFrame,text="Classify",padx=10,pady=5,bg="#999999",command=addOpenFile)
    openFile.pack()
    frame = tk.Frame(root,bg="white")  # NOTE(review): created but never packed/used
    root.mainloop()
| 25.231818 | 157 | 0.690686 |
612b450e47b7e347e161eabd61491b6fb718b35e | 69 | py | Python | aws_rds_policies/aws_rds_instance_auto_minor_version_upgrade_enabled.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | null | null | null | aws_rds_policies/aws_rds_instance_auto_minor_version_upgrade_enabled.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | null | null | null | aws_rds_policies/aws_rds_instance_auto_minor_version_upgrade_enabled.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
def policy(resource):
    """Panther policy: pass when the RDS instance has AutoMinorVersionUpgrade on.

    *resource* is the RDS instance resource dict; the returned boolean is the
    policy verdict (True = compliant).
    """
    return resource['AutoMinorVersionUpgrade']
| 23 | 46 | 0.782609 |
feff04908747f2444dfa1a96a122fd667824b163 | 1,007 | py | Python | src/utils/all_utils.py | CodinjaoftheWorld/DVC_DL_Tensorflow | a01522b69c03f4b2d9f3c8ccc3a1098bfaabfd5c | [
"Apache-2.0"
] | null | null | null | src/utils/all_utils.py | CodinjaoftheWorld/DVC_DL_Tensorflow | a01522b69c03f4b2d9f3c8ccc3a1098bfaabfd5c | [
"Apache-2.0"
] | null | null | null | src/utils/all_utils.py | CodinjaoftheWorld/DVC_DL_Tensorflow | a01522b69c03f4b2d9f3c8ccc3a1098bfaabfd5c | [
"Apache-2.0"
] | null | null | null | import yaml
import os
import csv
import json
import logging
import time
def read_yaml(path_to_yaml: str) -> dict:
    """Parse the YAML file at *path_to_yaml* and return its content."""
    with open(path_to_yaml) as handle:
        parsed = yaml.safe_load(handle)
    logging.info(f"yaml file: {path_to_yaml} loaded successfully.")
    return parsed
def create_directory(dirs: list):
    """Create every directory in *dirs* (parents included); existing ones are kept."""
    for target in dirs:
        os.makedirs(target, exist_ok=True)
        logging.info(f"directory created at {target}")
def save_local_df(data, data_path, index_status=False):
    """Write the dataframe *data* to *data_path* as CSV.

    The index column is written only when *index_status* is truthy.
    """
    data.to_csv(data_path, index=index_status)
    logging.info(f"data is saved at {data_path}")
def save_reports(report: dict, report_path: str, indentation=4):
    """Serialize *report* as indented JSON at *report_path*."""
    with open(report_path, "w") as out_file:
        json.dump(report, out_file, indent=indentation)
    logging.info(f"reports are saved at {report_path}")
def get_timestamp(name):
    """Return *name* suffixed with the current local time, filesystem-safe
    (spaces and colons replaced by underscores)."""
    stamp = time.asctime().replace(" ", "_").replace(":", "_")
    return f"{name}_at_{stamp}"
55f195990a1d1ee0551bc31bcf31f41ef69b03b5 | 2,477 | py | Python | src/dataProcessing/extractRelationalSkipgrams.py | yuzhimanhua/PENNER | 9421b230b4ce7018c32a0c085e3beb886c348bba | [
"Apache-2.0"
] | 11 | 2019-04-24T02:17:36.000Z | 2022-01-11T09:23:02.000Z | src/dataProcessing/extractRelationalSkipgrams.py | yuzhimanhua/PENNER | 9421b230b4ce7018c32a0c085e3beb886c348bba | [
"Apache-2.0"
] | null | null | null | src/dataProcessing/extractRelationalSkipgrams.py | yuzhimanhua/PENNER | 9421b230b4ce7018c32a0c085e3beb886c348bba | [
"Apache-2.0"
] | 3 | 2020-07-18T14:29:52.000Z | 2022-02-09T02:19:28.000Z | import sys
import json
from collections import defaultdict
def getRelationalSkipgrams(tokens, start1, end1, start2, end2, window=5):
    """Return the relational skipgrams for two (non-overlapping) entity mentions.

    Each skipgram is "<left ctx> __ <tokens between mentions> __ <right ctx>"
    with the mention spans blanked out; six fixed context widths are produced.
    Returns [] when the mentions are more than *window* tokens apart.
    """
    # Entity mentions are assumed not to overlap.
    if start1 - end2 > window or start2 - end1 > window:
        return []
    # Normalise so (lo_start, lo_end) is the left-most mention; the output is
    # the same regardless of argument order.
    if start1 < start2:
        lo_start, lo_end, hi_start, hi_end = start1, end1, start2, end2
    else:
        lo_start, lo_end, hi_start, hi_end = start2, end2, start1, end1
    n_tokens = len(tokens)
    # (left-context offset, right-context width) pairs.
    widths = [(-1, 1), (-2, 1), (-3, 1), (-1, 3), (-2, 2), (-1, 2)]
    grams = []
    for left_off, right_width in widths:
        left = ' '.join(tokens[max(lo_start + left_off, 0):lo_start])
        middle = ' '.join(tokens[lo_end + 1:hi_start])
        right = ' '.join(tokens[hi_end + 1:min(hi_end + 1 + right_width, n_tokens)])
        grams.append(left + ' __ ' + middle + ' __ ' + right)
    return grams
def extractRelationalSkipgrams(inputFileName):
    """Scan a JSON-lines corpus and count relational skipgrams per entity pair.

    Each line holds one sentence with "tokens" and "entityMentions". For every
    ordered pair of distinct entity ids in a sentence, every pair of their
    mention positions contributes its relational skipgrams. Returns a mapping
    (eid1, eid2, skipgram) -> occurrence count.
    """
    pair_counts = defaultdict(int)
    with open(inputFileName, "r") as fin:
        for line_no, line in enumerate(fin, start=1):
            if line_no % 100000 == 0:
                print("Processed %s lines" % line_no)
            sentInfo = json.loads(line.strip())
            tokens = sentInfo['tokens']
            # Collect every (start, end) span per entity id in this sentence.
            eid2positions = defaultdict(list)
            for em in sentInfo['entityMentions']:
                eid2positions[em['entityId']].append((em['start'], em['end']))
            for eid1 in eid2positions.keys():
                for eid2 in eid2positions.keys():
                    if eid1 == eid2:
                        continue
                    for idx1 in eid2positions[eid1]:
                        for idx2 in eid2positions[eid2]:
                            for sg in getRelationalSkipgrams(tokens, idx1[0], idx1[1], idx2[0], idx2[1]):
                                pair_counts[(eid1, eid2, sg)] += 1
    return pair_counts
def saveEidPairRelationalSkipgrams(res, outputFileName):
    """Dump (eid1, eid2, skipgram) -> count entries as tab-separated lines."""
    with open(outputFileName, "w") as out:
        for (eid1, eid2, skipgram), count in res.items():
            out.write("\t".join([str(eid1), str(eid2), skipgram, str(count)]) + "\n")
if __name__ == "__main__":
    # Usage: extractRelationalSkipgrams.py <corpusName>
    if len(sys.argv) != 2:
        print('Usage: extractRelationalSkipgrams.py -data')
        exit(1)
    corpusName = sys.argv[1]
    # Corpus layout: ../../data/<corpus>/sentences.json in, counts file out.
    inputFileName = "../../data/"+corpusName+"/sentences.json"
    outputFileName = "../../data/"+corpusName+"/eidPairRelationalSkipgramsCounts.txt"
    res = extractRelationalSkipgrams(inputFileName)
    saveEidPairRelationalSkipgrams(res, outputFileName)
| 39.951613 | 176 | 0.69237 |
ebcc69747c5d1fba784195fdf55e14394d192968 | 10,175 | py | Python | Demo/pdist/cvslib.py | cemeyer/tauthon | 2c3328c5272cffa2a544542217181c5828afa7ed | [
"PSF-2.0"
] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | python/src/Demo/pdist/cvslib.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 315 | 2015-05-31T11:55:46.000Z | 2022-01-12T08:36:37.000Z | python/src/Demo/pdist/cvslib.py | weiqiangzheng/sl4a | d3c17dca978cbeee545e12ea240a9dbf2a6999e9 | [
"Apache-2.0"
] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z | """Utilities for CVS administration."""
import string
import os
import time
import md5
import fnmatch
# Some platforms' time module lacks `timezone`; default to 0 (UTC) so the
# arithmetic in unctime() below always works.
if not hasattr(time, 'timezone'):
    time.timezone = 0
class File:
    """Represent a file's status.
    Instance variables:
    file -- the filename (no slashes), None if uninitialized
    lseen -- true if the data for the local file is up to date
    eseen -- true if the data from the CVS/Entries entry is up to date
    (this implies that the entry must be written back)
    rseen -- true if the data for the remote file is up to date
    proxy -- RCSProxy instance used to contact the server, or None
    Note that lseen and rseen don't necessary mean that a local
    or remote file *exists* -- they indicate that we've checked it.
    However, eseen means that this instance corresponds to an
    entry in the CVS/Entries file.
    If lseen is true:
    lsum -- checksum of the local file, None if no local file
    lctime -- ctime of the local file, None if no local file
    lmtime -- mtime of the local file, None if no local file
    If eseen is true:
    erev -- revision, None if this is a no revision (not '0')
    enew -- true if this is an uncommitted added file
    edeleted -- true if this is an uncommitted removed file
    ectime -- ctime of last local file corresponding to erev
    emtime -- mtime of last local file corresponding to erev
    extra -- 5th string from CVS/Entries file
    If rseen is true:
    rrev -- revision of head, None if non-existent
    rsum -- checksum of that revision, Non if non-existent
    If eseen and rseen are both true:
    esum -- checksum of revision erev, None if no revision
    Note
    """
    def __init__(self, file = None):
        # A File may be created empty; getentry() fills it in later.
        if file and '/' in file:
            raise ValueError, "no slash allowed in file"
        self.file = file
        self.lseen = self.eseen = self.rseen = 0
        self.proxy = None
    def __cmp__(self, other):
        # Order/compare File objects by filename (Python 2 rich-compare fallback).
        return cmp(self.file, other.file)
    def getlocal(self):
        # Refresh lmtime/lctime/lsum from the local filesystem; all None when
        # the local file does not exist.
        try:
            self.lmtime, self.lctime = os.stat(self.file)[-2:]
        except os.error:
            self.lmtime = self.lctime = self.lsum = None
        else:
            self.lsum = md5.new(open(self.file).read()).digest()
        self.lseen = 1
    def getentry(self, line):
        # Parse one CVS/Entries line: /file/rev/dates/extra/.
        words = string.splitfields(line, '/')
        if self.file and words[1] != self.file:
            raise ValueError, "file name mismatch"
        self.file = words[1]
        self.erev = words[2]
        self.edeleted = 0
        self.enew = 0
        self.ectime = self.emtime = None
        if self.erev[:1] == '-':
            # A leading '-' marks an uncommitted removal.
            self.edeleted = 1
            self.erev = self.erev[1:]
        if self.erev == '0':
            # Revision '0' marks an uncommitted add.
            self.erev = None
            self.enew = 1
        else:
            # Dates field holds two 24-char ctime strings separated by a space.
            dates = words[3]
            self.ectime = unctime(dates[:24])
            self.emtime = unctime(dates[25:])
        self.extra = words[4]
        if self.rseen:
            self.getesum()
        self.eseen = 1
    def getremote(self, proxy = None):
        # Refresh rrev/rsum from the RCS proxy; None when the file is absent.
        if proxy:
            self.proxy = proxy
        try:
            self.rrev = self.proxy.head(self.file)
        except (os.error, IOError):
            self.rrev = None
        if self.rrev:
            self.rsum = self.proxy.sum(self.file)
        else:
            self.rsum = None
        if self.eseen:
            self.getesum()
        self.rseen = 1
    def getesum(self):
        # Compute esum, reusing rsum when the entry revision matches head.
        if self.erev == self.rrev:
            self.esum = self.rsum
        elif self.erev:
            name = (self.file, self.erev)
            self.esum = self.proxy.sum(name)
        else:
            self.esum = None
    def putentry(self):
        """Return a line suitable for inclusion in CVS/Entries.
        The returned line is terminated by a newline.
        If no entry should be written for this file,
        return "".
        """
        if not self.eseen:
            return ""
        rev = self.erev or '0'
        if self.edeleted:
            rev = '-' + rev
        if self.enew:
            dates = 'Initial ' + self.file
        else:
            dates = gmctime(self.ectime) + ' ' + \
                    gmctime(self.emtime)
        return "/%s/%s/%s/%s/\n" % (
            self.file,
            rev,
            dates,
            self.extra)
    def report(self):
        # Dump every known attribute for debugging; '?' marks unset fields.
        print '-'*50
        def r(key, repr=repr, self=self):
            try:
                value = repr(getattr(self, key))
            except AttributeError:
                value = "?"
            print "%-15s:" % key, value
        r("file")
        if self.lseen:
            r("lsum", hexify)
            r("lctime", gmctime)
            r("lmtime", gmctime)
        if self.eseen:
            r("erev")
            r("enew")
            r("edeleted")
            r("ectime", gmctime)
            r("emtime", gmctime)
        if self.rseen:
            r("rrev")
            r("rsum", hexify)
            if self.eseen:
                r("esum", hexify)
class CVS:
    """Represent the contents of a CVS admin file (and more).
    Class variables:
    FileClass -- the class to be instantiated for entries
    (this should be derived from class File above)
    IgnoreList -- shell patterns for local files to be ignored
    Instance variables:
    entries -- a dictionary containing File instances keyed by
    their file name
    proxy -- an RCSProxy instance, or None
    """
    FileClass = File
    IgnoreList = ['.*', '@*', ',*', '*~', '*.o', '*.a', '*.so', '*.pyc']
    def __init__(self):
        self.entries = {}
        self.proxy = None
    def setproxy(self, proxy):
        # Switching proxies invalidates all cached remote data.
        if proxy is self.proxy:
            return
        self.proxy = proxy
        for e in self.entries.values():
            e.rseen = 0
    def getentries(self):
        """Read the contents of CVS/Entries"""
        self.entries = {}
        f = self.cvsopen("Entries")
        while 1:
            line = f.readline()
            if not line: break
            e = self.FileClass()
            e.getentry(line)
            self.entries[e.file] = e
        f.close()
    def putentries(self):
        """Write CVS/Entries back"""
        f = self.cvsopen("Entries", 'w')
        for e in self.values():
            f.write(e.putentry())
        f.close()
    def getlocalfiles(self):
        # Merge entries with non-ignored files in the current directory and
        # refresh local status for each.
        list = self.entries.keys()
        addlist = os.listdir(os.curdir)
        for name in addlist:
            if name in list:
                continue
            if not self.ignored(name):
                list.append(name)
        list.sort()
        for file in list:
            try:
                e = self.entries[file]
            except KeyError:
                e = self.entries[file] = self.FileClass(file)
            e.getlocal()
    def getremotefiles(self, proxy = None):
        # Refresh remote status for every file known to the proxy.
        if proxy:
            self.proxy = proxy
        if not self.proxy:
            raise RuntimeError, "no RCS proxy"
        addlist = self.proxy.listfiles()
        for file in addlist:
            try:
                e = self.entries[file]
            except KeyError:
                e = self.entries[file] = self.FileClass(file)
            e.getremote(self.proxy)
    def report(self):
        for e in self.values():
            e.report()
        print '-'*50
    def keys(self):
        # Sorted filenames.
        keys = self.entries.keys()
        keys.sort()
        return keys
    def values(self):
        # File instances in filename order.
        def value(key, self=self):
            return self.entries[key]
        return map(value, self.keys())
    def items(self):
        # (filename, File) pairs in filename order.
        def item(key, self=self):
            return (key, self.entries[key])
        return map(item, self.keys())
    def cvsexists(self, file):
        file = os.path.join("CVS", file)
        return os.path.exists(file)
    def cvsopen(self, file, mode = 'r'):
        # Open an admin file under CVS/, backing it up first when writing.
        file = os.path.join("CVS", file)
        if 'r' not in mode:
            self.backup(file)
        return open(file, mode)
    def backup(self, file):
        # Keep a single '~' backup of the previous version.
        if os.path.isfile(file):
            bfile = file + '~'
            try: os.unlink(bfile)
            except os.error: pass
            os.rename(file, bfile)
    def ignored(self, file):
        # Directories and anything matching IgnoreList are skipped.
        if os.path.isdir(file): return True
        for pat in self.IgnoreList:
            if fnmatch.fnmatch(file, pat): return True
        return False
# hexify and unhexify are useful to print MD5 checksums in hex format
hexify_format = '%02x' * 16  # one two-digit hex field per digest byte
def hexify(sum):
    "Return a hex representation of a 16-byte string (e.g. an MD5 digest)"
    if sum is None:
        return "None"
    return hexify_format % tuple(map(ord, sum))
def unhexify(hexsum):
    "Return the original from a hexified string"
    if hexsum == "None":
        return None
    sum = ''
    # Convert each pair of hex digits back to one byte.
    for i in range(0, len(hexsum), 2):
        sum = sum + chr(string.atoi(hexsum[i:i+2], 16))
    return sum
# Lazily-built map of month abbreviation -> month number (1-12).
unctime_monthmap = {}
def unctime(date):
    # Parse an asctime()-style GMT string back into a local timestamp;
    # "None" round-trips to None (see gmctime below).
    if date == "None": return None
    if not unctime_monthmap:
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        i = 0
        for m in months:
            i = i+1
            unctime_monthmap[m] = i
    words = string.split(date) # Day Mon DD HH:MM:SS YEAR
    year = string.atoi(words[4])
    month = unctime_monthmap[words[1]]
    day = string.atoi(words[2])
    [hh, mm, ss] = map(string.atoi, string.splitfields(words[3], ':'))
    # Compensate for the GMT input before handing to the local mktime().
    ss = ss - time.timezone
    return time.mktime((year, month, day, hh, mm, ss, 0, 0, 0))
def gmctime(t):
    # Format timestamp *t* as an asctime() string in GMT; None -> "None".
    if t is None: return "None"
    return time.asctime(time.gmtime(t))
def test_unctime():
    # Manual smoke test: an asctime() string should round-trip through
    # unctime()/gmtime() back to the same formatted value.
    now = int(time.time())
    t = time.gmtime(now)
    at = time.asctime(t)
    print 'GMT', now, at
    print 'timezone', time.timezone
    print 'local', time.ctime(now)
    u = unctime(at)
    print 'unctime()', u
    gu = time.gmtime(u)
    print '->', gu
    print time.asctime(gu)
def test():
    # Manual end-to-end check: read CVS/Entries, scan local files, then fetch
    # remote state through an RCS proxy and dump a full report.
    x = CVS()
    x.getentries()
    x.getlocalfiles()
##    x.report()
    import rcsclient
    proxy = rcsclient.openrcsclient()
    x.getremotefiles(proxy)
    x.report()
if __name__ == "__main__":
    test()
| 27.876712 | 74 | 0.542506 |
e0230f9c40697623fbe2858d97b4e39a783a0210 | 3,106 | py | Python | main/forms.py | adityatelange/django-ebook-audiobook-website | c07647df78b1807d585b3f14678e7726ba89f5c7 | [
"MIT"
] | 2 | 2019-09-10T09:42:23.000Z | 2019-11-28T08:56:09.000Z | main/forms.py | adityatelange/django-ebook-audiobook-website | c07647df78b1807d585b3f14678e7726ba89f5c7 | [
"MIT"
] | null | null | null | main/forms.py | adityatelange/django-ebook-audiobook-website | c07647df78b1807d585b3f14678e7726ba89f5c7 | [
"MIT"
] | null | null | null | import logging
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import (UserCreationForm as DjangoUserCreationForm)
from django.contrib.auth.forms import UsernameField
from django.core.mail import send_mail
from django.forms import inlineformset_factory
from . import models
from . import widgets
logger = logging.getLogger(__name__)
class ContactForm(forms.Form):
    """Site contact form; send_mail() forwards the message by email."""
    name = forms.CharField(label='Your Name', max_length=100)
    email = forms.EmailField(label="Enter your E-mail id", max_length=254)
    message = forms.CharField(
        max_length=500, widget=forms.Textarea
    )
    def send_mail(self):
        """Email the cleaned form contents to customer service.

        Call only after is_valid(); relies on self.cleaned_data.
        """
        logger.info("Sending email to customer service")
        message = "From: {0}\nEmail: {1}\nMessage: {2}".format(self.cleaned_data["name"],
                                                               self.cleaned_data["email"],
                                                               self.cleaned_data["message"], )
        send_mail("Site message", message, "site@booktime.domain", [self.cleaned_data["email"]],
                  fail_silently=False, )
class UserCreationForm(DjangoUserCreationForm):
    """Signup form keyed on email instead of username."""
    class Meta(DjangoUserCreationForm.Meta):
        model = models.User
        fields = ("email",)
        field_classes = {"email": UsernameField}
    def send_mail(self):
        """Send the welcome email to the address the user registered with."""
        logger.info("Sending signup email for email=%s", self.cleaned_data["email"], )
        message = "Welcome {}".format(self.cleaned_data["email"])
        send_mail("Welcome to BookTime", message, "site@booktime.domain", [self.cleaned_data["email"]],
                  fail_silently=True, )
class AuthenticationForm(forms.Form):
    """Login form that authenticates with email + password during clean()."""
    email = forms.EmailField()
    password = forms.CharField(strip=False, widget=forms.PasswordInput)
    def __init__(self, request=None, *args, **kwargs):
        # The request is kept so authenticate() can use request-aware backends.
        self.request = request
        self.user = None
        super().__init__(*args, **kwargs)
    def clean(self):
        """Validate credentials; on success self.user holds the authenticated user."""
        email = self.cleaned_data.get("email")
        password = self.cleaned_data.get("password")
        if email is not None and password:
            self.user = authenticate(self.request, email=email, password=password)
            if self.user is None:
                raise forms.ValidationError("Invalid email/password combination.")
            logger.info("Authentication successful for email=%s", email)
        return self.cleaned_data
    def get_user(self):
        """Return the user authenticated by clean(), or None."""
        return self.user
# Inline formset editing only each basket line's quantity; the plus/minus
# widget replaces the plain number input. extra=0 -> no blank rows.
BasketLineFormSet = inlineformset_factory(
    models.Basket,
    models.BasketLine,
    fields=("quantity",),
    extra=0,
    widgets={"quantity": widgets.PlusMinusNumberInput()},
)
class AddressSelectionForm(forms.Form):
    """Checkout form: pick billing and shipping addresses from the user's saved ones."""
    billing_address = forms.ModelChoiceField(
        queryset=None)
    shipping_address = forms.ModelChoiceField(
        queryset=None)
    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict both choice fields to addresses owned by *user*.
        queryset = models.Address.objects.filter(user=user)
        self.fields['billing_address'].queryset = queryset
        self.fields['shipping_address'].queryset = queryset
| 34.511111 | 103 | 0.658081 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.