Dataset schema (28 columns; length/value ranges as reported by the dataset viewer, ⌀ = nullable):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4–721)
- content_id: string (length 40)
- detected_licenses: list (length 0–57)
- license_type: string (2 classes)
- repo_name: string (length 5–91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 – 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 – 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 – 2023-09-06 06:22:19)
- github_id: int64 (426 – 681M)
- star_events_count: int64 (101 – 243k)
- fork_events_count: int64 (0 – 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 – 2023-09-14 21:59:16, ⌀)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 – 2023-08-10 11:14:58, ⌀)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (6 – 10.2M)
- extension: string (115 classes)
- filename: string (length 3–113)
- content: string (length 6 – 10.2M)
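
For orientation, a minimal sketch of how rows with this schema might be loaded and filtered locally (the Parquet filename and the pandas-based approach are illustrative assumptions, not part of the dataset):

import pandas as pd

# Hypothetical local copy of one shard of this split.
df = pd.read_parquet("data.parquet")

# Keep permissively licensed Python files, mirroring the rows shown below.
mask = (df["license_type"] == "permissive") & (df["language"] == "Python")
print(df.loc[mask, ["repo_name", "path", "length_bytes"]].head())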

blob_id: 74fc9af774f47ce134ed14553905d0770ee726c3
directory_id: 27c94d7e040902d3cdadd5862b15e67ec2ee4b6e
path: /exps/NATS-Bench/tss-collect.py
content_id: af06d4363c823eb00c97d8030a697e79f79932a1
detected_licenses: ["MIT"]
license_type: permissive
repo_name: D-X-Y/AutoDL-Projects
snapshot_id: 8a0779a7710d809af2b052787928d8d34c14d0d9
revision_id: f46486e21b71ae6459a700be720d7648b5429569
branch_name: refs/heads/main
visit_date: 2023-08-13T10:53:49.550889
revision_date: 2022-04-24T22:18:16
committer_date: 2022-04-24T22:18:16
github_id: 168,538,768
star_events_count: 989
fork_events_count: 197
gha_license_id: MIT
gha_event_created_at: 2022-04-24T22:16:21
gha_created_at: 2019-01-31T14:30:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 18,114
extension: py
filename: tss-collect.py
content:
##############################################################################
# NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size #
##############################################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.08 #
##############################################################################
# This file is used to re-organize all checkpoints (created by main-tss.py) #
# into a single benchmark file. Besides, for each architecture, we will     #
# merge the information of all its trials into a single file.               #
# #
# Usage: #
# python exps/NATS-Bench/tss-collect.py #
##############################################################################
import os, re, sys, time, shutil, random, argparse, collections
import numpy as np
from copy import deepcopy
import torch
from tqdm import tqdm
from pathlib import Path
from collections import defaultdict, OrderedDict
from typing import Dict, Any, Text, List
from xautodl.log_utils import AverageMeter, time_string, convert_secs2time
from xautodl.config_utils import load_config, dict2config
from xautodl.datasets import get_datasets
from xautodl.models import CellStructure, get_cell_based_tiny_net, get_search_spaces
from xautodl.procedures import (
bench_pure_evaluate as pure_evaluate,
get_nas_bench_loaders,
)
from xautodl.utils import get_md5_file
from nats_bench import pickle_save, pickle_load, ArchResults, ResultsCount
from nas_201_api import NASBench201API
api = NASBench201API(
"{:}/.torch/NAS-Bench-201-v1_0-e61699.pth".format(os.environ["HOME"])
)
NATS_TSS_BASE_NAME = "NATS-tss-v1_0" # 2020.08.28
def create_result_count(
used_seed: int,
dataset: Text,
arch_config: Dict[Text, Any],
results: Dict[Text, Any],
dataloader_dict: Dict[Text, Any],
) -> ResultsCount:
xresult = ResultsCount(
dataset,
results["net_state_dict"],
results["train_acc1es"],
results["train_losses"],
results["param"],
results["flop"],
arch_config,
used_seed,
results["total_epoch"],
None,
)
net_config = dict2config(
{
"name": "infer.tiny",
"C": arch_config["channel"],
"N": arch_config["num_cells"],
"genotype": CellStructure.str2structure(arch_config["arch_str"]),
"num_classes": arch_config["class_num"],
},
None,
)
if "train_times" in results: # new version
xresult.update_train_info(
results["train_acc1es"],
results["train_acc5es"],
results["train_losses"],
results["train_times"],
)
xresult.update_eval(
results["valid_acc1es"], results["valid_losses"], results["valid_times"]
)
else:
network = get_cell_based_tiny_net(net_config)
network.load_state_dict(xresult.get_net_param())
if dataset == "cifar10-valid":
xresult.update_OLD_eval(
"x-valid", results["valid_acc1es"], results["valid_losses"]
)
loss, top1, top5, latencies = pure_evaluate(
dataloader_dict["{:}@{:}".format("cifar10", "test")], network.cuda()
)
xresult.update_OLD_eval(
"ori-test",
{results["total_epoch"] - 1: top1},
{results["total_epoch"] - 1: loss},
)
xresult.update_latency(latencies)
elif dataset == "cifar10":
xresult.update_OLD_eval(
"ori-test", results["valid_acc1es"], results["valid_losses"]
)
loss, top1, top5, latencies = pure_evaluate(
dataloader_dict["{:}@{:}".format(dataset, "test")], network.cuda()
)
xresult.update_latency(latencies)
elif dataset == "cifar100" or dataset == "ImageNet16-120":
xresult.update_OLD_eval(
"ori-test", results["valid_acc1es"], results["valid_losses"]
)
loss, top1, top5, latencies = pure_evaluate(
dataloader_dict["{:}@{:}".format(dataset, "valid")], network.cuda()
)
xresult.update_OLD_eval(
"x-valid",
{results["total_epoch"] - 1: top1},
{results["total_epoch"] - 1: loss},
)
loss, top1, top5, latencies = pure_evaluate(
dataloader_dict["{:}@{:}".format(dataset, "test")], network.cuda()
)
xresult.update_OLD_eval(
"x-test",
{results["total_epoch"] - 1: top1},
{results["total_epoch"] - 1: loss},
)
xresult.update_latency(latencies)
else:
raise ValueError("invalid dataset name : {:}".format(dataset))
return xresult
def account_one_arch(arch_index, arch_str, checkpoints, datasets, dataloader_dict):
information = ArchResults(arch_index, arch_str)
for checkpoint_path in checkpoints:
checkpoint = torch.load(checkpoint_path, map_location="cpu")
used_seed = checkpoint_path.name.split("-")[-1].split(".")[0]
ok_dataset = 0
for dataset in datasets:
if dataset not in checkpoint:
print(
"Can not find {:} in arch-{:} from {:}".format(
dataset, arch_index, checkpoint_path
)
)
continue
else:
ok_dataset += 1
results = checkpoint[dataset]
assert results[
"finish-train"
], "This {:} arch seed={:} does not finish train on {:} ::: {:}".format(
arch_index, used_seed, dataset, checkpoint_path
)
arch_config = {
"channel": results["channel"],
"num_cells": results["num_cells"],
"arch_str": arch_str,
"class_num": results["config"]["class_num"],
}
xresult = create_result_count(
used_seed, dataset, arch_config, results, dataloader_dict
)
information.update(dataset, int(used_seed), xresult)
if ok_dataset == 0:
raise ValueError("{:} does not find any data".format(checkpoint_path))
return information
def correct_time_related_info(arch_index: int, arch_infos: Dict[Text, ArchResults]):
# calibrate the latency based on NAS-Bench-201-v1_0-e61699.pth
cifar010_latency = (
api.get_latency(arch_index, "cifar10-valid", hp="200")
+ api.get_latency(arch_index, "cifar10", hp="200")
) / 2
cifar100_latency = api.get_latency(arch_index, "cifar100", hp="200")
image_latency = api.get_latency(arch_index, "ImageNet16-120", hp="200")
for hp, arch_info in arch_infos.items():
arch_info.reset_latency("cifar10-valid", None, cifar010_latency)
arch_info.reset_latency("cifar10", None, cifar010_latency)
arch_info.reset_latency("cifar100", None, cifar100_latency)
arch_info.reset_latency("ImageNet16-120", None, image_latency)
train_per_epoch_time = list(
arch_infos["12"].query("cifar10-valid", 777).train_times.values()
)
train_per_epoch_time = sum(train_per_epoch_time) / len(train_per_epoch_time)
eval_ori_test_time, eval_x_valid_time = [], []
for key, value in arch_infos["12"].query("cifar10-valid", 777).eval_times.items():
if key.startswith("ori-test@"):
eval_ori_test_time.append(value)
elif key.startswith("x-valid@"):
eval_x_valid_time.append(value)
else:
raise ValueError("-- {:} --".format(key))
eval_ori_test_time, eval_x_valid_time = float(np.mean(eval_ori_test_time)), float(
np.mean(eval_x_valid_time)
)
nums = {
"ImageNet16-120-train": 151700,
"ImageNet16-120-valid": 3000,
"ImageNet16-120-test": 6000,
"cifar10-valid-train": 25000,
"cifar10-valid-valid": 25000,
"cifar10-train": 50000,
"cifar10-test": 10000,
"cifar100-train": 50000,
"cifar100-test": 10000,
"cifar100-valid": 5000,
}
eval_per_sample = (eval_ori_test_time + eval_x_valid_time) / (
nums["cifar10-valid-valid"] + nums["cifar10-test"]
)
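# The per-epoch training time measured on cifar10-valid (seed 777) is treated
# as a per-sample cost and rescaled by each dataset's training-set size below;
# for cifar10-valid itself the division and multiplication cancel and are kept
# only for symmetry with the other datasets.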
for hp, arch_info in arch_infos.items():
arch_info.reset_pseudo_train_times(
"cifar10-valid",
None,
train_per_epoch_time
/ nums["cifar10-valid-train"]
* nums["cifar10-valid-train"],
)
arch_info.reset_pseudo_train_times(
"cifar10",
None,
train_per_epoch_time / nums["cifar10-valid-train"] * nums["cifar10-train"],
)
arch_info.reset_pseudo_train_times(
"cifar100",
None,
train_per_epoch_time / nums["cifar10-valid-train"] * nums["cifar100-train"],
)
arch_info.reset_pseudo_train_times(
"ImageNet16-120",
None,
train_per_epoch_time
/ nums["cifar10-valid-train"]
* nums["ImageNet16-120-train"],
)
arch_info.reset_pseudo_eval_times(
"cifar10-valid",
None,
"x-valid",
eval_per_sample * nums["cifar10-valid-valid"],
)
arch_info.reset_pseudo_eval_times(
"cifar10-valid", None, "ori-test", eval_per_sample * nums["cifar10-test"]
)
arch_info.reset_pseudo_eval_times(
"cifar10", None, "ori-test", eval_per_sample * nums["cifar10-test"]
)
arch_info.reset_pseudo_eval_times(
"cifar100", None, "x-valid", eval_per_sample * nums["cifar100-valid"]
)
arch_info.reset_pseudo_eval_times(
"cifar100", None, "x-test", eval_per_sample * nums["cifar100-valid"]
)
arch_info.reset_pseudo_eval_times(
"cifar100", None, "ori-test", eval_per_sample * nums["cifar100-test"]
)
arch_info.reset_pseudo_eval_times(
"ImageNet16-120",
None,
"x-valid",
eval_per_sample * nums["ImageNet16-120-valid"],
)
arch_info.reset_pseudo_eval_times(
"ImageNet16-120",
None,
"x-test",
eval_per_sample * nums["ImageNet16-120-valid"],
)
arch_info.reset_pseudo_eval_times(
"ImageNet16-120",
None,
"ori-test",
eval_per_sample * nums["ImageNet16-120-test"],
)
return arch_infos
def simplify(save_dir, save_name, nets, total, sup_config):
dataloader_dict = get_nas_bench_loaders(6)
hps, seeds = ["12", "200"], set()
for hp in hps:
sub_save_dir = save_dir / "raw-data-{:}".format(hp)
ckps = sorted(list(sub_save_dir.glob("arch-*-seed-*.pth")))
seed2names = defaultdict(list)
for ckp in ckps:
parts = re.split(r"-|\.", ckp.name)
seed2names[parts[3]].append(ckp.name)
print("DIR : {:}".format(sub_save_dir))
nums = []
for seed, xlist in seed2names.items():
seeds.add(seed)
nums.append(len(xlist))
print(" [seed={:}] there are {:} checkpoints.".format(seed, len(xlist)))
assert (
len(nets) == total == max(nums)
), "some checkpoint files are missing : {:} vs {:}".format(max(nums), total)
print("{:} start simplify the checkpoint.".format(time_string()))
datasets = ("cifar10-valid", "cifar10", "cifar100", "ImageNet16-120")
# Create the directory to save the processed data
# full_save_dir contains all benchmark files with trained weights.
# simplify_save_dir contains all benchmark files without trained weights.
full_save_dir = save_dir / (save_name + "-FULL")
simple_save_dir = save_dir / (save_name + "-SIMPLIFY")
full_save_dir.mkdir(parents=True, exist_ok=True)
simple_save_dir.mkdir(parents=True, exist_ok=True)
# all data in memory
arch2infos, evaluated_indexes = dict(), set()
end_time, arch_time = time.time(), AverageMeter()
# save the meta information
temp_final_infos = {
"meta_archs": nets,
"total_archs": total,
"arch2infos": None,
"evaluated_indexes": set(),
}
pickle_save(temp_final_infos, str(full_save_dir / "meta.pickle"))
pickle_save(temp_final_infos, str(simple_save_dir / "meta.pickle"))
for index in tqdm(range(total)):
arch_str = nets[index]
hp2info = OrderedDict()
full_save_path = full_save_dir / "{:06d}.pickle".format(index)
simple_save_path = simple_save_dir / "{:06d}.pickle".format(index)
for hp in hps:
sub_save_dir = save_dir / "raw-data-{:}".format(hp)
ckps = [
sub_save_dir / "arch-{:06d}-seed-{:}.pth".format(index, seed)
for seed in seeds
]
ckps = [x for x in ckps if x.exists()]
if len(ckps) == 0:
raise ValueError("Invalid data : index={:}, hp={:}".format(index, hp))
arch_info = account_one_arch(
index, arch_str, ckps, datasets, dataloader_dict
)
hp2info[hp] = arch_info
hp2info = correct_time_related_info(index, hp2info)
evaluated_indexes.add(index)
to_save_data = OrderedDict(
{"12": hp2info["12"].state_dict(), "200": hp2info["200"].state_dict()}
)
pickle_save(to_save_data, str(full_save_path))
for hp in hps:
hp2info[hp].clear_params()
to_save_data = OrderedDict(
{"12": hp2info["12"].state_dict(), "200": hp2info["200"].state_dict()}
)
pickle_save(to_save_data, str(simple_save_path))
arch2infos[index] = to_save_data
# measure elapsed time
arch_time.update(time.time() - end_time)
end_time = time.time()
need_time = "{:}".format(
convert_secs2time(arch_time.avg * (total - index - 1), True)
)
# print('{:} {:06d}/{:06d} : still need {:}'.format(time_string(), index, total, need_time))
print("{:} {:} done.".format(time_string(), save_name))
final_infos = {
"meta_archs": nets,
"total_archs": total,
"arch2infos": arch2infos,
"evaluated_indexes": evaluated_indexes,
}
save_file_name = save_dir / "{:}.pickle".format(save_name)
pickle_save(final_infos, str(save_file_name))
# move the benchmark file to a new path
hd5sum = get_md5_file(str(save_file_name) + ".pbz2")
hd5_file_name = save_dir / "{:}-{:}.pickle.pbz2".format(NATS_TSS_BASE_NAME, hd5sum)
shutil.move(str(save_file_name) + ".pbz2", hd5_file_name)
print(
"Save {:} / {:} architecture results into {:} -> {:}.".format(
len(evaluated_indexes), total, save_file_name, hd5_file_name
)
)
# move the directory to a new path
hd5_full_save_dir = save_dir / "{:}-{:}-full".format(NATS_TSS_BASE_NAME, hd5sum)
hd5_simple_save_dir = save_dir / "{:}-{:}-simple".format(NATS_TSS_BASE_NAME, hd5sum)
shutil.move(full_save_dir, hd5_full_save_dir)
shutil.move(simple_save_dir, hd5_simple_save_dir)
# save the meta information for simple and full
# final_infos['arch2infos'] = None
# final_infos['evaluated_indexes'] = set()
def traverse_net(max_node):
aa_nas_bench_ss = get_search_spaces("cell", "nats-bench")
archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
print(
"There are {:} archs vs {:}.".format(
len(archs), len(aa_nas_bench_ss) ** ((max_node - 1) * max_node / 2)
)
)
random.seed(88) # please do not change this line for reproducibility
random.shuffle(archs)
assert (
archs[0].tostr()
== "|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|"
), "please check the 0-th architecture : {:}".format(archs[0])
assert (
archs[9].tostr()
== "|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|"
), "please check the 9-th architecture : {:}".format(archs[9])
assert (
archs[123].tostr()
== "|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|"
), "please check the 123-th architecture : {:}".format(archs[123])
return [x.tostr() for x in archs]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="NATS-Bench (topology search space)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--base_save_dir",
type=str,
default="./output/NATS-Bench-topology",
help="The base-name of folder to save checkpoints and log.",
)
parser.add_argument(
"--max_node", type=int, default=4, help="The maximum node in a cell."
)
parser.add_argument(
"--channel", type=int, default=16, help="The number of channels."
)
parser.add_argument(
"--num_cells", type=int, default=5, help="The number of cells in one stage."
)
parser.add_argument("--check_N", type=int, default=15625, help="For safety.")
parser.add_argument(
"--save_name", type=str, default="process", help="The save directory."
)
args = parser.parse_args()
nets = traverse_net(args.max_node)
if len(nets) != args.check_N:
raise ValueError(
"Pre-num-check failed : {:} vs {:}".format(len(nets), args.check_N)
)
save_dir = Path(args.base_save_dir)
simplify(
save_dir,
args.save_name,
nets,
args.check_N,
{"name": "infer.tiny", "channel": args.channel, "num_cells": args.num_cells},
)

blob_id: d99044df2d9c67439c322047fb18a8697d20d3e5
directory_id: fa3f6d4e9169fb95f828013d179d03accdff381b
path: /grr/core/grr_response_core/lib/rdfvalues/__init__.py
content_id: 97f0a8767f3e910dd4b242b2c4e782de9d30f367
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: google/grr
snapshot_id: c51a2bd251ed2f7adae538541990a2cc01fdcc8c
revision_id: 44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
branch_name: refs/heads/master
visit_date: 2023-09-05T20:02:36.823914
revision_date: 2023-07-26T09:34:09
committer_date: 2023-07-26T09:34:09
github_id: 14,909,673
star_events_count: 4,683
fork_events_count: 927
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-26T09:34:10
gha_created_at: 2013-12-04T00:17:53
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 120
extension: py
filename: __init__.py
content:
#!/usr/bin/env python
"""AFF4 RDFValue implementations.
This module contains the various RDFValue implementations.
"""

blob_id: 5c7816c873138bb6fc8d689faef1f4c417f641a2
directory_id: 5db0fab37c2b8a618d85d3b60fab9f806c416474
path: /src/python/pants/backend/docker/goals/tailor_test.py
content_id: 15a3700ddc62204739f51b3f201fa3f8f95fb414
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: pantsbuild/pants
snapshot_id: 4988d1ac5474ec95f94ce2218aeb759401e4b011
revision_id: 98cbda8545f0d58c586ed2daa76fefd729d5e0d5
branch_name: refs/heads/main
visit_date: 2023-09-05T03:44:17.646899
revision_date: 2023-09-01T19:52:09
committer_date: 2023-09-01T19:52:09
github_id: 7,209,075
star_events_count: 2,708
fork_events_count: 593
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:33:33
gha_created_at: 2012-12-17T17:39:04
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,861
extension: py
filename: tailor_test.py
content:
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.docker.goals.tailor import PutativeDockerTargetsRequest
from pants.backend.docker.goals.tailor import rules as docker_tailor_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
def test_find_putative_targets() -> None:
rule_runner = RuleRunner(
rules=[
*docker_tailor_rules(),
QueryRule(PutativeTargets, [PutativeDockerTargetsRequest, AllOwnedSources]),
],
target_types=[DockerImageTarget],
)
rule_runner.write_files(
{
"src/docker_ok/Dockerfile": "",
"src/docker_orphan/Dockerfile": "",
"src/docker_orphan/Dockerfile.two": "",
}
)
pts = rule_runner.request(
PutativeTargets,
[
PutativeDockerTargetsRequest(("src/docker_ok", "src/docker_orphan")),
AllOwnedSources(["src/docker_ok/Dockerfile"]),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
DockerImageTarget,
path="src/docker_orphan",
name="docker",
triggering_sources=["Dockerfile"],
),
PutativeTarget.for_target_type(
DockerImageTarget,
path="src/docker_orphan",
name="docker",
triggering_sources=["Dockerfile.two"],
kwargs={"source": "Dockerfile.two"},
),
]
)
== pts
)

blob_id: 3855aedeb25b84e3f8e838af4d8e3978efa6ee17
directory_id: 4daab5ba90185bae65169ebb8183c635385ab3f8
path: /examples/tutorials/j_NEB.py
content_id: 0fdca4105a307e988ce2cb751547a264007076c6
detected_licenses: ["MIT"]
license_type: permissive
repo_name: duartegroup/autodE
snapshot_id: bcf69440bd04411f97d39df0df0ae1f2bf6feb8c
revision_id: 4d6667592f083dfcf38de6b75c4222c0a0e7b60b
branch_name: refs/heads/master
visit_date: 2023-09-01T15:08:16.028378
revision_date: 2023-07-25T08:09:05
committer_date: 2023-07-25T08:09:05
github_id: 196,085,570
star_events_count: 132
fork_events_count: 42
gha_license_id: MIT
gha_event_created_at: 2023-09-12T15:20:54
gha_created_at: 2019-07-09T21:20:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 965
extension: py
filename: j_NEB.py
content:
import multiprocessing
import autode as ade
orca = ade.methods.ORCA()
# Set the keywords so autodE can extract gradients at PBE/def2-SV(P)
orca.keywords.grad = ["PBE", "def2-SV(P)", "EnGrad"]
if multiprocessing.cpu_count() < 10 or not orca.is_available:
    exit("This example requires an ORCA install and 10 processing cores")
# Nudged elastic band (NEB) calculations are available using all methods
# that support gradient evaluations (all of them!). For example, to
# set up a set of images and relax to the ~minimum energy path for a
# Diels Alder reaction between benzoquinone and cyclopentadiene
neb = ade.NEB.from_end_points(
ade.Molecule("_data/DielsAlder/reactant.xyz"),
ade.Molecule("_data/DielsAlder/product.xyz"),
num=5,
)
neb.calculate(method=orca, n_cores=10)
# will have generated a plot of the relaxation, along with a .xyz
# trajectory of the initial and final NEB path
# To use a climbing image NEB replace ade.NEB with ade.CINEB
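# For example, a climbing-image variant of the exact calculation above would be
# (a sketch; per the comment above, CINEB mirrors the NEB interface):
#
#     cineb = ade.CINEB.from_end_points(
#         ade.Molecule("_data/DielsAlder/reactant.xyz"),
#         ade.Molecule("_data/DielsAlder/product.xyz"),
#         num=5,
#     )
#     cineb.calculate(method=orca, n_cores=10)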

blob_id: ab65c7d7c553e8605c966bceb49c517129922b95
directory_id: ce32e0e1b9568c710a3168abc3c638d6f9f6c31b
path: /vnpy/gateway/okexo/okexo_gateway.py
content_id: 237f111aadb0705be4167d733d9651349191c9f5
detected_licenses: ["MIT"]
license_type: permissive
repo_name: msincenselee/vnpy
snapshot_id: 55ae76ca32cae47369a66bd2d6589c13d7a0bdd4
revision_id: 7f4fd3cd202712b083ed7dc2f346ba4bb1bda6d7
branch_name: refs/heads/vnpy2
visit_date: 2022-05-19T10:06:55.504408
revision_date: 2022-03-19T15:26:01
committer_date: 2022-03-19T15:26:01
github_id: 38,525,806
star_events_count: 359
fork_events_count: 158
gha_license_id: MIT
gha_event_created_at: 2020-09-09T00:09:12
gha_created_at: 2015-07-04T07:27:46
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 27,129
extension: py
filename: okexo_gateway.py
content:
"""
Author: KeKe
"""
import hashlib
import hmac
import sys
import time
import json
import base64
import zlib
from copy import copy
from datetime import datetime, timedelta
from threading import Lock
from urllib.parse import urlencode
from typing import Dict, List
from requests import ConnectionError
from vnpy.event.engine import EventEngine
from vnpy.api.rest import Request, RestClient
from vnpy.api.websocket import WebsocketClient
from vnpy.trader.constant import (
Direction,
Exchange,
OrderType,
Product,
Status,
Interval
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
REST_HOST: str = "https://www.okex.com"
WEBSOCKET_HOST: str = "wss://real.okex.com:8443/ws/v3"
STATE_OKEXO2VT: Dict[str, Status] = {
"0": Status.NOTTRADED,
"-2": Status.NOTTRADED,
"1": Status.PARTTRADED,
"2": Status.ALLTRADED,
"-1": Status.CANCELLED,
}
ORDERTYPE_OKEXO2VT: Dict[str, OrderType] = {
"0": OrderType.LIMIT,
"1": OrderType.MARKET,
}
SIDE_OKEXO2VT: Dict[str, Direction] = {
"buy": Direction.LONG,
"sell": Direction.SHORT,
}
SIDE_VT2OKEXO: Dict[Direction, str] = {
Direction.LONG: "buy",
Direction.SHORT: "sell",
}
INTERVAL_VT2OKEXO: Dict[Interval, str] = {
Interval.MINUTE: "60",
Interval.HOUR: "3600",
Interval.DAILY: "86400",
}
underlyings: set = set()
class OkexoGateway(BaseGateway):
"""
VN Trader Gateway for OKEX connection.
"""
default_setting = {
"API Key": "",
"Secret Key": "",
"Passphrase": "",
"会话数": 3,
"代理地址": "",
"代理端口": "",
}
exchanges: List[Exchange] = [Exchange.OKEX]
def __init__(self, event_engine: EventEngine):
"""Constructor"""
super().__init__(event_engine, "OKEXO")
self.rest_api = OkexoRestApi(self)
self.ws_api = OkexoWebsocketApi(self)
self.orders: Dict[str, OrderData] = {}
def connect(self, setting: dict) -> None:
""""""
key = setting["API Key"]
secret = setting["Secret Key"]
passphrase = setting["Passphrase"]
session_number = setting["会话数"]
proxy_host = setting["代理地址"]
proxy_port = setting["代理端口"]
if proxy_port.isdigit():
proxy_port = int(proxy_port)
else:
proxy_port = 0
self.rest_api.connect(key, secret, passphrase,
session_number, proxy_host, proxy_port)
self.ws_api.connect(key, secret, passphrase, proxy_host, proxy_port)
def subscribe(self, req: SubscribeRequest) -> None:
""""""
self.ws_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.rest_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> Request:
""""""
self.rest_api.cancel_order(req)
def query_account(self) -> None:
""""""
pass
def query_position(self) -> None:
""""""
pass
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
return self.rest_api.query_history(req)
def close(self) -> None:
""""""
self.rest_api.stop()
self.ws_api.stop()
def on_order(self, order: OrderData) -> None:
""""""
self.orders[order.orderid] = order
super().on_order(order)
def get_order(self, orderid: str):
""""""
return self.orders.get(orderid, None)
class OkexoRestApi(RestClient):
"""
OKEXO REST API
"""
def __init__(self, gateway: "OkexoGateway"):
""""""
super().__init__()
self.gateway: OkexoGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.key: str = ""
self.secret: str = ""
self.passphrase: str = ""
self.order_count: int = 10000
self.order_count_lock: Lock = Lock()
self.connect_time: int = 0
def sign(self, request: Request) -> Request:
"""
Generate OKEX V3 signature.
"""
# Sign
timestamp = get_timestamp()
request.data = json.dumps(request.data)
if request.params:
path = request.path + "?" + urlencode(request.params)
else:
path = request.path
msg = timestamp + request.method + path + request.data
signature = generate_signature(msg, self.secret)
# Add headers
request.headers = {
"OK-ACCESS-KEY": self.key,
"OK-ACCESS-SIGN": signature,
"OK-ACCESS-TIMESTAMP": timestamp,
"OK-ACCESS-PASSPHRASE": self.passphrase,
"Content-Type": "application/json"
}
return request
def connect(
self,
key: str,
secret: str,
passphrase: str,
session_number: int,
proxy_host: str,
proxy_port: int,
) -> None:
"""
Initialize connection to REST server.
"""
self.key = key
self.secret = secret.encode()
self.passphrase = passphrase
self.connect_time = int(datetime.now().strftime("%y%m%d%H%M%S"))
self.init(REST_HOST, proxy_host, proxy_port)
self.start(session_number)
self.gateway.write_log("REST API started successfully")
self.query_time()
self.query_underlying()
def _new_order_id(self) -> int:
with self.order_count_lock:
self.order_count += 1
return self.order_count
def send_order(self, req: OrderRequest) -> str:
""""""
# Need both offset and direction for sending order.
orderid = f"a{self.connect_time}{self._new_order_id()}"
if req.direction == Direction.LONG:
side = "buy"
else:
side = "sell"
data = {
"client_oid": orderid,
"instrument_id": req.symbol,
"price": str(req.price),
"size": str(int(req.volume)),
"side": side,
}
if req.type == OrderType.MARKET:
data["match_price"] = "1"
else:
data["match_price"] = "0"
order = req.create_order_data(orderid, self.gateway_name)
self.add_request(
"POST",
"/api/option/v3/order",
callback=self.on_send_order,
data=data,
extra=order,
on_failed=self.on_send_order_failed,
on_error=self.on_send_order_error,
)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest) -> Request:
""""""
item = req.symbol.split("-")
underlying = f"{item[0]}-{item[1]}"
path = f"/api/option/v3/cancel_order/{underlying}/{req.orderid}"
self.add_request(
"POST",
path,
callback=self.on_cancel_order,
on_error=self.on_cancel_order_error,
on_failed=self.on_cancel_order_failed,
extra=req
)
def query_underlying(self) -> Request:
""""""
self.add_request(
"GET",
"/api/option/v3/underlying",
callback=self.on_query_underlying
)
def query_contract(self) -> Request:
""""""
for underlying in underlyings:
self.add_request(
"GET",
f"/api/option/v3/instruments/{underlying}",
callback=self.on_query_contract
)
def query_account(self) -> Request:
""""""
for underlying in underlyings:
self.add_request(
"GET",
f"/api/option/v3/accounts/{underlying}",
callback=self.on_query_account
)
def query_order(self) -> Request:
""""""
for underlying in underlyings:
# get waiting orders
self.add_request(
"GET",
f"/api/option/v3/orders/{underlying}?state=0",
callback=self.on_query_order
)
# get part traded orders
self.add_request(
"GET",
f"/api/option/v3/orders/{underlying}?state=1",
callback=self.on_query_order
)
def query_position(self) -> Request:
""""""
for underlying in underlyings:
self.add_request(
"GET",
f"/api/option/v3/{underlying}/position",
callback=self.on_query_position
)
def query_time(self) -> Request:
""""""
self.add_request(
"GET",
"/api/general/v3/time",
callback=self.on_query_time
)
def on_query_underlying(self, data: List[str], request: Request) -> None:
""""""
for underlying in data:
underlyings.add(underlying)
self.gateway.write_log("Option underlying data query succeeded")
self.query_contract()
def on_query_contract(self, data: List, request: Request) -> None:
""""""
if not data:
return
for instrument_data in data:
symbol = instrument_data["instrument_id"]
contract = ContractData(
symbol=symbol,
exchange=Exchange.OKEX,
name=symbol,
product=Product.OPTION,
size=float(instrument_data["lot_size"]),
pricetick=float(instrument_data["tick_size"]),
option_strike=int(instrument_data["strike"]),
option_underlying=instrument_data["underlying"],
option_type=instrument_data["option_type"],
option_expiry=datetime.strptime(instrument_data["delivery"], "%Y-%m-%dT%H:%M:%S.%fZ"),
option_portfolio=instrument_data["underlying"] + "_O",
option_index=instrument_data["strike"],
history_data=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
self.gateway.write_log("Option contract data query succeeded")
# Start websocket api after instruments data collected
self.gateway.ws_api.start()
# and query pending orders
self.query_account()
self.query_position()
self.query_order()
def on_query_account(self, data: dict, request: Request) -> None:
""""""
equity = float(data["equity"])
if equity:
account = AccountData(
accountid=data["underlying"],
balance=float(data["equity"]),
frozen=float(data.get("margin_for_unfilled", 0)),
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
self.gateway.write_log(f"{account.accountid} account balance query succeeded")
def on_query_position(self, data: dict, request: Request) -> None:
""""""
if not data["holding"]:
return
for pos_data in data["holding"]:
pos = PositionData(
symbol=pos_data["instrument_id"],
exchange=Exchange.OKEX,
direction=Direction.NET,
volume=int(pos_data["position"]),
frozen=float(pos_data["position"]) - float(pos_data["avail_position"]),  # frozen = total minus available
price=float(pos_data["avg_cost"]),
pnl=float(pos_data["realized_pnl"]),
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def on_query_order(self, data: dict, request: Request) -> None:
""""""
for order_data in data["order_info"]:
direction = SIDE_OKEXO2VT[order_data["side"]]
order = OrderData(
symbol=order_data["instrument_id"],
exchange=Exchange.OKEX,
type=ORDERTYPE_OKEXO2VT[order_data["order_type"]],
orderid=order_data["client_oid"],
direction=direction,
traded=int(order_data["filled_qty"]),
price=float(order_data["price"]),
volume=float(order_data["size"]),
time=utc_to_local(order_data["timestamp"]).strftime("%H:%M:%S"),
status=STATE_OKEXO2VT[order_data["state"]],
gateway_name=self.gateway_name,
)
self.gateway.on_order(order)
def on_query_time(self, data: dict, request: Request) -> None:
""""""
server_time = data["iso"]
local_time = datetime.utcnow().isoformat()
msg = f"Server time: {server_time}, local time: {local_time}"
self.gateway.write_log(msg)
def on_send_order_failed(self, status_code: str, request: Request) -> None:
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.REJECTED
order.time = datetime.now().strftime("%H:%M:%S.%f")
self.gateway.on_order(order)
msg = f"Order failed, status code: {status_code}, message: {request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
) -> None:
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_send_order(self, data: dict, request: Request) -> None:
"""
Websocket will push a new order status
"""
order = request.extra
error_msg = data["error_message"]
if error_msg:
order.status = Status.REJECTED
self.gateway.on_order(order)
self.gateway.write_log(f"Order failed: {error_msg}")
def on_cancel_order_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
) -> None:
"""
Callback when cancelling order failed on server.
"""
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data, request) -> None:
"""
Websocket will push a new order status
"""
pass
def on_cancel_order_failed(self, status_code: int, request: Request) -> None:
"""
If cancel failed, mark order status to be rejected.
"""
req = request.extra
order = self.gateway.get_order(req.orderid)
if order:
order.status = Status.REJECTED
self.gateway.on_order(order)
def on_failed(self, status_code: int, request: Request) -> None:
"""
Callback to handle request failed.
"""
msg = f"Request failed, status code: {status_code}, message: {request.response.text}"
self.gateway.write_log(msg)
def on_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
) -> None:
"""
Callback to handle request exception.
"""
msg = f"Exception triggered, type: {exception_type}, message: {exception_value}"
self.gateway.write_log(msg)
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb, request)
)
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
buf = {}
end_time = None
for i in range(10):
path = f"/api/option/v3/instruments/{req.symbol}/candles"
# Create query params
params = {
"granularity": INTERVAL_VT2OKEXO[req.interval]
}
if end_time:
params["end"] = end_time
# Get response from server
resp = self.request(
"GET",
path,
params=params
)
# Break if request failed with other status code
if resp.status_code // 100 != 2:
msg = f"Failed to query history data, status code: {resp.status_code}, message: {resp.text}"
self.gateway.write_log(msg)
break
else:
data = resp.json()
if not data:
msg = "No history data returned"
self.gateway.write_log(msg)
break
for row in data:
ts, o, h, l, c, v, _ = row
dt = utc_to_local(ts)
bar = BarData(
symbol=req.symbol,
exchange=req.exchange,
datetime=dt,
interval=req.interval,
volume=float(v),
open_price=float(o),
high_price=float(h),
low_price=float(l),
close_price=float(c),
gateway_name=self.gateway_name
)
buf[bar.datetime] = bar
begin = data[-1][0]
end = data[0][0]
msg = f"History data query succeeded, {req.symbol} - {req.interval.value}, {begin} - {end}"
self.gateway.write_log(msg)
# Update start time
end_time = begin
index = list(buf.keys())
index.sort()
history = [buf[i] for i in index]
return history
class OkexoWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super().__init__()
self.ping_interval: int = 20  # OKEX drops idle connections after 30 seconds, so ping every 20
self.gateway: OkexoGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.key: str = ""
self.secret: str = ""
self.passphrase: str = ""
self.trade_count: int = 10000
self.connect_time: int = 0
self.callbacks: Dict[str, callable] = {}
self.ticks: Dict[str, TickData] = {}
def connect(
self,
key: str,
secret: str,
passphrase: str,
proxy_host: str,
proxy_port: int
) -> None:
""""""
self.key = key
self.secret = secret.encode()
self.passphrase = passphrase
self.connect_time = int(datetime.now().strftime("%y%m%d%H%M%S"))
self.init(WEBSOCKET_HOST, proxy_host, proxy_port)
def unpack_data(self, data) -> json.JSONDecoder:
""""""
return json.loads(zlib.decompress(data, -zlib.MAX_WBITS))
def subscribe(self, req: SubscribeRequest) -> None:
"""
Subscribe to tick data update.
"""
tick = TickData(
symbol=req.symbol,
exchange=req.exchange,
name=req.symbol,
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[req.symbol] = tick
channel_ticker = f"option/ticker:{req.symbol}"
channel_depth = f"option/depth5:{req.symbol}"
self.callbacks[channel_ticker] = self.on_ticker
self.callbacks[channel_depth] = self.on_depth
req = {
"op": "subscribe",
"args": [channel_ticker, channel_depth]
}
self.send_packet(req)
def on_connected(self) -> None:
""""""
self.gateway.write_log("Websocket API connected")
self.login()
def on_disconnected(self) -> None:
""""""
self.gateway.write_log("Websocket API disconnected")
def on_packet(self, packet: dict) -> None:
""""""
if "event" in packet:
event = packet["event"]
if event == "subscribe":
return
elif event == "error":
msg = packet["message"]
self.gateway.write_log(f"Websocket API request error: {msg}")
elif event == "login":
self.on_login(packet)
else:
channel = packet["table"]
data = packet["data"]
callback = self.callbacks.get(channel, None)
if callback:
for d in data:
callback(d)
def on_error(self, exception_type: type, exception_value: Exception, tb) -> None:
""""""
msg = f"Exception triggered, type: {exception_type}, message: {exception_value}"
self.gateway.write_log(msg)
sys.stderr.write(self.exception_detail(exception_type, exception_value, tb))
def login(self) -> None:
"""
Need to log in before subscribing to websocket topics.
"""
timestamp = str(time.time())
msg = timestamp + "GET" + "/users/self/verify"
signature = generate_signature(msg, self.secret)
req = {
"op": "login",
"args": [
self.key,
self.passphrase,
timestamp,
signature.decode("utf-8")
]
}
self.send_packet(req)
self.callbacks["login"] = self.on_login
def subscribe_topic(self) -> None:
"""
Subscribe to all private topics.
"""
self.callbacks["option/ticker"] = self.on_ticker
self.callbacks["option/depth5"] = self.on_depth
self.callbacks["option/account"] = self.on_account
self.callbacks["option/order"] = self.on_order
self.callbacks["option/position"] = self.on_position
# Subscribe to order update
channels = []
for underlying in underlyings:
channel = f"option/order:{underlying}"
channels.append(channel)
req = {
"op": "subscribe",
"args": channels
}
self.send_packet(req)
# Subscribe to account update
channels = []
for underlying in underlyings:
channel = f"option/account:{underlying}"
channels.append(channel)
req = {
"op": "subscribe",
"args": channels
}
self.send_packet(req)
# Subscribe to position update
channels = []
for underlying in underlyings:
channel = f"option/position:{underlying}"
channels.append(channel)
req = {
"op": "subscribe",
"args": channels
}
self.send_packet(req)
def on_login(self, data: dict) -> None:
""""""
success = data.get("success", False)
if success:
self.gateway.write_log("Websocket API login succeeded")
self.subscribe_topic()
else:
self.gateway.write_log("Websocket API login failed")
def on_ticker(self, data: dict) -> None:
""""""
symbol = data["instrument_id"]
tick = self.ticks.get(symbol, None)
if not tick:
return
tick.last_price = float(data["last"])
tick.high_price = float(data["high_24h"])
tick.low_price = float(data["low_24h"])
tick.volume = float(data["volume_24h"])
tick.datetime = utc_to_local(data["timestamp"])
self.gateway.on_tick(copy(tick))
def on_depth(self, data: dict) -> None:
""""""
symbol = data["instrument_id"]
tick = self.ticks.get(symbol, None)
if not tick:
return
bids = data["bids"]
asks = data["asks"]
for n, buf in enumerate(bids):
price, volume, _, __ = buf
tick.__setattr__("bid_price_%s" % (n + 1), price)
tick.__setattr__("bid_volume_%s" % (n + 1), volume)
for n, buf in enumerate(asks):
price, volume, _, __ = buf
tick.__setattr__("ask_price_%s" % (n + 1), price)
tick.__setattr__("ask_volume_%s" % (n + 1), volume)
tick.datetime = utc_to_local(data["timestamp"])
self.gateway.on_tick(copy(tick))
def on_order(self, data: dict) -> None:
""""""
direction = SIDE_OKEXO2VT[data["side"]]
order = OrderData(
symbol=data["instrument_id"],
exchange=Exchange.OKEX,
type=ORDERTYPE_OKEXO2VT[data["order_type"]],
orderid=data["client_oid"],
direction=direction,
price=float(data["price"]),
volume=float(data["size"]),
traded=float(data["filled_qty"]),
time=utc_to_local(data["timestamp"]).strftime("%H:%M:%S"),
status=STATE_OKEXO2VT[data["state"]],
gateway_name=self.gateway_name,
)
self.gateway.on_order(copy(order))
trade_volume = data.get("last_fill_qty", 0)
if not trade_volume or float(trade_volume) == 0:
return
self.trade_count += 1
tradeid = f"{self.connect_time}{self.trade_count}"
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=tradeid,
direction=order.direction,
offset=order.offset,
price=float(data["last_fill_px"]),
volume=float(trade_volume),
time=order.time,
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def on_account(self, data: dict) -> None:
""""""
account = AccountData(
accountid=data["underlying"],
balance=float(data["equity"]),
frozen=float(data.get("margin_for_unfilled", 0)),
gateway_name=self.gateway_name
)
self.gateway.on_account(account)
def on_position(self, data: dict) -> None:
""""""
pos = PositionData(
symbol=data["instrument_id"],
exchange=Exchange.OKEX,
direction=Direction.NET,
volume=int(data["position"]),
frozen=float(data["position"]) - float(data["avail_position"]),  # frozen = total minus available
price=float(data["avg_cost"]),
pnl=float(data["realized_pnl"]),
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def generate_signature(msg: str, secret_key: str) -> bytes:
"""OKEX V3 signature"""
return base64.b64encode(hmac.new(secret_key, msg.encode(), hashlib.sha256).digest())
def get_timestamp() -> str:
""""""
now = datetime.utcnow()
timestamp = now.isoformat("T", "milliseconds")
return timestamp + "Z"
def utc_to_local(timestamp) -> datetime:
"""Convert an OKEX UTC timestamp string to local (UTC+8) time."""
time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
local_time = time + timedelta(hours=8)
return local_time
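# Signing sketch: sign() and login() above both build the prehash string
# timestamp + method + request_path + body and feed it to generate_signature(),
# which returns base64(HMAC-SHA256(secret, prehash)). For example (placeholder
# secret, not a real credential):
#
#     msg = get_timestamp() + "GET" + "/api/general/v3/time"
#     sig = generate_signature(msg, b"my-secret-key")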

blob_id: f1acb7b7ecdd1d2e80befc09c2c6b587592a1ae3
directory_id: aad43bc0dfa9b0d5371f8b21bd9aaa1a8bf29b9c
path: /src/sagemaker_sklearn_container/serving.py
content_id: e0eef7825a57e0766f20f8f3f0b2d6b831049caa
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: aws/sagemaker-scikit-learn-container
snapshot_id: da4f0ffe3aca78cd3010b78a84f953b9ab8bdb9e
revision_id: 95efcdae49dc6cfc99f3d21012446e0229162274
branch_name: refs/heads/master
visit_date: 2023-09-04T10:57:35.709876
revision_date: 2023-08-11T17:26:03
committer_date: 2023-08-11T17:26:03
github_id: 155,597,776
star_events_count: 147
fork_events_count: 124
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-11T17:26:37
gha_created_at: 2018-10-31T17:28:35
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,116
extension: py
filename: serving.py
content:
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import importlib
import logging
import numpy as np
import sagemaker_sklearn_container.exceptions as exc
from sagemaker_containers.beta.framework import (
content_types, encoders, env, modules, transformer, worker, server)
from sagemaker_sklearn_container.serving_mms import start_model_server
logging.basicConfig(format='%(asctime)s %(levelname)s - %(name)s - %(message)s', level=logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARN)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def is_multi_model():
return os.environ.get('SAGEMAKER_MULTI_MODEL')
def default_model_fn(model_dir):
"""Loads a model. For Scikit-learn, a default function to load a model is not provided.
Users should provide customized model_fn() in script.
Args:
model_dir: a directory where model is saved.
Returns: A Scikit-learn model.
"""
return transformer.default_model_fn(model_dir)
def default_input_fn(input_data, content_type):
"""Takes request data and de-serializes the data into an object for prediction.
When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server,
the model server receives two pieces of information:
- The request Content-Type, for example "application/json"
- The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size.
The input_fn is responsible for taking the request data and pre-processing it before prediction.
Args:
input_data (obj): the request data.
content_type (str): the request Content-Type.
Returns:
(obj): data ready for prediction.
"""
np_array = encoders.decode(input_data, content_type)
return np_array.astype(np.float32) if content_type in content_types.UTF8_TYPES else np_array
def default_predict_fn(input_data, model):
"""A default predict_fn for Scikit-learn. Calls a model on data deserialized in input_fn.
Args:
input_data: input data (Numpy array) for prediction deserialized by input_fn
model: Scikit-learn model loaded in memory by model_fn
Returns: a prediction
"""
output = model.predict(input_data)
return output
def default_output_fn(prediction, accept):
"""Function responsible for serializing the prediction for the response.
Args:
prediction (obj): prediction returned by predict_fn .
accept (str): accept content-type expected by the client.
Returns:
(worker.Response): a Flask response object with the following args:
* Args:
response: the serialized data to return
accept: the content-type that the data was transformed to.
"""
return worker.Response(encoders.encode(prediction, accept), accept, mimetype=accept)
def _user_module_transformer(user_module):
model_fn = getattr(user_module, "model_fn", default_model_fn)
input_fn = getattr(user_module, "input_fn", None)
predict_fn = getattr(user_module, "predict_fn", None)
output_fn = getattr(user_module, "output_fn", None)
transform_fn = getattr(user_module, "transform_fn", None)
if transform_fn and (input_fn or predict_fn or output_fn):
raise exc.UserError("Cannot use transform_fn implementation with input_fn, predict_fn, and/or output_fn")
if transform_fn is not None:
return transformer.Transformer(model_fn=model_fn, transform_fn=transform_fn)
else:
return transformer.Transformer(
model_fn=model_fn,
input_fn=input_fn or default_input_fn,
predict_fn=predict_fn or default_predict_fn,
output_fn=output_fn or default_output_fn,
)
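# A sketch of the user-script contract accepted by _user_module_transformer():
# a script may override any of model_fn/input_fn/predict_fn/output_fn, or
# provide a single transform_fn instead. The joblib model filename below is an
# illustrative assumption:
#
#     import os
#     import joblib
#
#     def model_fn(model_dir):
#         return joblib.load(os.path.join(model_dir, "model.joblib"))
#
#     def predict_fn(input_data, model):
#         return model.predict(input_data)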
def _user_module_execution_parameters_fn(user_module):
return getattr(user_module, 'execution_parameters_fn', None)
def import_module(module_name, module_dir):
try: # if module_name already exists, use the existing one
user_module = importlib.import_module(module_name)
except ImportError: # if the module has not been loaded, 'modules' downloads and installs it.
user_module = modules.import_module(module_dir, module_name)
except Exception: # this shouldn't happen
logger.info("Encountered an unexpected error.")
raise
user_module_transformer = _user_module_transformer(user_module)
user_module_transformer.initialize()
return user_module_transformer, _user_module_execution_parameters_fn(user_module)
app = None
def main(environ, start_response):
global app
if app is None:
serving_env = env.ServingEnv()
user_module_transformer, execution_parameters_fn = import_module(serving_env.module_name,
serving_env.module_dir)
app = worker.Worker(transform_fn=user_module_transformer.transform,
module_name=serving_env.module_name,
execution_parameters_fn=execution_parameters_fn)
return app(environ, start_response)
def serving_entrypoint():
"""Start Inference Server.
NOTE: If the inference server is multi-model, MxNet Model Server will be used as the base server. Otherwise,
GUnicorn is used as the base server.
"""
if is_multi_model():
start_model_server()
else:
server.start(env.ServingEnv().framework_module)

blob_id: 622e15ce3c6836f8e6b64ad903024189da6adb48
directory_id: ed865aed525556fd7aa5ac5a024af720de8438e3
path: /api/client/src/test/test_login_nodes_state.py
content_id: 22c41dcf7ea1dbaadb387b1e30a3d13a8246b1f7
detected_licenses: ["Python-2.0", "GPL-1.0-or-later", "MPL-2.0", "MIT", "LicenseRef-scancode-python-cwi", "BSD-3-Clause", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-free-unknown", "Apache-2.0", "MIT-0", "BSD-2-Clause"]
license_type: permissive
repo_name: aws/aws-parallelcluster
snapshot_id: 7bb33a6e175168f63a1e0acb1a9a7e9cbc405eff
revision_id: a213978a09ea7fc80855bf55c539861ea95259f9
branch_name: refs/heads/develop
visit_date: 2023-09-05T15:12:18.533270
revision_date: 2023-09-05T14:38:59
committer_date: 2023-09-05T14:38:59
github_id: 19,718,034
star_events_count: 520
fork_events_count: 226
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T15:56:30
gha_created_at: 2014-05-12T22:42:19
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 937
extension: py
filename: test_login_nodes_state.py
content:
"""
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.7.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
from assertpy import assert_that
import pytest
import pcluster_client
import re
from pcluster_client.model.login_nodes_state import LoginNodesState
class TestLoginNodesState(unittest.TestCase):
    """LoginNodesState unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testValidLoginNodesState(self):
        LoginNodesState("active")

    def testInvalidLoginNodesState(self):
        with pytest.raises(
            BaseException,
            match=re.escape(r"Invalid value for `value` (invalid_value), "
                            r"must be one of ['pending', 'active', 'failed']")
        ):
            LoginNodesState("invalid_value")


if __name__ == '__main__':
    unittest.main()

blob_id: 03e27310ca9263fb4f283e8f1968a00657406422
directory_id: 4a7f8042d9d0591baf9a776320229b255d95562d
path: /shopify/resources/redirect.py
content_id: 041ecb818bd66d978ade64cc893d48bd0b09e3d9
detected_licenses: ["MIT"]
license_type: permissive
repo_name: Shopify/shopify_python_api
snapshot_id: 56a175187ee22ede2bc1d26eb5b101989ae73410
revision_id: 5f295932bebbdde1835d35c4865093ff83564cdc
branch_name: refs/heads/master
visit_date: 2023-09-04T14:44:28.214779
revision_date: 2023-04-12T16:10:00
committer_date: 2023-04-12T16:10:00
github_id: 2,249,127
star_events_count: 1,029
fork_events_count: 332
gha_license_id: MIT
gha_event_created_at: 2023-09-14T20:51:42
gha_created_at: 2011-08-22T14:49:21
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 79
extension: py
filename: redirect.py
content:
from ..base import ShopifyResource
class Redirect(ShopifyResource):
    pass

blob_id: 47a27ed6c39d56d0ecf62dc39b344b6710c77e85
directory_id: 1ffa0900d91ff7dc76e933489d354252edc5dbb9
path: /hardware/companions/n2adr/bom/mkstandardbom.py
content_id: fd31538914f2efdd7adf3137ae5649ba61055f74
detected_licenses: []
license_type: no_license
repo_name: softerhardware/Hermes-Lite2
snapshot_id: 6b05ef86f83e2c2b83ae622d3867f790532bbce0
revision_id: 0a6e07c37a23cd3a8721b6c3089e28721c378883
branch_name: refs/heads/master
visit_date: 2023-08-05T17:16:25.996884
revision_date: 2023-07-27T04:21:47
committer_date: 2023-07-27T04:21:47
github_id: 74,639,005
star_events_count: 177
fork_events_count: 82
gha_license_id: null
gha_event_created_at: 2022-02-04T06:26:33
gha_created_at: 2016-11-24T04:53:08
gha_language: HTML
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 296
extension: py
filename: mkstandardbom.py
content:
import BOM
## Add ADNI to include assembly DNI parts that are required
optionset = set(["ADNI"])
##optionset = set([""])
bom = BOM.BOM("../n2adr.xml",optionset=optionset)
pre = """\\section*{N2ADR Filter Board E5 BOM}
Standard Build - \\today"""
bom.LaTeXPrint(pre,['Mouser','Digi-Key'])

blob_id: f763d4104354061fdfb2496f0e97e2d82b0beaa7
directory_id: 4578be5ff20640cd0940faa27901489daa471ffe
path: /S08 - Padrões de projeto/observer/observer_interface.py
content_id: 32f24e590f4779ea00f3457ce70ec011a99b23b1
detected_licenses: []
license_type: no_license
repo_name: CAECOMP/provas
snapshot_id: cd31c48a912ad5e73f5bf8b826db40cf895f46b1
revision_id: 3f5eb4ec63fc91ad2c2e4ae6e5b3ac87c09ca916
branch_name: refs/heads/master
visit_date: 2023-07-07T11:53:38.798374
revision_date: 2023-04-27T03:13:57
committer_date: 2023-04-27T03:13:57
github_id: 55,001,094
star_events_count: 125
fork_events_count: 83
gha_license_id: null
gha_event_created_at: 2023-07-04T21:21:05
gha_created_at: 2016-03-29T18:38:41
gha_language: HTML
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 197
extension: py
filename: observer_interface.py
content:
import abc
class Observador(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def atualizar(
        self, preco_ibm: float, preco_apple: float, preco_google: float
    ) -> None:
        pass

blob_id: 7c7db6c36a0a1e8bc1fce06887067ba9973a8d5e
directory_id: f3806d9fb54773908cd9704121a543b114470aca
path: /angr/knowledge_plugins/structured_code/manager.py
content_id: 8304d0ce7527fb0119573de6f8ee35683bb3ced0
detected_licenses: ["BSD-2-Clause"]
license_type: permissive
repo_name: angr/angr
snapshot_id: 8ae95fceca51b0a001de56477d984dd01193ac1d
revision_id: 37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
branch_name: refs/heads/master
visit_date: 2023-08-17T03:15:21.007865
revision_date: 2023-08-15T18:44:57
committer_date: 2023-08-15T18:44:57
github_id: 40,328,394
star_events_count: 7,184
fork_events_count: 1,306
gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-09-14T20:14:23
gha_created_at: 2015-08-06T21:46:55
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,126
extension: py
filename: manager.py
content:
# pylint:disable=import-outside-toplevel
from typing import Dict, Any, Union, TYPE_CHECKING
from .. import KnowledgeBasePlugin
if TYPE_CHECKING:
from angr.knowledge_base import KnowledgeBase
from angr.analyses.decompiler.structured_codegen import BaseStructuredCodeGenerator
from angr.analyses.decompiler.decompilation_cache import DecompilationCache
class StructuredCodeManager(KnowledgeBasePlugin):
def __init__(self, kb):
self._kb: KnowledgeBase = kb
self.cached: Dict[Any, "DecompilationCache"] = {}
def _normalize_key(self, item):
if type(item) is not tuple:
raise TypeError("Structured code can only be queried by tuples of (func, flavor)")
if type(item[0]) is str:
item = (self._kb.labels.lookup(item[0]), *item[1:])
return item
def __getitem__(self, item) -> "DecompilationCache":
return self.cached[self._normalize_key(item)]
def __setitem__(self, key, value: Union["DecompilationCache", "BaseStructuredCodeGenerator"]):
from ...analyses.decompiler.structured_codegen import BaseStructuredCodeGenerator
from ...analyses.decompiler.decompilation_cache import DecompilationCache
nkey = self._normalize_key(key)
if isinstance(value, BaseStructuredCodeGenerator):
cache = DecompilationCache(nkey)
cache.codegen = value
else:
cache = value
self.cached[nkey] = cache
def __contains__(self, key):
return self._normalize_key(key) in self.cached
def __delitem__(self, key):
del self.cached[self._normalize_key(key)]
def discard(self, key):
normalized_key = self._normalize_key(key)
if normalized_key in self.cached:
del self.cached[normalized_key]
def available_flavors(self, item):
if type(item) is str:
item = self._kb.labels.lookup(item)
return [flavor for func, flavor in self.cached if func == item]
def copy(self):
raise NotImplementedError
KnowledgeBasePlugin.register_default("structured_code", StructuredCodeManager)
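# Usage sketch (hypothetical knowledge base `kb` and function address; keys are
# (func, flavor) tuples, or a label string in place of the function, as
# enforced by _normalize_key() above):
#
#     cache = kb.structured_code[(0x401000, "pseudocode")]
#     flavors = kb.structured_code.available_flavors(0x401000)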

blob_id: d87c45f3777d41aefb900a03bed29bb605c4da5c
directory_id: a38b98eb2c25559abb01c33c29d5ca80717aee6a
path: /gffutils/test/feature_test.py
content_id: c52fbd6188e45c9eaf6da8b642fc3cd2e7ecd762
detected_licenses: ["MIT"]
license_type: permissive
repo_name: daler/gffutils
snapshot_id: be99bed54ae858c21932ee2f9da88faa3733f4c1
revision_id: 955fb41a32022e11683a873a7bdc78444d5628cc
branch_name: refs/heads/master
visit_date: 2023-07-20T13:33:49.031838
revision_date: 2023-07-04T21:56:35
committer_date: 2023-07-04T21:56:35
github_id: 2,562,619
star_events_count: 227
fork_events_count: 70
gha_license_id: MIT
gha_event_created_at: 2023-07-04T21:56:37
gha_created_at: 2011-10-12T13:48:14
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,745
extension: py
filename: feature_test.py
content:
from gffutils import parser, feature, helpers, constants
def test_feature_from_line():
# spaces and tabs should give identical results
line1 = "chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690"
line2 = "chr2L\tFlyBase\texon\t7529\t8116\t.\t+\t.\tName=CG11023:1;Parent=FBtr0300689,FBtr0300690"
assert feature.feature_from_line(
line1, strict=False, keep_order=True
) == feature.feature_from_line(line2, strict=False, keep_order=True)
def test_default_feature():
# Default Feature is 8 tab-delimited ".", with a trailing tab
assert str(feature.Feature()) == ".\t.\t.\t.\t.\t.\t.\t.\t"
def test_attributes_representations():
# These different ways of supplying attributes should yield identical
# results:
s = ".\t.\t.\t.\t.\t.\t.\t.\tID=asdf"
for item in ('{"ID": ["asdf"]}', dict(ID=["asdf"]), "ID=asdf"):
result = str(feature.Feature(attributes=item))
assert result == s, result
def test_default_start_stop():
# Whether start or end is "." or None, attribute should always be None and
# printing should show "."
c = [".", None]
for i1 in c:
for i2 in c:
f = feature.Feature(start=i1, end=i2)
assert f.start is None
assert f.end is None
assert f.stop is None
assert str(f) == ".\t.\t.\t.\t.\t.\t.\t.\t", str(f)
# Make sure zero works (protects against sloppy "if start:")
f = feature.Feature(start=0, end=0)
assert f.start == f.end == f.stop == 0
assert str(f) == ".\t.\t.\t0\t0\t.\t.\t.\t", str(f)
def test_aliases():
line = "chr2L\tFlyBase\texon\t7529\t8116\t.\t+\t.\tName=CG11023:1;Parent=FBtr0300689,FBtr0300690"
f = feature.feature_from_line(line, keep_order=True)
assert f.chrom == "chr2L" == f.seqid
assert f.end == 8116 == f.stop
f.chrom = "fake"
f.stop = 1
assert f.chrom == "fake" == f.seqid
assert f.stop == 1 == f.end
def test_string_representation():
line = "chr2L\tFlyBase\texon\t7529\t8116\t.\t+\t.\tName=CG11023:1;Parent=FBtr0300689,FBtr0300690"
f = feature.feature_from_line(line, keep_order=True)
assert line == str(f), str(f)
line = "chr2L\tFlyBase\texon\t7529\t8116\t.\t+\t.\tName=CG11023:1;Parent=FBtr0300689,FBtr0300690\tsome\tmore\tstuff"
f = feature.feature_from_line(line, keep_order=True)
assert line == str(f)
def test_pbt_interval_conversion():
try:
import pybedtools
except ImportError:
return
line = "chr2L\tFlyBase\texon\t7529\t8116\t.\t+\t.\tName=CG11023:1;Parent=FBtr0300689,FBtr0300690"
f = feature.feature_from_line(line, strict=False, keep_order=True)
pbt = helpers.asinterval(f)
assert pbt.chrom == f.chrom == f.seqid
assert pbt.start == f.start - 1
assert pbt.stop == f.stop == f.end
pn = pbt.name
fn = f.attributes["Name"][0]
assert pn == fn, "%s, %s" % (pn, fn)
def test_hash():
line = "chr2L\tFlyBase\texon\t7529\t8116\t.\t+\t.\tName=CG11023:1;Parent=FBtr0300689,FBtr0300690\tsome\tmore\tstuff"
f = feature.feature_from_line(line, keep_order=True)
assert hash(f) == hash(line)
def test_repr():
line = "chr2L\tFlyBase\texon\t7529\t8116\t.\t+\t.\tName=CG11023:1;Parent=FBtr0300689,FBtr0300690\tsome\tmore\tstuff"
f = feature.feature_from_line(line, keep_order=True)
print(repr(f))
print(hex(id(f)))
assert repr(f) == ("<Feature exon (chr2L:7529-8116[+]) at %s>" % hex(id(f)))
def test_attribute_order():
# default order is gene_id, transcript_id. But feature_from_line -- if
# dialect not provided -- will infer its own dialect. In this case,
# transcript_id comes first.
attributes = 'transcript_id "mRNA1"; gene_id "gene1";'
a = feature.feature_from_line(
"""
chr1\t.\tmRNA\t1\t100\t.\t+\t.\t%s
"""
% attributes,
strict=False,
keep_order=True,
)
a.strict = True
a.keep_order = True
assert (
str(a) == 'chr1\t.\tmRNA\t1\t100\t.\t+\t.\ttranscript_id "mRNA1"; gene_id "gene1";'
), str(a)
    # ensure that using the default dialect uses the default order (and
    # incidentally converts to GFF3 format)
orig_dialect = a.dialect
a.dialect = constants.dialect
a.keep_order = True
assert str(a) == "chr1 . mRNA 1 100 . + . gene_id=gene1;transcript_id=mRNA1", str(a)
    # adding an attribute should always result in that attribute coming last (as
# long as that attribute is not in the dialect order)
a["dummy"] = ["asdf"]
a.strict = True
assert (
str(a) == "chr1 . mRNA 1 100 . + . gene_id=gene1;transcript_id=mRNA1;dummy=asdf"
), str(a)
def test_unjsonify():
attributes, dialect = parser._split_keyvals('transcript_id "mRNA1"')
assert attributes == {"transcript_id": ["mRNA1"]}, attributes
s = helpers._jsonify(attributes)
assert s == '{"transcript_id":["mRNA1"]}', s
d = helpers._unjsonify(s, isattributes=True)
assert d == attributes
class IsolatedTestCase(object):
"""
Isolated test case for checking that the module-level
constants.always_return_list works.
This was needed because having this test as a function caused other tests
to fail even though constants.always_return_list was put back to its
original setting. Apparently nose runs tests concurrently in the same
namespace or something? Anyway, these setup/teardowns do the trick.
"""
def setup(self):
constants.always_return_list = False
def teardown(self):
constants.always_return_list = True
def test_feature_single_item(self):
line = "chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690 some more stuff"
f = feature.feature_from_line(line, keep_order=True)
assert f["Name"] == ["CG11023:1"]
|
038fb272455b9077c0c0453d3746627501a27514
|
286e498dd1d3afde2cc66c37dd9c69868c230eb2
|
/tensorflow_tutorials/tensorflow scope/test2.py
|
65dcc733e7995c216ce8e069e40c41e54591f896
|
[] |
no_license
|
zhaozhengcoder/Machine-Learning
|
604fa3f020d9879a67969221b7f1928a2df494cf
|
6c349118e0c97038e549a1f7da7c5ce2e7dacad3
|
refs/heads/master
| 2022-10-27T21:49:35.847885
| 2022-09-17T02:08:47
| 2022-09-17T02:08:47
| 83,763,440
| 342
| 267
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
test2.py
|
import tensorflow as tf
import numpy as np
with tf.variable_scope('v_scope'):
Weights1 = tf.get_variable('Weights', shape=[2,3])
bias1 = tf.Variable([0.52], name='bias')
# reuse=True shares the variables that were already defined above.
# The block below reuses the variables defined in the first 'v_scope' block.
# note: a variable fetched with get_variable() in the scope below must already
# have been defined before reuse=True can be set, otherwise an error is raised.
with tf.variable_scope('v_scope', reuse=True):
Weights2 = tf.get_variable('Weights')
bias2 = tf.Variable([0.52], name='bias')
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
    print(Weights1.name)
    print(Weights2.name)
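# Added note (hedged): under TF1.x, reuse=True makes get_variable() return the
# existing variable, so both prints above are expected to show
# "v_scope/Weights:0" and `Weights1 is Weights2` holds. tf.Variable() ignores
# reuse and always creates a fresh variable, so bias1 ("v_scope/bias:0") and
# bias2 ("v_scope_1/bias:0") are distinct objects.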
|
7f86e5f3577de45face9fa0d255518f88d5e2ba6
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/corporate/migrations/0010_customerplan_exempt_from_from_license_number_check.py
|
261b6edb98cf90c3f14df181fd8c2831506e736b
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 435
|
py
|
0010_customerplan_exempt_from_from_license_number_check.py
|
# Generated by Django 3.2.2 on 2021-06-08 08:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0009_customer_sponsorship_pending"),
]
operations = [
migrations.AddField(
model_name="customerplan",
name="exempt_from_from_license_number_check",
field=models.BooleanField(default=False),
),
]
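# Added note (hedged): a schema migration like this is applied with Django's
# standard commands; the app label and migration number mirror this file.
#
#   python manage.py migrate corporate 0010
#
# Minimal sketch of reading the new flag afterwards -- the CustomerPlan import
# path is an assumption for illustration:
#
#   from corporate.models import CustomerPlan
#   plan = CustomerPlan.objects.first()
#   if plan and not plan.exempt_from_from_license_number_check:
#       ...  # enforce the license-number check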
|
79650c6105e514063fc3708a562bc9a217dcbb9a
|
7c5fb33929116bb77b438de3ead93b3978b5af71
|
/alf/utils/losses_test.py
|
dda339461aa553a4107044b332520f25eccba139
|
[
"Apache-2.0"
] |
permissive
|
HorizonRobotics/alf
|
d6dac891322a81ccb7e2a9749139627b1eda28cb
|
b00ff2fa5e660de31020338ba340263183fbeaa4
|
refs/heads/pytorch
| 2023-08-21T18:51:41.370566
| 2023-08-16T00:07:22
| 2023-08-16T00:07:22
| 178,459,453
| 288
| 57
|
Apache-2.0
| 2023-09-14T20:40:20
| 2019-03-29T18:44:07
|
Python
|
UTF-8
|
Python
| false
| false
| 12,701
|
py
|
losses_test.py
|
# Copyright (c) 2022 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from absl import logging
from absl.testing import parameterized
import torch
import time
import alf
from alf.utils import losses
class LossTest(alf.test.TestCase):
def test_discrete_regression_loss(self):
n = 10
batch_size = 256
for loss_cls in [
losses.DiscreteRegressionLoss,
losses.OrderedDiscreteRegressionLoss,
losses.QuantileRegressionLoss
]:
for transform in [None, alf.math.Sqrt1pTransform()]:
param = torch.nn.Parameter(torch.zeros(2 * n))
opt = torch.optim.Adam([param], lr=0.01)
lossf = loss_cls(transform)
logging.info("lossf=%s" % lossf)
lossf.initialize_bias(param.data)
logging.info("initial bias=%s" % param)
ex = lossf.calc_expectation(param.unsqueeze(0))
logging.info("initial expectation=%s" % ex.item())
if loss_cls == losses.DiscreteRegressionLoss:
probs = param.softmax(dim=-1)
elif loss_cls == losses.OrderedDiscreteRegressionLoss:
probs = param.sigmoid()
probs = torch.cat(
[probs[..., :-1] - probs[..., 1:], probs[..., -1:]],
dim=-1)
else:
probs = param
logging.info("initial probs=%s" % probs)
for _ in range(2000):
target = torch.rand(batch_size) * 10
logits = param.unsqueeze(dim=0).expand(target.shape[0], -1)
loss = lossf(logits, target)
self.assertEqual(loss.shape, target.shape)
loss = loss.mean()
opt.zero_grad()
loss.backward()
opt.step()
if loss_cls == losses.DiscreteRegressionLoss:
probs = param.softmax(dim=-1)
elif loss_cls == losses.OrderedDiscreteRegressionLoss:
probs = param.sigmoid()
probs = torch.cat(
[probs[..., :-1] - probs[..., 1:], probs[..., -1:]],
dim=-1)
else:
probs = param
logging.info("probs=%s" % probs)
if transform is None and loss_cls != losses.QuantileRegressionLoss:
self.assertAlmostEqual(probs[9], 0.05, delta=0.005)
self.assertAlmostEqual(probs[19], 0.05, delta=0.005)
self.assertTrue(((probs[10:19] - 0.1).abs() < 0.01).all())
ex = lossf.calc_expectation(param.unsqueeze(0))
logging.info("expectation=%s" % ex.item())
self.assertAlmostEqual(ex.item(), 5.0, delta=0.1)
class BipartiteMatchingLossTest(parameterized.TestCase, alf.test.TestCase):
@parameterized.parameters(('mean', ), ('sum', ), ('none', ))
def test_loss_shape(self, reduction):
prediction = torch.rand([2, 5, 4])
target = torch.rand([2, 5, 4])
matcher = losses.BipartiteMatchingLoss(reduction=reduction)
cost_mat = torch.cdist(prediction, target, p=1)
loss, _ = matcher(cost_mat)
if reduction == 'none':
self.assertEqual(loss.shape, (2, 5))
else:
self.assertEqual(loss.shape, (2, ))
def test_forward_loss(self):
prediction = torch.tensor(
[[[0, 0, 0], [1, 1, 1]], [[0, 0, 0], [1, 1, 1]]],
dtype=torch.float32,
requires_grad=True)
target = torch.tensor([[[0.9, 0.9, 0.9], [0, 0, 0.1]],
[[0.1, 0.1, 0.1], [0.5, 0.6, 0.5]]])
matcher = losses.BipartiteMatchingLoss(reduction='none')
cost_mat = torch.cdist(prediction, target, p=1)
loss, _ = matcher(cost_mat)
self.assertTrue(loss.requires_grad)
self.assertTensorClose(loss, torch.tensor([[0.1, 0.3], [0.3, 1.4]]))
def test_loss_training(self):
"""A simple toy training task where each target is a set of unordered
1d vectors whose means are ``torch.arange(N) + input``, where ``input``
follows a standard Gaussian. The task is for the model to predict this
unordered set given the ``input``.
"""
samples_n = 20200
N = 5
mean = torch.arange(
N, dtype=torch.float32).unsqueeze(0).expand(samples_n,
-1) # [samples_n, N]
std = torch.ones_like(mean) * 0.01
target = torch.normal(mean, std).unsqueeze(-1) # [samples_n, N, 1]
idx = torch.argsort(torch.randn(samples_n, N), dim=1)
# randomly shuffle the objects in the target set
target = torch.gather(target, dim=1, index=idx.unsqueeze(-1))
inputs = torch.randn(samples_n, 1).unsqueeze(-1) # [samples_n, 1, 1]
# offset the target objects by the inputs, to make the target input-dependent
target = target + inputs
d_model = 64
transform_layers = []
for i in range(3):
transform_layers.append(
alf.layers.TransformerBlock(
d_model=d_model,
num_heads=3,
memory_size=N + 1,
positional_encoding='abs'))
model = torch.nn.Sequential(
alf.layers.FC(1, d_model, torch.relu_),
alf.layers.FC(d_model, d_model, torch.relu_), *transform_layers,
alf.layers.FC(d_model, 1))
# We prepend the input to some random noise vectors for the transformer
# We expect the transformer converts the noise vectors to correct predictions
# Note: noise is important. Constant vectors are hard to train.
# [samples_n, N, 1]
inputs = torch.cat([inputs, torch.randn((samples_n, N, 1))], dim=1)
val_n = 200
tr_inputs = inputs[:-val_n, ...]
val_inputs = inputs[-val_n:, ...]
tr_target = target[:-val_n, ...]
val_target = target[-val_n:, ...]
t0 = time.time()
optimizer = torch.optim.Adam(list(model.parameters()), lr=1e-3)
epochs = 10
batch_size = 100
matcher = losses.BipartiteMatchingLoss(reduction='mean')
for _ in range(epochs):
idx = torch.randperm(tr_inputs.shape[0])
tr_inputs = tr_inputs[idx]
tr_target = tr_target[idx]
l = []
for i in range(0, idx.shape[0], batch_size):
optimizer.zero_grad()
b_inputs = tr_inputs[i:i + batch_size]
b_target = tr_target[i:i + batch_size]
b_pred = model(b_inputs) # [b,N+1,1]
b_pred = b_pred[:, 1:, :]
cost_mat = torch.cdist(b_pred, b_target, p=2.)
loss = matcher(cost_mat)[0].mean()
loss.backward()
optimizer.step()
l.append(loss)
print("Training loss: ", sum(l) / len(l))
print("Training time: ", time.time() - t0)
val_pred = model(val_inputs)
val_pred = val_pred[:, 1:, :]
cost_mat = torch.cdist(val_pred, val_target, p=2.)
val_loss = matcher(cost_mat)[0]
print("Validation prediction - inputs")
print(torch.round(val_pred[:3] - val_inputs[:3, :1, :], decimals=2))
print("Validation loss: ", val_loss.mean())
self.assertLess(float(val_loss.mean()), 0.15)
def test_loss_training_discrete_target(self):
"""A simple toy task for testing bipartite matching loss on discrete
target variables.
The inputs are sampled from some fixed random Gaussians, each Gaussian
representing a different object class. The target values are Gaussian ids,
but shuffled in a random order for each sample.
"""
samples_n = 10200
M, N, D = 3, 5, 10
mean = torch.randn((N, D)).unsqueeze(0) # [1,N,D]
std = torch.ones_like(mean) * 0.5 # [1,N,D]
inputs = torch.normal(
mean.repeat(samples_n, 1, 1), std.repeat(samples_n, 1,
1)) # [samples_n,N,D]
target = torch.arange(N).unsqueeze(0).repeat(samples_n,
1) # [samples_n,N]
# randomly shuffle
idx = torch.argsort(torch.randn(samples_n, N), dim=1)
inputs = torch.gather(
inputs, dim=1, index=idx.unsqueeze(-1).expand(-1, -1, D))
target = torch.gather(target, dim=1, index=idx)
# Only take the first M objects for each sample
inputs = inputs[:, :M, :]
target = target[:, :M]
# shuffle the target again ...
idx = torch.argsort(torch.randn(samples_n, M), dim=1)
target = torch.gather(target, dim=1, index=idx)
d_model = 64
transform_layers = []
for i in range(3):
transform_layers.append(
alf.layers.TransformerBlock(
d_model=d_model,
num_heads=3,
memory_size=M * 2, # input + queries
positional_encoding='abs' if i == 0 else 'none'))
model = torch.nn.Sequential(*transform_layers, alf.layers.FC(
d_model, N))
input_fc = alf.layers.FC(D, d_model)
queries = torch.nn.Parameter(torch.Tensor(M, d_model))
torch.nn.init.normal_(queries)
val_n = 200
tr_inputs = inputs[:-val_n, ...]
val_inputs = inputs[-val_n:, ...]
tr_target = target[:-val_n, ...]
val_target = target[-val_n:, ...]
def _compute_cost_mat(p, t):
p = torch.nn.functional.log_softmax(p, dim=-1)
oh_t = torch.nn.functional.one_hot(
t, num_classes=N).to(torch.float32)
return -torch.einsum('bnk,bmk->bnm', p, oh_t)
optimizer = torch.optim.Adam(
list(model.parameters()) + list(input_fc.parameters()) + [queries],
lr=1e-3)
epochs = 5
batch_size = 100
matcher = losses.BipartiteMatchingLoss(reduction='mean')
for _ in range(epochs):
idx = torch.randperm(tr_inputs.shape[0])
tr_inputs = tr_inputs[idx]
tr_target = tr_target[idx]
l = []
for i in range(0, idx.shape[0], batch_size):
optimizer.zero_grad()
b_inputs = tr_inputs[i:i + batch_size]
b_target = tr_target[i:i + batch_size]
b_inputs = input_fc(b_inputs)
b_queries = queries.unsqueeze(0).expand(
b_inputs.shape[0], -1, -1)
b_inputs = torch.cat([b_inputs, b_queries], dim=1) # [b,2M,..]
b_pred = model(b_inputs) # [b,2M,N]
b_pred = b_pred[:, M:, :]
cost_mat = _compute_cost_mat(b_pred, b_target)
loss = matcher(cost_mat)[0].mean()
loss.backward()
optimizer.step()
l.append(loss)
print("Training loss: ", sum(l) / len(l))
val_fc_out = input_fc(val_inputs)
val_queries = queries.unsqueeze(0).expand(val_fc_out.shape[0], -1, -1)
val_fc_out = torch.cat([val_fc_out, val_queries], dim=1) # [b,2M,..]
val_pred = model(val_fc_out)
val_pred = val_pred[:, M:, :]
cost_mat = _compute_cost_mat(val_pred, val_target)
val_loss = matcher(cost_mat)[0]
print("Cluster mean")
print(mean)
print("Validation inputs")
print(val_inputs[:5])
print("Validation prediction")
print(torch.argmax(val_pred, dim=-1)[:5])
print("Validation target")
print(val_target[:5])
print("Validation loss: ", val_loss.mean())
self.assertLess(float(val_loss.mean()), 0.01)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
alf.test.main()
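# Added note (hedged): BipartiteMatchingLoss pairs predictions with targets by
# minimizing a cost matrix. A standalone sketch of the same primitive using
# SciPy's Hungarian solver; this is an illustration, not alf's implementation.
#
#   import torch
#   from scipy.optimize import linear_sum_assignment
#
#   pred = torch.rand(5, 4)                  # 5 predicted objects
#   tgt = torch.rand(5, 4)                   # 5 unordered target objects
#   cost = torch.cdist(pred, tgt, p=1)       # [5, 5] pairwise matching cost
#   rows, cols = linear_sum_assignment(cost.numpy())
#   matched_cost = cost[rows, cols].sum()    # minimal total assignment cost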
|
740af68bddf190c938345d70ed83263ccc611ba6
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_operations/_patch.py
|
6b17f0f22c9086c0af5d52f2fe7dfcc991c32248
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 12,506
|
py
|
_patch.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Customize generated code here.
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
from typing import Any, List, overload, Optional, Union, Tuple, cast, MutableMapping
import copy
from azure.core.tracing.decorator import distributed_trace
from ._operations import QuestionAnsweringClientOperationsMixin as QuestionAnsweringClientOperationsMixinGenerated
from ..models import (
AnswersOptions,
AnswersFromTextOptions,
AnswersResult,
AnswersFromTextResult,
KnowledgeBaseAnswerContext,
QueryFilters,
ShortAnswerOptions,
TextDocument,
)
JSON = MutableMapping[str, Any]
def _validate_text_records(records):
if not records:
raise ValueError("Input documents can not be empty or None")
if isinstance(records, str):
raise TypeError("Input documents cannot be a string.")
if isinstance(records, dict):
raise TypeError("Input documents cannot be a dict")
if not all(isinstance(x, str) for x in records):
if not all(isinstance(x, (dict, TextDocument)) for x in records):
raise TypeError("Mixing string and dictionary/object document input unsupported.")
request_batch = []
for idx, doc in enumerate(records):
if isinstance(doc, str):
record = {"id": str(idx), "text": doc}
request_batch.append(record)
else:
request_batch.append(doc)
return request_batch
def _get_positional_body(*args, **kwargs):
"""Verify args and kwargs are valid, and then return the positional body, if users passed it in.
:param args: The arguments passed to the method.
:type args: AnswersOptions or dict
"""
if len(args) > 1:
raise TypeError("There can only be one positional argument, which is the POST body of this request.")
if "options" in kwargs:
raise TypeError("The 'options' parameter is positional only.")
return args[0] if args else None
def _verify_qna_id_and_question(query_knowledgebase_options):
"""For query_knowledge_base we require either `question` or `qna_id`.
:param query_knowledgebase_options: The user-passed AnswersOptions or dict
:type query_knowledgebase_options: AnswersOptions or dict
"""
try:
qna_id = query_knowledgebase_options.qna_id
question = query_knowledgebase_options.question
except AttributeError:
qna_id = query_knowledgebase_options.get("qna_id") or query_knowledgebase_options.get("qnaId")
question = query_knowledgebase_options.get("question")
if not (qna_id or question):
raise TypeError("You need to pass in either `qna_id` or `question`.")
if qna_id and question:
raise TypeError("You can not specify both `qna_id` and `question`.")
def _handle_metadata_filter_conversion(options_input):
options = copy.deepcopy(options_input)
filters = options.filters if hasattr(options, "filters") else options.get("filters", {})
try:
if filters and filters.metadata_filter and filters.metadata_filter.metadata:
metadata_input = filters.metadata_filter.metadata
else:
metadata_input = None
in_class = True
except AttributeError:
metadata_input = filters.get("metadataFilter", {}).get("metadata")
in_class = False
if not metadata_input:
return options
try:
if any(t for t in metadata_input if len(t) != 2):
raise ValueError("'metadata' must be a sequence of key-value tuples.")
except TypeError as exc:
raise ValueError("'metadata' must be a sequence of key-value tuples.") from exc
metadata_modified = [{"key": m[0], "value": m[1]} for m in metadata_input]
if in_class:
filters.metadata_filter.metadata = metadata_modified
else:
filters["metadataFilter"]["metadata"] = metadata_modified
return options
def _get_answers_prepare_options(*args: AnswersOptions, **kwargs: Any) -> Tuple[AnswersOptions, Any]:
options = _get_positional_body(*args, **kwargs) or AnswersOptions(
qna_id=kwargs.pop("qna_id", None),
question=kwargs.pop("question", None),
top=kwargs.pop("top", None),
user_id=kwargs.pop("user_id", None),
confidence_threshold=kwargs.pop("confidence_threshold", None),
answer_context=kwargs.pop("answer_context", None),
ranker_kind=kwargs.pop("ranker_kind", None),
filters=kwargs.pop("filters", None),
short_answer_options=kwargs.pop("short_answer_options", None),
include_unstructured_sources=kwargs.pop("include_unstructured_sources", None),
)
_verify_qna_id_and_question(options)
return _handle_metadata_filter_conversion(options), kwargs
def _get_answers_from_text_prepare_options(
*args: AnswersFromTextOptions, **kwargs: Any
) -> Tuple[Union[JSON, AnswersFromTextOptions], Any]:
default_language = kwargs.pop("language", None)
options = _get_positional_body(*args, **kwargs) or AnswersFromTextOptions(
question=kwargs.pop("question"),
text_documents=kwargs.pop("text_documents"),
language=default_language,
)
try:
options = cast(JSON, options)
# pylint: disable=unsubscriptable-object,unsupported-assignment-operation
options["records"] = _validate_text_records(options["records"])
# pylint: disable=no-member,unsupported-assignment-operation
options["language"] = options.get("language", None) or default_language
except TypeError:
options = cast(AnswersFromTextOptions, options)
options.text_documents = _validate_text_records(options.text_documents)
options.language = options.language or default_language
return options, kwargs
class QuestionAnsweringClientOperationsMixin(QuestionAnsweringClientOperationsMixinGenerated):
@overload # type: ignore # https://github.com/Azure/azure-sdk-for-python/issues/26621
def get_answers(
self, options: AnswersOptions, *, project_name: str, deployment_name: str, **kwargs: Any
) -> AnswersResult:
...
@overload
def get_answers( # pylint: disable=arguments-differ
self,
*,
project_name: str,
deployment_name: str,
qna_id: Optional[int] = None,
question: Optional[str] = None,
top: Optional[int] = None,
user_id: Optional[str] = None,
confidence_threshold: Optional[float] = None,
answer_context: Optional[KnowledgeBaseAnswerContext] = None,
ranker_kind: Optional[str] = None,
filters: Optional[QueryFilters] = None,
short_answer_options: Optional[ShortAnswerOptions] = None,
include_unstructured_sources: Optional[bool] = None,
**kwargs: Any
) -> AnswersResult:
...
@distributed_trace
def get_answers(self, *args: AnswersOptions, **kwargs: Any) -> AnswersResult:
"""Answers the specified question using your knowledge base.
:param options: Positional only. POST body of the request. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:type options: ~azure.ai.language.questionanswering.models.AnswersOptions
:keyword project_name: The name of the knowledge base project to use.
:paramtype project_name: str
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
:keyword qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
question.
:paramtype qna_id: int
:keyword question: User question to query against the knowledge base.
:paramtype question: str
:keyword top: Max number of answers to be returned for the question.
:paramtype top: int
:keyword user_id: Unique identifier for the user.
:paramtype user_id: str
:keyword confidence_threshold: Minimum threshold score for answers, value ranges from 0 to 1.
:paramtype confidence_threshold: float
:keyword answer_context: Context object with previous QnA's information.
:paramtype answer_context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerContext
:keyword ranker_kind: Type of ranker to be used. Possible
values include: "Default", "QuestionOnly".
:paramtype ranker_kind: str
:keyword filters: Filter QnAs based on given metadata list and knowledge base sources.
:paramtype filters: ~azure.ai.language.questionanswering.models.QueryFilters
:keyword short_answer_options: To configure Answer span prediction feature.
:paramtype short_answer_options: ~azure.ai.language.questionanswering.models.ShortAnswerOptions
:keyword include_unstructured_sources: (Optional) Flag to enable Query over Unstructured
Sources.
:paramtype include_unstructured_sources: bool
:return: AnswersResult
:rtype: ~azure.ai.language.questionanswering.models.AnswersResult
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_query_knowledgebase.py
:start-after: [START query_knowledgebase]
:end-before: [END query_knowledgebase]
:language: python
:dedent: 4
:caption: Answer the specified question using your knowledge base.
"""
options, kwargs = _get_answers_prepare_options(*args, **kwargs)
return super().get_answers(options, **kwargs)
@overload # type: ignore
def get_answers_from_text(self, options: AnswersFromTextOptions, **kwargs: Any) -> AnswersFromTextResult:
pass
@overload
def get_answers_from_text( # pylint: disable=arguments-differ
self,
*,
question: str,
text_documents: List[Union[str, TextDocument]],
language: Optional[str] = None,
**kwargs: Any
) -> AnswersFromTextResult:
...
@distributed_trace
def get_answers_from_text(self, *args: AnswersFromTextOptions, **kwargs: Any) -> AnswersFromTextResult:
"""Answers the specified question using the provided text in the body.
:param options: Positional only. POST body of the request. Provide either `options`, OR
individual keyword arguments. If both are provided, only the options object will be used.
:type options: ~azure.ai.language.questionanswering.models.AnswersFromTextOptions
:keyword question: User question to query against the given text records.
:paramtype question: str
:keyword text_documents: Text records to be searched for given question.
:paramtype text_documents: list[str or ~azure.ai.language.questionanswering.models.TextDocument]
:keyword language: Language of the text records. This is BCP-47 representation of a language.
For example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
default.
:paramtype language: str
:return: AnswersFromTextResult
:rtype: ~azure.ai.language.questionanswering.models.AnswersFromTextResult
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_query_text.py
:start-after: [START query_text]
:end-before: [END query_text]
:language: python
:dedent: 4
:caption: Answers the specified question using the provided text.
"""
options, kwargs = _get_answers_from_text_prepare_options(
*args, language=kwargs.pop("language", self._default_language), **kwargs # type: ignore
)
return super().get_answers_from_text(options, **kwargs) # type: ignore
__all__: List[str] = [
"QuestionAnsweringClientOperationsMixin"
] # Add all objects you want publicly available to users at this package level
def patch_sdk():
"""Do not remove from this file.
`patch_sdk` is a last resort escape hatch that allows you to do customizations
you can't accomplish using the techniques described in
https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
|
1a3ef938ae13b4fa4bf3548fb9a1a4217e4cd2c9
|
316b99c6046ff58c8499e0c214e9b81d9c3132b0
|
/beartype/_util/func/mod/utilfuncmodtest.py
|
df09067f781a6b0b080fdce48dccc43c85311298
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
beartype/beartype
|
fb6417b3dc2e08c065f0d907f43411c33d883a7d
|
0cfd53391eb4de2f8297a4632aa5895b8d82a5b7
|
refs/heads/main
| 2023-08-15T13:17:47.095732
| 2023-08-15T05:25:54
| 2023-08-15T05:25:54
| 252,646,465
| 1,992
| 51
|
MIT
| 2023-07-28T04:13:08
| 2020-04-03T06:06:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,262
|
py
|
utilfuncmodtest.py
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2023 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **module-specific callable testers** (i.e., utility functions
dynamically validating and inspecting various properties of passed callables
declared by standard modules and packages in Python's standard library).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.typing import (
Any,
)
from beartype._data.hint.datahintfactory import TypeGuard
from beartype._util.func.utilfunccodeobj import (
get_func_codeobj_or_none,
get_func_codeobj_name,
)
from beartype._util.py.utilpyversion import IS_PYTHON_AT_MOST_3_10
from collections.abc import (
Callable,
# Generator,
)
# ....................{ TESTERS }....................
def is_func_contextlib_contextmanager(func: Any) -> TypeGuard[Callable]:
'''
:data:`True` only if the passed object is a
:func:`contextlib.contextmanager`-based **isomorphic decorator closure**
(i.e., closure both defined and returned by the standard
:func:`contextlib.contextmanager` decorator where that closure
isomorphically preserves both the number and types of all passed parameters
and returns by accepting only a variadic positional argument and variadic
keyword argument).
This tester enables callers to detect when a user-defined callable has been
decorated by :func:`contextlib.contextmanager` and thus has a mismatch
between the type hints annotating that decorated callable and the type of
the object created and returned by that decorated callable.
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
:data:`True` only if this object is a
:func:`contextlib.contextmanager`-based isomorphic decorator closure.
See Also
----------
beartype._data.func.datafunc.CONTEXTLIB_CONTEXTMANAGER_CO_NAME_QUALNAME
Further discussion.
'''
# Avoid circular import dependencies.
from beartype._util.func.utilfunctest import is_func_closure
# If either...
if (
# The active Python interpreter targets Python < 3.10 and thus fails to
# define the "co_qualname" attribute on code objects required to
# robustly implement this test *OR*...
IS_PYTHON_AT_MOST_3_10 or
# The passed callable is *NOT* a closure...
not is_func_closure(func)
):
# Then immediately return false.
return False
# Else, that callable is a closure.
# Code object underlying that callable as is (rather than possibly unwrapped
# to another code object entirely) if that callable is pure-Python *OR*
# "None" otherwise (i.e., if that callable is C-based).
func_codeobj = get_func_codeobj_or_none(func)
# If that callable is C-based, immediately return false.
if func_codeobj is None:
return False
# Else, that callable is pure-Python.
# Defer heavyweight tester-specific imports with potential side effects --
# notably, increased costs to space and time complexity.
from beartype._data.module.datamodcontextlib import (
CONTEXTLIB_CONTEXTMANAGER_CODEOBJ_NAME)
# Fully-qualified name of that code object.
func_codeobj_name = get_func_codeobj_name(func_codeobj)
# Return true only if the fully-qualified name of that code object is that
# of the isomorphic decorator closure created and returned by the standard
# @contextlib.contextmanager decorator.
#
# Note that we *COULD* technically also explicitly test whether that
# callable satisfies the is_func_closure_isomorphic() tester, but that
# there's no benefit and a minor efficiency cost to doing so.
return func_codeobj_name == CONTEXTLIB_CONTEXTMANAGER_CODEOBJ_NAME
def is_func_functools_lru_cache(func: Any) -> TypeGuard[Callable]:
'''
:data:`True` only if the passed object is a
:func:`functools.lru_cache`-memoized **pseudo-callable** (i.e., low-level
C-based callable object both created and returned by the standard
:func:`functools.lru_cache` decorator).
This tester enables callers to detect when a user-defined callable has been
decorated by the :func:`functools.lru_cache` decorator, which creates
low-level C-based callable objects requiring special handling elsewhere.
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
:data:`True` only if this object is a
:func:`functools.lru_cache`-memoized callable.
'''
# Defer heavyweight tester-specific imports with potential side effects --
# notably, increased costs to space and time complexity.
from beartype._data.module.datamodfunctools import (
LRU_CACHE_TYPE)
# Return true only if the type of that callable is the low-level C-based
# private type of all objects created and returned by the standard
# @functools.lru_cache decorator.
return type(func) is LRU_CACHE_TYPE
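# Added note (hedged): exercising the tester above; the import path mirrors
# this file's location and is otherwise an assumption.
#
#   from functools import lru_cache
#   from beartype._util.func.mod.utilfuncmodtest import (
#       is_func_functools_lru_cache)
#
#   @lru_cache(maxsize=None)
#   def fib(n: int) -> int:
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   assert is_func_functools_lru_cache(fib)        # memoized pseudo-callable
#   assert not is_func_functools_lru_cache(print)  # plain C-based callable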
|
c6dbcbd6f49294f2f51b8f9541eda7c920a89259
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/BillSendExtInfo.py
|
c9bdd3bc50b6fce718f0a2f95348e94a9e6f89d9
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
BillSendExtInfo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BillSendExtInfo(object):
def __init__(self):
self._order_pay_type = None
self._royalty_amount = None
self._trans_in_pid = None
@property
def order_pay_type(self):
return self._order_pay_type
@order_pay_type.setter
def order_pay_type(self, value):
self._order_pay_type = value
@property
def royalty_amount(self):
return self._royalty_amount
@royalty_amount.setter
def royalty_amount(self, value):
self._royalty_amount = value
@property
def trans_in_pid(self):
return self._trans_in_pid
@trans_in_pid.setter
def trans_in_pid(self, value):
self._trans_in_pid = value
def to_alipay_dict(self):
params = dict()
if self.order_pay_type:
if hasattr(self.order_pay_type, 'to_alipay_dict'):
params['order_pay_type'] = self.order_pay_type.to_alipay_dict()
else:
params['order_pay_type'] = self.order_pay_type
if self.royalty_amount:
if hasattr(self.royalty_amount, 'to_alipay_dict'):
params['royalty_amount'] = self.royalty_amount.to_alipay_dict()
else:
params['royalty_amount'] = self.royalty_amount
if self.trans_in_pid:
if hasattr(self.trans_in_pid, 'to_alipay_dict'):
params['trans_in_pid'] = self.trans_in_pid.to_alipay_dict()
else:
params['trans_in_pid'] = self.trans_in_pid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BillSendExtInfo()
if 'order_pay_type' in d:
o.order_pay_type = d['order_pay_type']
if 'royalty_amount' in d:
o.royalty_amount = d['royalty_amount']
if 'trans_in_pid' in d:
o.trans_in_pid = d['trans_in_pid']
return o
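# Added note (hedged): a simple round trip through the converters above;
# values are illustrative only.
if __name__ == "__main__":
    info = BillSendExtInfo()
    info.order_pay_type = "PREPAY"
    info.royalty_amount = "10.00"
    info.trans_in_pid = "2088000000000000"
    params = info.to_alipay_dict()
    assert params["trans_in_pid"] == "2088000000000000"
    restored = BillSendExtInfo.from_alipay_dict(params)
    assert restored.royalty_amount == "10.00"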
|
daf49f5b711ad01d760e18ae3731ed69551d0b4d
|
6186a3787d1e74f1866844491da48b9643c8f1a9
|
/ghostwriter/shepherd/forms.py
|
c6f31882961b386475ca71441c8c3d942476da1b
|
[
"BSD-3-Clause"
] |
permissive
|
GhostManager/Ghostwriter
|
b46b2421e5737ed0afbf49182dce9eeb5eb31936
|
b9eae4459ba192fbb2d4a5b66f8210d57fd7112a
|
refs/heads/master
| 2023-09-04T02:34:54.085997
| 2023-07-13T22:38:44
| 2023-07-13T22:38:44
| 197,269,443
| 1,011
| 197
|
BSD-3-Clause
| 2023-09-08T00:19:52
| 2019-07-16T21:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 15,940
|
py
|
forms.py
|
"""This contains all the forms used by the Shepherd application."""
# Standard Libraries
from datetime import date
# Django Imports
from django import forms
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
# 3rd Party Libraries
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, ButtonHolder, Column, Div, Layout, Row, Submit
# Ghostwriter Libraries
from ghostwriter.modules.custom_layout_object import SwitchToggle
from ghostwriter.rolodex.models import Project
from .models import (
Domain,
DomainNote,
DomainServerConnection,
DomainStatus,
History,
ServerHistory,
TransientServer,
)
class CheckoutForm(forms.ModelForm):
"""
Save an individual :model:`shepherd.History` associated with an individual
:model:`shepherd.Domain`.
"""
class Meta:
model = History
fields = "__all__"
widgets = {
"domain": forms.HiddenInput(),
"start_date": forms.DateInput(
format="%Y-%m-%d",
),
"end_date": forms.DateInput(
format="%Y-%m-%d",
),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
data_projects_url = reverse("shepherd:ajax_load_projects")
data_project_url = reverse("shepherd:ajax_load_project")
overwatch_url = reverse("shepherd:ajax_domain_overwatch")
for field in self.fields:
self.fields[field].widget.attrs["autocomplete"] = "off"
self.fields["client"].empty_label = "-- Select a Client --"
self.fields["client"].label = ""
self.fields["activity_type"].empty_label = "-- Select Activity --"
self.fields["activity_type"].label = ""
self.fields["project"].empty_label = "-- Select a Client First --"
self.fields["project"].label = ""
self.fields["project"].queryset = Project.objects.none()
self.fields["start_date"].widget.input_type = "date"
self.fields["end_date"].widget.input_type = "date"
self.fields["note"].widget.attrs["placeholder"] = "This domain will be used for..."
self.fields["note"].label = ""
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.form_show_labels = False
self.helper.form_show_errors = False
self.helper.attrs = {
"data-projects-url": data_projects_url,
"data-project-url": data_project_url,
"overwatch-url": overwatch_url,
}
self.helper.form_id = "checkout-form"
self.helper.layout = Layout(
HTML(
"""
<h4 class="icon project-icon">Project & Activity Information</h4>
<hr>
"""
),
"client",
"project",
Row(
Column("start_date", css_class="form-group col-md-6 mb-0"),
Column("end_date", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
"activity_type",
HTML(
"""
<h4 class="icon comment-icon">Additional Information</h4>
<hr>
"""
),
"note",
"domain",
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'" class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
# Prevent "not one of the valid options" errors from AJAX project filtering
if "client" in self.data:
try:
client_id = int(self.data.get("client"))
self.fields["project"].queryset = Project.objects.filter(client_id=client_id).order_by("codename")
except (ValueError, TypeError): # pragma: no cover
pass
elif self.instance.pk:
self.fields["project"].queryset = self.instance.client.project_set.order_by("codename")
def clean_end_date(self):
end_date = self.cleaned_data["end_date"]
start_date = self.cleaned_data["start_date"]
# Check if end_date comes before the start_date
if end_date < start_date:
raise ValidationError(_("The provided end date comes before the start date"), code="invalid")
return end_date
def clean_domain(self):
        insert = self.instance.pk is None
domain = self.cleaned_data["domain"]
if insert:
unavailable = DomainStatus.objects.get(domain_status="Unavailable")
if (domain.expiration < date.today() and domain.auto_renew is False) or domain.expired:
raise ValidationError(_("This domain has expired!"), code="expired")
if domain.domain_status == unavailable:
raise ValidationError(
_("Someone beat you to it – This domain has already been checked out!"),
code="unavailable",
)
return domain
class DomainForm(forms.ModelForm):
"""
Save an individual :model:`shepherd.Domain`.
"""
class Meta:
model = Domain
exclude = (
"last_used_by",
"burned_explanation",
"categorization",
"dns",
"expired",
)
widgets = {
"creation": forms.DateInput(
format="%Y-%m-%d",
),
"expiration": forms.DateInput(
format="%Y-%m-%d",
),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in self.fields:
self.fields[field].widget.attrs["autocomplete"] = "off"
self.fields["name"].widget.attrs["placeholder"] = "ghostwriter.wiki"
self.fields["registrar"].widget.attrs["placeholder"] = "NameCheap"
self.fields["domain_status"].empty_label = "-- Select Status --"
self.fields["whois_status"].empty_label = "-- Select Status --"
self.fields["health_status"].empty_label = "-- Select Status --"
self.fields["creation"].widget.input_type = "date"
self.fields["expiration"].widget.input_type = "date"
self.fields["note"].widget.attrs["placeholder"] = "This domain was purchased for..."
self.fields["note"].label = ""
self.fields["tags"].widget.attrs["placeholder"] = "phishing, categorized, ..."
self.fields["name"].label = "Domain Name"
self.fields["whois_status"].label = "WHOIS Status"
self.fields["health_status"].label = "Health Status"
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.form_show_errors = False
self.helper.form_id = "checkout-form"
self.helper.layout = Layout(
HTML(
"""
<h4 class="icon domain-icon">Domain Information</h4>
<hr>
"""
),
Row(
Column("name", css_class="form-group col-md-6 mb-0"),
Column("registrar", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
Row(
Column("domain_status", css_class="form-group col-md-6 mb-0"),
Column("tags", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
Row(
Column("creation", css_class="form-group col-md-6 mb-0"),
Column("expiration", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
Row(
Column(SwitchToggle("auto_renew"), css_class="form-group col-md-6 mb-0"),
Column(SwitchToggle("reset_dns"), css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
HTML(
"""
<h4 class="icon heartbeat-icon">Health & Category Information</h4>
<hr>
"""
),
Row(
Column("whois_status", css_class="form-group col-md-6 mb-0"),
Column("health_status", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
HTML(
"""
<h4 class="icon comment-icon">Additional Information</h4>
<hr>
"""
),
"note",
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'"
class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
def clean_name(self):
domain = None
name = self.cleaned_data["name"]
try:
domain = Domain.objects.get(name=name.lower())
except Domain.DoesNotExist:
pass
if domain and domain.pk != self.instance.pk:
raise ValidationError(
_("Domain names must be unique and this one already exists in the library"),
code="unique",
)
return name
def clean(self):
expiration = self.cleaned_data["expiration"]
creation = self.cleaned_data["creation"]
# Check if expiration comes before the creation date
if expiration < creation:
raise ValidationError(
_("The provided expiration date comes before the purchase date"),
code="invalid_date",
)
class DomainLinkForm(forms.ModelForm):
"""
Save an individual :model:`shepherd.DomainServerConnection` linking an individual
:model:`shepherd.Domain` with an individual :model:`shepherd.StaticServer` or
:model:`shepherd.TransientServer`.
"""
class Meta:
model = DomainServerConnection
fields = "__all__"
widgets = {
"project": forms.HiddenInput(),
}
def __init__(self, project=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if project:
self.fields["static_server"].queryset = ServerHistory.objects.filter(project=project).order_by(
"activity_type", "server_role"
)
self.fields["transient_server"].queryset = TransientServer.objects.filter(project=project).order_by(
"activity_type", "server_role"
)
self.fields["domain"].queryset = History.objects.filter(project=project).order_by("activity_type")
for field in self.fields:
self.fields[field].widget.attrs["autocomplete"] = "off"
self.fields["domain"].empty_label = "-- Select a Domain [Required] --"
self.fields["domain"].label_from_instance = lambda obj: f"{obj.domain.name} ({obj.activity_type})"
self.fields["static_server"].empty_label = "-- Select Static Server --"
self.fields[
"static_server"
].label_from_instance = lambda obj: f"{obj.server.ip_address} ({obj.server_role} | {obj.activity_type})"
self.fields["transient_server"].empty_label = "-- Select VPS --"
self.fields[
"transient_server"
].label_from_instance = lambda obj: f"{obj.ip_address} ({obj.server_role} | {obj.activity_type})"
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.form_show_errors = False
self.helper.form_show_labels = False
self.helper.layout = Layout(
HTML(
"""
<p>First, select a domain checked-out for this project:</p>
"""
),
"domain",
HTML(
"""
<p>Then set your subdomain (or "*" for a wildcard) and CDN endpoint (if any) used with this link:</p>
"""
),
"subdomain",
"endpoint",
HTML(
"""
<p>Finally, select either a static server checked-out for this project
<em>or</em> a transient server to associate with the selected domain:</p>
"""
),
Row(
Column("static_server", css_class="form-group col-md-6 mb-0"),
Column("transient_server", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
"project",
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'"
class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
def clean(self):
if self.cleaned_data["static_server"] and self.cleaned_data["transient_server"]:
raise ValidationError(_("Select only one server"), code="invalid_selection")
if not self.cleaned_data["static_server"] and not self.cleaned_data["transient_server"]:
raise ValidationError(_("You must select one server"), code="invalid_selection")
class DomainNoteForm(forms.ModelForm):
"""
Save an individual :model:`shepherd.DomainNote` associated with an individual
:model:`shepherd.Domain`.
"""
class Meta:
model = DomainNote
fields = ("note",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.form_show_labels = False
self.helper.form_show_errors = False
self.helper.layout = Layout(
Div("note"),
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'"
class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
def clean_note(self):
note = self.cleaned_data["note"]
# Check if note is empty
if not note:
raise ValidationError(
_("You must provide some content for the note"),
code="required",
)
return note
class BurnForm(forms.ModelForm):
"""
Update the ``burned_explanation`` field for an individual :model:`shepherd.Domain`.
"""
class Meta:
model = Domain
fields = ("burned_explanation",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["burned_explanation"].widget.attrs["placeholder"] = "This domain was flagged for..."
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.form_show_labels = False
self.helper.form_show_errors = False
self.helper.layout = Layout(
"burned_explanation",
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'"
class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
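# Added note (hedged): server-side validation mirroring
# CheckoutForm.clean_end_date above; primary-key values and dates are
# illustrative.
#
#   form = CheckoutForm(data={
#       "client": client.pk,
#       "project": project.pk,
#       "activity_type": activity.pk,
#       "domain": domain.pk,
#       "start_date": "2023-01-10",
#       "end_date": "2023-01-01",  # before start_date, so validation fails
#       "note": "This domain will be used for...",
#   })
#   assert not form.is_valid()
#   assert "end_date" in form.errors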
|
76691004431ca9f8a1f34eb82af788c2e9d9186f
|
d58a58261efe1db9f2956be1e7081dbd4e7eeb0f
|
/models/pix2pix_model.py
|
3e3f9b87931b258e09b3fe858882d18f34fac325
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
mit-han-lab/gan-compression
|
6245dc896ce79470e6a55a39e678ec32197c0fc0
|
3dd79dd4973e4bdad511169fde89b3e9e12adc5e
|
refs/heads/master
| 2023-08-24T09:12:58.311288
| 2022-11-10T00:57:24
| 2022-11-10T00:57:24
| 245,032,939
| 1,135
| 166
|
NOASSERTION
| 2023-02-16T00:44:54
| 2020-03-05T00:27:13
|
Python
|
UTF-8
|
Python
| false
| false
| 10,049
|
py
|
pix2pix_model.py
|
import ntpath
import os
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from data import create_eval_dataloader
from metric import get_fid, get_cityscapes_mIoU
from metric.cityscapes_mIoU import DRNSeg
from metric.inception import InceptionV3
from models import networks
from models.base_model import BaseModel
from models.modules.loss import GANLoss
from utils import util
class Pix2PixModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
assert is_train
parser = super(Pix2PixModel, Pix2PixModel).modify_commandline_options(parser, is_train)
parser.add_argument('--restore_G_path', type=str, default=None,
help='the path to restore the generator')
parser.add_argument('--restore_D_path', type=str, default=None,
help='the path to restore the discriminator')
parser.add_argument('--recon_loss_type', type=str, default='l1',
choices=['l1', 'l2', 'smooth_l1'],
help='the type of the reconstruction loss')
parser.add_argument('--lambda_recon', type=float, default=100,
help='weight for reconstruction loss')
parser.add_argument('--lambda_gan', type=float, default=1,
help='weight for gan loss')
parser.add_argument('--real_stat_path', type=str, required=True,
                            help='the path to load the ground-truth images information to compute FID.')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
assert opt.isTrain
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_gan', 'G_recon', 'D_real', 'D_fake']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
self.model_names = ['G', 'D']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.netG, input_nc=opt.input_nc, output_nc=opt.output_nc, ngf=opt.ngf,
norm=opt.norm, dropout_rate=opt.dropout_rate, init_type=opt.init_type,
init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
self.netD = networks.define_D(opt.netD, input_nc=opt.input_nc + opt.output_nc, ndf=opt.ndf,
n_layers_D=opt.n_layers_D, norm=opt.norm, init_type=opt.init_type,
init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
# define loss functions
self.criterionGAN = GANLoss(opt.gan_mode).to(self.device)
if opt.recon_loss_type == 'l1':
self.criterionRecon = torch.nn.L1Loss()
elif opt.recon_loss_type == 'l2':
self.criterionRecon = torch.nn.MSELoss()
elif opt.recon_loss_type == 'smooth_l1':
self.criterionRecon = torch.nn.SmoothL1Loss()
else:
            raise NotImplementedError('Unknown reconstruction loss type [%s]!' % opt.recon_loss_type)
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = []
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
self.eval_dataloader = create_eval_dataloader(self.opt)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
self.inception_model = InceptionV3([block_idx])
self.inception_model.to(self.device)
self.inception_model.eval()
if 'cityscapes' in opt.dataroot:
self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
util.load_network(self.drn_model, opt.drn_path, verbose=False)
if len(opt.gpu_ids) > 0:
self.drn_model.to(self.device)
self.drn_model = nn.DataParallel(self.drn_model, opt.gpu_ids)
self.drn_model.eval()
self.best_fid = 1e9
self.best_mIoU = -1e9
self.fids, self.mIoUs = [], []
self.is_best = False
self.Tacts, self.Sacts = {}, {}
self.npz = np.load(opt.real_stat_path)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG(self.real_A) # G(A)
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
fake_AB = torch.cat((self.real_A, self.fake_B), 1).detach()
real_AB = torch.cat((self.real_A, self.real_B), 1).detach()
pred_fake = self.netD(fake_AB)
self.loss_D_fake = self.criterionGAN(pred_fake, False, for_discriminator=True)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True, for_discriminator=True)
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_gan = self.criterionGAN(pred_fake, True, for_discriminator=False) * self.opt.lambda_gan
# Second, G(A) = B
self.loss_G_recon = self.criterionRecon(self.fake_B, self.real_B) * self.opt.lambda_recon
# combine loss and calculate gradients
self.loss_G = self.loss_G_gan + self.loss_G_recon
self.loss_G.backward()
def optimize_parameters(self, steps):
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
self.backward_G() # calculate gradients for G
self.optimizer_G.step() # update G's weights
def evaluate_model(self, step):
self.is_best = False
save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
os.makedirs(save_dir, exist_ok=True)
self.netG.eval()
fakes, names = [], []
cnt = 0
for i, data_i in enumerate(tqdm(self.eval_dataloader, desc='Eval ', position=2, leave=False)):
self.set_input(data_i)
self.test()
fakes.append(self.fake_B.cpu())
for j in range(len(self.image_paths)):
short_path = ntpath.basename(self.image_paths[j])
name = os.path.splitext(short_path)[0]
names.append(name)
if cnt < 10:
input_im = util.tensor2im(self.real_A[j])
real_im = util.tensor2im(self.real_B[j])
fake_im = util.tensor2im(self.fake_B[j])
util.save_image(input_im, os.path.join(save_dir, 'input', '%s.png' % name), create_dir=True)
util.save_image(real_im, os.path.join(save_dir, 'real', '%s.png' % name), create_dir=True)
util.save_image(fake_im, os.path.join(save_dir, 'fake', '%s.png' % name), create_dir=True)
cnt += 1
fid = get_fid(fakes, self.inception_model, self.npz, device=self.device,
batch_size=self.opt.eval_batch_size, tqdm_position=2)
if fid < self.best_fid:
self.is_best = True
self.best_fid = fid
self.fids.append(fid)
if len(self.fids) > 3:
self.fids.pop(0)
ret = {'metric/fid': fid, 'metric/fid-mean': sum(self.fids) / len(self.fids), 'metric/fid-best': self.best_fid}
if 'cityscapes' in self.opt.dataroot:
mIoU = get_cityscapes_mIoU(fakes, names, self.drn_model, self.device,
table_path=self.opt.table_path,
data_dir=self.opt.cityscapes_path,
batch_size=self.opt.eval_batch_size,
num_workers=self.opt.num_threads, tqdm_position=2)
if mIoU > self.best_mIoU:
self.is_best = True
self.best_mIoU = mIoU
self.mIoUs.append(mIoU)
if len(self.mIoUs) > 3:
self.mIoUs = self.mIoUs[1:]
ret['metric/mIoU'] = mIoU
ret['metric/mIoU-mean'] = sum(self.mIoUs) / len(self.mIoUs)
ret['metric/mIoU-best'] = self.best_mIoU
self.netG.train()
return ret
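# Added note (hedged): how the methods above are typically driven; option and
# dataloader setup (opt, train_dataloader, eval_freq) are assumed and elided.
#
#   model = Pix2PixModel(opt)
#   model.setup(opt)                      # schedulers created by BaseModel.setup
#   for step, data in enumerate(train_dataloader):
#       model.set_input(data)             # move the A/B pair onto the device
#       model.optimize_parameters(step)   # D update, then G update
#       if step % eval_freq == 0:
#           metrics = model.evaluate_model(step)  # FID (and mIoU on cityscapes)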
|
b4a6c7bc2c5c254ebe4d2e6d93f52f4bbc469123
|
93530fb468b1106b0d7dc61e4629b2aba0d2a27d
|
/DynamicKey/AgoraDynamicKey/python/src/RtcTokenBuilder2.py
|
d6cac1ecb3d1aa806034a098d6ce0aa791c34144
|
[
"MIT"
] |
permissive
|
AgoraIO/Tools
|
4e02dbed064dfc7dcf9cf0c582f8216c83d20d93
|
5c800b136f132a92d5f70252aac12e9c32dbf5e7
|
refs/heads/master
| 2023-09-01T06:59:16.501266
| 2023-07-27T07:37:15
| 2023-07-27T07:37:15
| 167,963,357
| 380
| 934
| null | 2023-08-24T14:01:29
| 2019-01-28T13:03:58
|
C++
|
UTF-8
|
Python
| false
| false
| 14,048
|
py
|
RtcTokenBuilder2.py
|
# -*- coding: utf-8 -*-
__copyright__ = "Copyright (c) 2014-2017 Agora.io, Inc."
from .AccessToken2 import *
Role_Publisher = 1 # for live broadcaster
Role_Subscriber = 2 # default, for live audience
class RtcTokenBuilder:
@staticmethod
def build_token_with_uid(app_id, app_certificate, channel_name, uid, role, token_expire, privilege_expire=0):
"""
Build the RTC token with uid.
:param app_id: The App ID issued to you by Agora. Apply for a new App ID from Agora Dashboard if it is missing
from your kit. See Get an App ID.
:param app_certificate: Certificate of the application that you registered in the Agora Dashboard.
See Get an App Certificate.
:param channel_name: Unique channel name for the AgoraRTC session in the string format.
:param uid: User ID. A 32-bit unsigned integer with a value ranging from 1 to (2^32-1).
            The uid must be unique.
:param role: Role_Publisher: A broadcaster/host in a live-broadcast profile.
Role_Subscriber: An audience(default) in a live-broadcast profile.
:param token_expire: represented by the number of seconds elapsed since now. If, for example,
you want to access the Agora Service within 10 minutes after the token is generated,
set token_expire as 600(seconds).
:param privilege_expire: represented by the number of seconds elapsed since now. If, for example,
you want to enable your privilege for 10 minutes, set privilege_expire as 600(seconds).
:return: The RTC token.
"""
return RtcTokenBuilder.build_token_with_user_account(app_id, app_certificate, channel_name, uid, role,
token_expire, privilege_expire)
@staticmethod
def build_token_with_user_account(app_id, app_certificate, channel_name, account, role, token_expire,
privilege_expire=0):
"""
Build the RTC token with account.
:param app_id: The App ID issued to you by Agora. Apply for a new App ID from Agora Dashboard if it is missing
from your kit. See Get an App ID.
:param app_certificate: Certificate of the application that you registered in the Agora Dashboard.
See Get an App Certificate.
:param channel_name: Unique channel name for the AgoraRTC session in the string format.
:param account: The user's account, max length is 255 Bytes.
:param role: Role_Publisher: A broadcaster/host in a live-broadcast profile.
Role_Subscriber: An audience(default) in a live-broadcast profile.
:param token_expire: represented by the number of seconds elapsed since now. If, for example,
you want to access the Agora Service within 10 minutes after the token is generated,
set token_expire as 600(seconds).
:param privilege_expire: represented by the number of seconds elapsed since now. If, for example,
you want to enable your privilege for 10 minutes, set privilege_expire as 600(seconds).
:return: The RTC token.
"""
token = AccessToken(app_id, app_certificate, expire=token_expire)
rtc_service = ServiceRtc(channel_name, account)
rtc_service.add_privilege(ServiceRtc.kPrivilegeJoinChannel, privilege_expire)
if role == Role_Publisher:
rtc_service.add_privilege(ServiceRtc.kPrivilegePublishAudioStream, privilege_expire)
rtc_service.add_privilege(ServiceRtc.kPrivilegePublishVideoStream, privilege_expire)
rtc_service.add_privilege(ServiceRtc.kPrivilegePublishDataStream, privilege_expire)
token.add_service(rtc_service)
return token.build()
@staticmethod
def build_token_with_uid_and_privilege(app_id, app_certificate, channel_name, uid, token_expire,
join_channel_privilege_expire, pub_audio_privilege_expire,
pub_video_privilege_expire, pub_data_stream_privilege_expire):
"""
Generates an RTC token with the specified privilege.
This method supports generating a token with the following privileges:
- Joining an RTC channel.
- Publishing audio in an RTC channel.
- Publishing video in an RTC channel.
- Publishing data streams in an RTC channel.
The privileges for publishing audio, video, and data streams in an RTC channel apply only if you have
enabled co-host authentication.
A user can have multiple privileges. Each privilege is valid for a maximum of 24 hours.
The SDK triggers the onTokenPrivilegeWillExpire and onRequestToken callbacks when the token is about to expire
or has expired. The callbacks do not report the specific privilege affected, and you need to maintain
the respective timestamp for each privilege in your app logic. After receiving the callback, you need
to generate a new token, and then call renewToken to pass the new token to the SDK, or call joinChannel to re-join
the channel.
@note
Agora recommends setting a reasonable timestamp for each privilege according to your scenario.
Suppose the expiration timestamp for joining the channel is set earlier than that for publishing audio.
When the token for joining the channel expires, the user is immediately kicked off the RTC channel
and cannot publish any audio stream, even though the timestamp for publishing audio has not expired.
        :param app_id: The App ID of your Agora project.
:param app_certificate: The App Certificate of your Agora project.
:param channel_name: The unique channel name for the Agora RTC session in string format. The string length must be less than 64 bytes. The channel name may contain the following characters:
- All lowercase English letters: a to z.
- All uppercase English letters: A to Z.
- All numeric characters: 0 to 9.
- The space character.
- "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
:param uid: The user ID. A 32-bit unsigned integer with a value range from 1 to (2^32 - 1). It must be unique. Set uid as 0, if you do not want to authenticate the user ID, that is, any uid from the app client can join the channel.
:param token_expire: represented by the number of seconds elapsed since now. If, for example, you want to access the
Agora Service within 10 minutes after the token is generated, set token_expire as 600(seconds).
:param join_channel_privilege_expire: The Unix timestamp when the privilege for joining the channel expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set join_channel_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes.
:param pub_audio_privilege_expire: The Unix timestamp when the privilege for publishing audio expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set pub_audio_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes. If you do not want to enable this privilege,
set pub_audio_privilege_expire as the current Unix timestamp.
:param pub_video_privilege_expire: The Unix timestamp when the privilege for publishing video expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set pub_video_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes. If you do not want to enable this privilege,
set pub_video_privilege_expire as the current Unix timestamp.
:param pub_data_stream_privilege_expire: The Unix timestamp when the privilege for publishing data streams expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set pub_data_stream_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes. If you do not want to enable this privilege,
set pub_data_stream_privilege_expire as the current Unix timestamp.
:return: The new Token
"""
return RtcTokenBuilder.build_token_with_user_account_and_privilege(
app_id, app_certificate, channel_name, uid, token_expire, join_channel_privilege_expire,
pub_audio_privilege_expire, pub_video_privilege_expire, pub_data_stream_privilege_expire)
@staticmethod
def build_token_with_user_account_and_privilege(app_id, app_certificate, channel_name, account, token_expire,
join_channel_privilege_expire, pub_audio_privilege_expire,
pub_video_privilege_expire, pub_data_stream_privilege_expire):
"""
Generates an RTC token with the specified privilege.
This method supports generating a token with the following privileges:
- Joining an RTC channel.
- Publishing audio in an RTC channel.
- Publishing video in an RTC channel.
- Publishing data streams in an RTC channel.
The privileges for publishing audio, video, and data streams in an RTC channel apply only if you have
enabled co-host authentication.
A user can have multiple privileges. Each privilege is valid for a maximum of 24 hours.
The SDK triggers the onTokenPrivilegeWillExpire and onRequestToken callbacks when the token is about to expire
or has expired. The callbacks do not report the specific privilege affected, and you need to maintain
the respective timestamp for each privilege in your app logic. After receiving the callback, you need
to generate a new token, and then call renewToken to pass the new token to the SDK, or call joinChannel to re-join
the channel.
@note
Agora recommends setting a reasonable timestamp for each privilege according to your scenario.
Suppose the expiration timestamp for joining the channel is set earlier than that for publishing audio.
When the token for joining the channel expires, the user is immediately kicked off the RTC channel
and cannot publish any audio stream, even though the timestamp for publishing audio has not expired.
:param app_id: The App ID of your Agora project.
:param app_certificate: The App Certificate of your Agora project.
:param channel_name: The unique channel name for the Agora RTC session in string format. The string length must be less than 64 bytes. The channel name may contain the following characters:
- All lowercase English letters: a to z.
- All uppercase English letters: A to Z.
- All numeric characters: 0 to 9.
- The space character.
- "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
:param account: The user account.
:param token_expire: represented by the number of seconds elapsed since now. If, for example, you want to access the
Agora Service within 10 minutes after the token is generated, set token_expire as 600(seconds).
:param join_channel_privilege_expire: The Unix timestamp when the privilege for joining the channel expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set join_channel_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes.
:param pub_audio_privilege_expire: The Unix timestamp when the privilege for publishing audio expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set pub_audio_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes. If you do not want to enable this privilege,
set pub_audio_privilege_expire as the current Unix timestamp.
:param pub_video_privilege_expire: The Unix timestamp when the privilege for publishing video expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set pub_video_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes. If you do not want to enable this privilege,
set pub_video_privilege_expire as the current Unix timestamp.
:param pub_data_stream_privilege_expire: The Unix timestamp when the privilege for publishing data streams expires, represented
by the sum of the current timestamp plus the valid time period of the token. For example, if you set pub_data_stream_privilege_expire as the
current timestamp plus 600 seconds, the token expires in 10 minutes. If you do not want to enable this privilege,
set pub_data_stream_privilege_expire as the current Unix timestamp.
:return: The new Token
"""
token = AccessToken(app_id, app_certificate, expire=token_expire)
service_rtc = ServiceRtc(channel_name, account)
service_rtc.add_privilege(ServiceRtc.kPrivilegeJoinChannel, join_channel_privilege_expire)
service_rtc.add_privilege(ServiceRtc.kPrivilegePublishAudioStream, pub_audio_privilege_expire)
service_rtc.add_privilege(ServiceRtc.kPrivilegePublishVideoStream, pub_video_privilege_expire)
service_rtc.add_privilege(ServiceRtc.kPrivilegePublishDataStream, pub_data_stream_privilege_expire)
token.add_service(service_rtc)
return token.build()
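A minimal usage sketch of the builder above; the import path and credentials are placeholders, not values from this repository:
# Hypothetical usage; replace the placeholders with your own Agora credentials.
from RtcTokenBuilder2 import RtcTokenBuilder, Role_Publisher  # assumed import path

app_id = "<your Agora App ID>"
app_certificate = "<your Agora App Certificate>"
# Token and privileges both valid for 10 minutes (600 seconds from now).
token = RtcTokenBuilder.build_token_with_uid(
    app_id, app_certificate, channel_name="demo", uid=123,
    role=Role_Publisher, token_expire=600, privilege_expire=600)
print(token)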
|
90087be9e8d4eb93c89aa175aa4bf939c60c1316
|
ed75170ffe743eea5f2fedd518c21b61de55f879
|
/test/tbats/TBATSHarmonicsChoosingStrategy_test.py
|
66d0a505cd1955e285b1621aeddaea05b3c83e03
|
[
"MIT"
] |
permissive
|
intive-DataScience/tbats
|
d294fe6d3dcb4ec0b2fc0db5e6aaaae08ade14b5
|
184bd635e1aea6bd1dd0ac7fa2339257b9ca6bdb
|
refs/heads/master
| 2023-04-30T02:25:00.961248
| 2023-04-17T10:20:31
| 2023-04-17T10:20:31
| 162,722,338
| 162
| 21
|
MIT
| 2022-12-09T07:30:53
| 2018-12-21T14:19:21
|
Python
|
UTF-8
|
Python
| false
| false
| 11,449
|
py
|
TBATSHarmonicsChoosingStrategy_test.py
|
import pytest
import numpy as np
from tbats.tbats import HarmonicsChoosingStrategy, Context, Components, ModelParams
class TestTBATSHarmonicsChoosingStrategy(object):
class ModelMock:
def __init__(self, y, params, aic_score):
self.params = params
self.y = y
self.aic = aic_score
self.is_fitted = True
def calculate_aic(self):
return self.aic
class CaseMock:
def __init__(self, components, aic_score):
self.components = components
self.aic_score = aic_score
def fit(self, y):
params = ModelParams(self.components, alpha=0)
return TestTBATSHarmonicsChoosingStrategy.ModelMock(y, params, self.aic_score)
class ContextMock(Context):
def __init__(self, aic_score_map):
super().__init__(n_jobs=1)
self.aic_score_map = aic_score_map
pass
def create_case(self, components):
for (harmonics, aic_score) in self.aic_score_map:
if np.array_equal(components.seasonal_harmonics, harmonics):
return TestTBATSHarmonicsChoosingStrategy.CaseMock(components, aic_score)
raise Exception('Unknown score for harmonics ' + str(components.seasonal_harmonics))
@pytest.mark.parametrize(
"components, aic_score_map, expected_harmonics",
[
[ # no periods means no harmonics
dict(),
[
# strategy shouldn't even attempt to build a model
],
[],
],
[ # when score for 5 is better than for 6, strategy will be checking less complex models
dict(seasonal_periods=[21]),
[ # AIC score map for chosen harmonics
([1], 10.),
([6], 8.),
([7], 9.),
([5], 7.),
([4], 6.), # best model
([3], 7.),
],
[4],
],
[ # when score for 7 is better than one for 6, strategy will be checking more complex models
dict(seasonal_periods=[21]),
[ # AIC score map for chosen harmonics
([1], 20.),
([5], 9.),
([6], 8.),
([7], 7.),
([8], 6.),
([9], 5.), # best model
([10], 6.),
],
[9],
],
[ # if initial model is the best one, it should be returned
dict(seasonal_periods=[30]),
[ # AIC score map for chosen harmonics
([1], 1.), # best model
([5], 8.),
([6], 6.),
([7], 9.),
],
[1],
],
            [ # a model with a small number of harmonics
dict(seasonal_periods=[4]),
[ # AIC score map for chosen harmonics
([1], 1.), # this is the only possible model
],
[1],
],
[ # two periods
dict(seasonal_periods=[7, 365]),
[ # AIC score map for chosen harmonics
([1, 1], 20.),
([2, 1], 18.), # best model for 1st season
([3, 1], 19.),
([2, 5], 16.),
([2, 6], 15.),
([2, 7], 14.),
([2, 8], 13.), # best model
([2, 9], 16.),
],
[2, 8],
],
[ # three periods
dict(seasonal_periods=[7, 30, 365]),
[ # AIC score map for chosen harmonics
([1, 1, 1], 20.),
([2, 1, 1], 18.),
([3, 1, 1], 17.), # best model for 1st season
([4, 1, 1], 19.),
([3, 5, 1], 16.),
([3, 6, 1], 15.),
([3, 7, 1], 14.),
([3, 8, 1], 13.), # best model for 2nd season
([3, 9, 1], 16.),
([3, 8, 7], 14.),
([3, 8, 6], 12.),
([3, 8, 5], 11.),
([3, 8, 4], 10.), # best model
([3, 8, 3], 11.),
],
[3, 8, 4],
],
]
)
def test_choose(self, components, aic_score_map, expected_harmonics):
context = self.ContextMock(aic_score_map)
strategy = HarmonicsChoosingStrategy(context, checking_range=1)
harmonics = strategy.choose([1, 2, 3], Components(**components))
assert np.array_equal(expected_harmonics, harmonics)
@pytest.mark.parametrize(
"seasonal_periods, expected_max_harmonics",
[
[ # no periods means no harmonics
[], [],
],
[ # period of length 2 is limited to 1
[2], [1],
],
[ # floor((4 - 1) / 2) = 1
[4], [1],
],
[
[5], [2],
],
[
[9], [4],
],
[ # floor((28 - 1) / 2) = 13
[28], [13],
],
[ # 2nd seasonal harmonic for 16 is equal to 1st seasonal harmonic for 8
[8, 16], [3, 1]
],
[ # 10th seasonal harmonic for 100 is equal to 1st seasonal harmonic for 10
[10, 100], [4, 9]
],
[ # 2nd seasonal harmonic for 100 is equal to 1st seasonal harmonic for 50
# 5th seasonal harmonic for 50 is equal to 1st seasonal harmonic for 10
[10, 50, 100], [4, 4, 1]
],
[ # 5th seasonal harmonic for 50 is equal to 2nd seasonal harmonic for 20
[20, 50], [9, 4]
],
[ # This method does not work with floats, see _better implementation
[25.5, 51], [12, 25]
],
]
)
def test_calculate_max(self, seasonal_periods, expected_max_harmonics):
strategy = HarmonicsChoosingStrategy(Context(), checking_range=1)
harmonics = strategy.calculate_max(np.array(seasonal_periods))
assert np.array_equal(expected_max_harmonics, harmonics)
@pytest.mark.parametrize(
"seasonal_periods, expected_max_harmonics",
[
[ # no periods means no harmonics
[], [],
],
[ # period of length 2 is limited to 1
[2], [1],
],
[ # floor((4 - 1) / 2) = 1
[4], [1],
],
[
[5], [2],
],
[
[9], [4],
],
[ # floor((28 - 1) / 2) = 13
[28], [13],
],
[ # 2nd seasonal harmonic for 16 is equal to 1st seasonal harmonic for 8
[8, 16], [3, 1]
],
[ # 10th seasonal harmonic for 100 is equal to 1st seasonal harmonic for 10
[10, 100], [4, 9]
],
[ # 2nd seasonal harmonic for 100 is equal to 1st seasonal harmonic for 50
# 5th seasonal harmonic for 50 is equal to 1st seasonal harmonic for 10
[10, 50, 100], [4, 4, 1]
],
[ # 5th seasonal harmonic for 50 is equal to 2nd seasonal harmonic for 20
[20, 50], [9, 4]
],
[ # The better method also works with floats
[25.5, 51], [12, 1]
],
]
)
def test_calculate_max_better(self, seasonal_periods, expected_max_harmonics):
strategy = HarmonicsChoosingStrategy(Context(), checking_range=1)
harmonics = strategy.calculate_max(
np.asarray(seasonal_periods),
HarmonicsChoosingStrategy.max_harmonic_dependency_reduction_better
)
assert np.array_equal(expected_max_harmonics, harmonics)
@pytest.mark.parametrize(
"n_jobs, max_harmonic, expected_range",
[
[ # no harmonics to check, return empty array
12, 1, [],
],
[ # only 1 harmonic to check
12, 2, [2],
],
[ # 5 harmonics to check and 5 cores, should contain all harmonics
5, 6, range(2, 7),
],
[ # 32 harmonics to check and 32 cores, should contain all harmonics
32, 33, range(2, 34),
],
[ # only 1 core and 1 harmonic to check
1, 2, range(2, 3)
],
[ # only 1 core but needs to check those 3 models anyway
1, 12, range(5, 8)
],
[ # only 1 core, will check 3 most complex models
1, 5, range(3, 6)
],
[ # 5 cores, should check models around 6
5, 16, range(4, 9)
],
[ # 6 cores, should check models around 6
6, 16, range(3, 9)
],
[ # 8 cores, should check all models around 6
8, 11, range(2, 10)
],
[ # 4 cores, range should cover all of the most complex cases
4, 7, range(4, 8)
],
]
)
def test_initial_harmonics_to_check(self, n_jobs, max_harmonic, expected_range):
strategy = HarmonicsChoosingStrategy(Context(), checking_range=n_jobs)
obtained_range = strategy.initial_harmonics_to_check(max_harmonic)
assert np.array_equal(expected_range, obtained_range)
@pytest.mark.parametrize(
"n_jobs, max_harmonic, chosen_harmonic, previous_range, expected_range",
[
[ # no harmonics to check, return empty array
1, 1, 1, [], [],
],
[ # previously checked are 4 and 3, we should check 2 now
1, 4, 3, [3, 4], [2],
],
[ # we should check higher orders of harmonics
2, 10, 7, [5, 6, 7], [8, 9],
],
[ # we should check all lower orders of harmonics
8, 10, 5, [5, 6, 7], [2, 3, 4],
],
[ # nothing to check, we already checked lower and higher order models
8, 10, 6, [5, 6, 7], [],
],
[ # we have already chosen the simplest model
8, 10, 2, [2, 3, 4, 5, 6, 7], [],
],
[ # we have already chosen the most complex model
8, 4, 4, [2, 3, 4], [],
],
[ # we are still choosing the simplest model, check lower level harmonics
2, 12, 1, [5, 6, 7], [3, 4],
],
]
)
def test_next_harmonics_to_check(self, n_jobs, max_harmonic, chosen_harmonic, previous_range, expected_range):
strategy = HarmonicsChoosingStrategy(Context(), checking_range=n_jobs)
obtained_range = strategy.next_harmonics_to_check(
max_harmonic=max_harmonic,
previously_checked=previous_range,
chosen_harmonic=chosen_harmonic
)
assert np.array_equal(expected_range, obtained_range)
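The comments in test_calculate_max encode the single-period bound floor((p - 1) / 2), clamped below at 1; a standalone sketch of just that base rule, ignoring the cross-period dependency reduction the strategy also applies:
import math

def max_harmonics_for_period(p):
    # at most floor((p - 1) / 2) harmonics; every period admits at least 1
    return max(1, math.floor((p - 1) / 2))

assert max_harmonics_for_period(2) == 1
assert max_harmonics_for_period(4) == 1    # floor((4 - 1) / 2) = 1
assert max_harmonics_for_period(28) == 13  # floor((28 - 1) / 2) = 13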
|
08e0a21d36c1c298561f96ddf7b726d0d8547ed2
|
27b86f422246a78704e0e84983b2630533a47db6
|
/profiling/stress.py
|
6c11c58ae16c86a41815d63d76d91fdf1fe9d9ec
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,631
|
py
|
stress.py
|
# Copyright (c) 2020-2022, Manfred Moitzi
# License: MIT License
import pytest
import sys
import argparse
import time
import ezdxf
from ezdxf import recover
from itertools import chain
DIRS = [
"AutodeskSamples",
"AutodeskProducts",
"CADKitSamples",
"",
]
files = list(
chain(*[(ezdxf.options.test_files_path / d).glob("*.dxf") for d in DIRS])
)
@pytest.mark.parametrize("filename", files)
def test_readfile(filename):
try:
recover.readfile(filename)
except ezdxf.DXFStructureError:
pytest.fail(f"{filename}: DXFStructureError in recover mode.")
else:
assert True
if __name__ == "__main__":
import logging
from ezdxf import bbox, print_config
from ezdxf.math import Vec3
import warnings
# Suppress Matplotlib font replacement warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser("stress")
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="give more output",
)
parser.add_argument(
"-e",
"--extends",
action="store_true",
help="perform extends calculation",
)
parser.add_argument(
"-c",
"--cadkit",
action="store_true",
help="use only CADKit samples",
)
parser.add_argument(
"-l",
"--log",
action="store_true",
help="turn logging on",
)
args = parser.parse_args(sys.argv[1:])
print_config()
print("-" * 79)
if args.cadkit: # only CADKit samples
files = (ezdxf.options.test_files_path / "CADKitSamples").glob("*.dxf")
if args.log:
logging.basicConfig(level=logging.WARNING)
for name in files:
print(f'Loading file: "{name}"')
try:
t_start = time.perf_counter()
doc = ezdxf.readfile(name)
t_read = time.perf_counter()
auditor = doc.audit()
t_audit = time.perf_counter()
except ezdxf.DXFStructureError:
if args.verbose:
print("Regular loading function failed, using recover mode.")
t_start = time.perf_counter()
doc, auditor = recover.readfile(name)
t_read = time.perf_counter()
t_audit = t_read
if auditor.has_errors and args.verbose:
print(f"Found {len(auditor.errors)} unrecoverable error(s).")
if auditor.has_fixes and args.verbose:
print(f"Fixed {len(auditor.fixes)} error(s).")
ex_run = 0
if args.extends:
            ex_start = time.perf_counter()
            extents = bbox.extents(doc.modelspace())
            ex_run = time.perf_counter() - ex_start  # measure only the extents calculation
if args.verbose:
extmin = doc.header.get("$EXTMIN")
extmax = doc.header.get("$EXTMAX")
if extmin is not None:
e1 = Vec3(extmin).round(3)
e2 = Vec3(extmax).round(3)
print(f"Header var $EXTMIN/$EXTMAX: {e1}; {e2}")
                if extents.has_data:
                    e1 = extents.extmin.round(3)
                    e2 = extents.extmax.round(3)
                    print(f"Calculated $EXTMIN/$EXTMAX: {e1}; {e2}")
if args.verbose:
print("Timing: ", end="")
t_run = t_read - t_start
print(f" loading: {t_run:.3f}s", end="")
if t_read != t_audit:
print(f" audit: {t_audit - t_read:.3f}s", end="")
if ex_run:
print(f" extends: {ex_run:.3f}s", end="")
print()
print("-" * 79)
|
168348f94166a8a970e00fa3937afd6bf9edc9cc
|
9d0228f3f7ee9cee0794319d4affc161b0a7adc2
|
/qmpy/utils/rendering/renderable.py
|
3dc608f363c97408e66c2142aeaf7a0b585c6d7c
|
[
"MIT"
] |
permissive
|
wolverton-research-group/qmpy
|
db8a450a5708aac63aa39e104745b5cb0a4fa930
|
dede5bdf4aa3ea1187a7bc273e86336c24aadb25
|
refs/heads/master
| 2023-01-24T17:18:48.335699
| 2022-08-23T01:12:29
| 2022-08-23T01:12:29
| 18,248,720
| 124
| 65
|
MIT
| 2023-01-11T02:04:51
| 2014-03-29T19:18:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 485
|
py
|
renderable.py
|
#!/usr/bin/env python
import sys
if not "matplotlib" in sys.modules:
import matplotlib
try:
matplotlib.use("WXAgg")
except:
matplotlib.use("Agg")
class RenderingError(Exception):
pass
class Renderable(object):
def draw_in_matplotlib(self, **kwargs):
raise NotImplementedError
def get_flot_series(self, **kwargs):
raise NotImplementedError
def get_matplotlib_script(self, **kwargs):
raise NotImplementedError
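A minimal subclass sketch of the contract defined above (class name and plotted data are illustrative):
class LinePlot(Renderable):
    def draw_in_matplotlib(self, **kwargs):
        # render directly into the current matplotlib figure
        import matplotlib.pyplot as plt
        plt.plot([0, 1, 2], [0, 1, 4])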
|
e60b3111099b1bb3e3c7e7c102899003579baea4
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/parsers/certificates_enddate.py
|
8127a0212af13b0415c847b45be428c56162cd4c
|
[
"Apache-2.0"
] |
permissive
|
RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,688
|
py
|
certificates_enddate.py
|
"""
CertificatesEnddate - command ``/usr/bin/openssl x509 -noout -enddate -in path/to/cert/file``
=============================================================================================
This command gets the enddate of the certificate files.
Sample Output::
/usr/bin/find: '/etc/origin/node': No such file or directory
/usr/bin/find: '/etc/origin/master': No such file or directory
notAfter=May 25 16:39:40 2019 GMT
FileName= /etc/origin/node/cert.pem
unable to load certificate
139881193203616:error:0906D066:PEM routines:PEM_read_bio:bad end line:pem_lib.c:802:
unable to load certificate
140695459370912:error:0906D06C:PEM routines:PEM_read_bio:no start line:pem_lib.c:703:Expecting: TRUSTED CERTIFICATE
notAfter=May 25 16:39:40 2019 GMT
FileName= /etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem
notAfter=Dec 9 10:55:38 2017 GMT
FileName= /etc/pki/consumer/cert.pem
notAfter=Jan 1 04:59:59 2022 GMT
FileName= /etc/pki/entitlement/3343502840335059594.pem
notAfter=Aug 31 02:19:59 2017 GMT
FileName= /etc/pki/consumer/cert.pem
notAfter=Jan 1 04:59:59 2022 GMT
FileName= /etc/pki/entitlement/2387590574974617178.pem
Examples:
>>> type(cert_enddate)
<class 'insights.parsers.certificates_enddate.CertificatesEnddate'>
>>> paths = cert_enddate.certificates_path
>>> '/etc/origin/node/cert.pem' in paths
True
>>> cert_enddate.expiration_date('/etc/origin/node/cert.pem').datetime
datetime.datetime(2019, 5, 25, 16, 39, 40)
>>> cert_enddate.expiration_date('/etc/origin/node/cert.pem').str
'May 25 16:39:40 2019'
"""
from collections import namedtuple
from datetime import datetime
from insights.core import CommandParser
from insights.core.exceptions import SkipComponent
from insights.core.plugins import parser
from insights.specs import Specs
@parser(Specs.certificates_enddate)
class CertificatesEnddate(CommandParser, dict):
"""Class to parse the expiration date."""
ExpirationDate = namedtuple('ExpirationDate', ['str', 'datetime'])
"""namedtuple: contains the expiration date in string and datetime format."""
def parse_content(self, content):
"""Parse the content of crt files."""
datestamp = None
        for line in content:
            if datestamp and line.startswith("FileName="):
                self[line.split("=")[-1].strip()] = datestamp
                datestamp = None
            elif line.startswith("notAfter="):
                datestamp = line.split("=")[-1].rsplit(" ", 1)[0]
            else:
                datestamp = None
if not self:
raise SkipComponent("No certification files found.")
@property
def data(self):
""" Set data as property to keep compatibility """
return self
@property
def certificates_path(self):
"""list: Return filepaths in list or []."""
return self.data.keys() if self.data else []
def expiration_date(self, path):
"""This will return a namedtuple(['str', 'datetime']) contains the
expiration date in string and datetime format. If the expiration date
is unparsable, the ExpirationDate.datetime should be None.
Args:
path(str): The certificate file path.
Returns:
A ExpirationDate for available path. None otherwise.
"""
path_date = self.data.get(path)
if path_date:
try:
path_datetime = datetime.strptime(path_date, '%b %d %H:%M:%S %Y')
return self.ExpirationDate(path_date, path_datetime)
except Exception:
return self.ExpirationDate(path_date, None)
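A small sketch of driving the parser directly with lines from the sample output in the docstring; context_wrap is assumed to be the usual insights test helper:
from insights.tests import context_wrap  # assumed test helper

SAMPLE = '''notAfter=May 25 16:39:40 2019 GMT
FileName= /etc/origin/node/cert.pem'''

cert = CertificatesEnddate(context_wrap(SAMPLE))
# expected: ExpirationDate(str='May 25 16:39:40 2019', datetime=datetime(2019, 5, 25, 16, 39, 40))
print(cert.expiration_date('/etc/origin/node/cert.pem'))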
|
96f59265135066189a196dfe0986b83bf0952f6b
|
6c88b2cea38b2cead9e2402d46a8fc64949c53df
|
/pkg/codegen/testing/test/testdata/dash-named-schema/python/pulumi_foo_bar/__init__.py
|
5fcf9dc2e553b083d26115018cd0aed5d71cc889
|
[
"Apache-2.0"
] |
permissive
|
pulumi/pulumi
|
a9b36c32f0cdd445c22f9ca64ce26c9ae5147575
|
46e2753d02d46a1c077930eeccdfe6738f46c0d2
|
refs/heads/master
| 2023-08-19T10:25:49.849189
| 2023-08-16T04:59:07
| 2023-08-16T04:59:07
| 72,477,752
| 17,553
| 1,082
|
Apache-2.0
| 2023-09-14T21:05:35
| 2016-10-31T21:02:47
|
Go
|
UTF-8
|
Python
| false
| false
| 954
|
py
|
__init__.py
|
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from . import _utilities
import typing
# Export this package's modules as members:
from .provider import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_foo_bar.submodule1 as __submodule1
submodule1 = __submodule1
else:
submodule1 = _utilities.lazy_import('pulumi_foo_bar.submodule1')
_utilities.register(
resource_modules="""
[
{
"pkg": "foo-bar",
"mod": "submodule1",
"fqn": "pulumi_foo_bar.submodule1",
"classes": {
"foo-bar:submodule1:FOOEncryptedBarClass": "FOOEncryptedBarClass",
"foo-bar:submodule1:ModuleResource": "ModuleResource"
}
}
]
""",
resource_packages="""
[
{
"pkg": "foo-bar",
"token": "pulumi:providers:foo-bar",
"fqn": "pulumi_foo_bar",
"class": "Provider"
}
]
"""
)
|
4aa9d8867cdcaf38ab4b7e6dca43091d8fea3d1e
|
d7fd46dfd8aab520c4958fa065367e168b6bfee7
|
/tests/spaces/scalar_test.py
|
9b827f2c6fe99448d6018c8dd4ad0ff941e4a3e9
|
[
"MIT"
] |
permissive
|
facebookresearch/CompilerGym
|
f04a79fbfdbaf8afd6920ec205db6f1b6003d073
|
9e0c0beb12da1e1ea82ae6ce920713ee28dda4c9
|
refs/heads/development
| 2023-08-31T09:17:48.967970
| 2023-03-10T19:29:56
| 2023-03-10T19:29:56
| 312,059,069
| 787
| 126
|
MIT
| 2023-03-10T19:29:58
| 2020-11-11T18:44:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,431
|
py
|
scalar_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:scalar."""
from copy import copy, deepcopy
from compiler_gym.spaces import Scalar
from tests.test_main import main
def test_sample():
space = Scalar(name="test", min=-10, max=10, dtype=int)
x = space.sample()
assert isinstance(x, int)
assert -10 <= x <= 10
def test_int_contains():
space = Scalar(name="test", min=-10, max=10, dtype=int)
assert space.contains(-10)
assert not space.contains(-11)
assert not space.contains(0.5)
def test_int_contains_no_upper_bound():
space = Scalar(name="test", min=0, max=None, dtype=int)
assert space.contains(0)
assert not space.contains(-1)
assert space.contains(1000)
def test_equality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=None, dtype=int)
assert space_a == space_b
def test_dtype_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=None, dtype=float)
assert space_a != space_b
def test_upper_bound_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=5, dtype=int)
assert space_a != space_b
def test_lower_bound_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=None, max=None, dtype=int)
assert space_a != space_b
def test_equal():
assert Scalar(name="test_scalar", min=-10, max=10, dtype=int) == Scalar(
name="test_scalar", min=-10, max=10, dtype=int
)
def test_not_equal():
scalar = Scalar(name="test_scalar", min=-10, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar_2", min=-10, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-5, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-10, max=5, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-10, max=10, dtype=float)
assert scalar != "not_as_scalar"
def test_deepcopy_regression_test():
"""Test to reproduce github.com/facebookresearch/CompilerGym/issues/768."""
x = Scalar(name="foo")
copy(x)
deepcopy(x)
if __name__ == "__main__":
main()
|
2ce7a3e40a6d37aca872720447d640f8119beaf1
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/serve/_private/deployment_method_executor_node.py
|
b618ca2bcb2821dea89bf86e5f93759fa160e06c
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,471
|
py
|
deployment_method_executor_node.py
|
from typing import Any, Dict, List
from ray import ObjectRef
from ray.dag import DAGNode
from ray.dag.constants import DAGNODE_TYPE_KEY, PARENT_CLASS_NODE_KEY
from ray.dag.format_utils import get_dag_node_str
class DeploymentMethodExecutorNode(DAGNode):
"""The lightweight executor DAGNode of DeploymentMethodNode that optimizes
for efficiency.
- We need Ray DAGNode's traversal and replacement mechanism to deal
with deeply nested nodes as args in the DAG
    - Meanwhile, __init__, _copy_impl and _execute_impl are on the critical
        path of execution for every request.
    Therefore, for Serve we introduce a minimal-weight node as the final product
    of DAG transformation, which is used in actual execution as well as
    deployment.
"""
def __init__(
self,
deployment_method_name: str,
dag_args,
dag_kwargs,
other_args_to_resolve=None,
):
super().__init__(
dag_args, dag_kwargs, {}, other_args_to_resolve=other_args_to_resolve
)
self._deployment_node_replaced_by_handle = other_args_to_resolve[
PARENT_CLASS_NODE_KEY
]
self._deployment_method_name = deployment_method_name
def _copy_impl(
self,
new_args: List[Any],
new_kwargs: Dict[str, Any],
new_options: Dict[str, Any],
new_other_args_to_resolve: Dict[str, Any],
) -> "DeploymentMethodExecutorNode":
return DeploymentMethodExecutorNode(
self._deployment_method_name,
new_args,
new_kwargs,
other_args_to_resolve=new_other_args_to_resolve,
)
def _execute_impl(self, *args, **kwargs) -> ObjectRef:
"""Executor of DeploymentNode getting called each time on dag.execute.
        The execute implementation is recursive, that is, the method nodes will
        receive whatever this method returns. We return a handle here so the
        method node can call it directly.
"""
method_body = getattr(
self._deployment_node_replaced_by_handle, self._deployment_method_name
)
return method_body.remote(*self._bound_args, **self._bound_kwargs)
def __str__(self) -> str:
return get_dag_node_str(self, str(self._deployment_method_name))
def to_json(self) -> Dict[str, Any]:
return {
DAGNODE_TYPE_KEY: DeploymentMethodExecutorNode.__name__,
"deployment_method_name": self._deployment_method_name,
"args": self.get_args(),
"kwargs": self.get_kwargs(),
"other_args_to_resolve": self.get_other_args_to_resolve(),
"uuid": self.get_stable_uuid(),
}
def get_result_type(self) -> str:
"""Get type of the output of this DAGNode.
Generated by ray.experimental.gradio_utils.type_to_string().
"""
if "result_type_string" in self._bound_other_args_to_resolve:
return self._bound_other_args_to_resolve["result_type_string"]
@classmethod
def from_json(cls, input_json):
assert input_json[DAGNODE_TYPE_KEY] == DeploymentMethodExecutorNode.__name__
node = cls(
input_json["deployment_method_name"],
input_json["args"],
input_json["kwargs"],
other_args_to_resolve=input_json["other_args_to_resolve"],
)
node._stable_uuid = input_json["uuid"]
return node
|
7cdf40cb1fcfc2d8609b22f210eb39187340272e
|
ec7591c3f478c43e76257aaa500d8f6a2e763d74
|
/stanza/tests/tokenization/test_tokenize_utils.py
|
cf8ad7380e19ac252c78bae9151d56510fc69ad7
|
[
"Apache-2.0"
] |
permissive
|
stanfordnlp/stanza
|
5cc3dbe70a96dd565639b7dae1efde6b4fa76985
|
c530c9af647d521262b56b717bcc38b0cfc5f1b8
|
refs/heads/main
| 2023-09-01T12:01:38.980322
| 2023-03-14T16:10:05
| 2023-03-14T16:10:05
| 104,854,615
| 4,281
| 599
|
NOASSERTION
| 2023-09-10T00:31:36
| 2017-09-26T08:00:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,210
|
py
|
test_tokenize_utils.py
|
"""
Very simple test of the sentence slicing by <PAD> tags
TODO: could add a bunch more simple tests for the tokenization utils
"""
import pytest
import stanza
from stanza import Pipeline
from stanza.tests import *
from stanza.models.common import doc
from stanza.models.tokenization import data
from stanza.models.tokenization import utils
pytestmark = [pytest.mark.travis, pytest.mark.pipeline]
def test_find_spans():
"""
Test various raw -> span manipulations
"""
raw = ['u', 'n', 'b', 'a', 'n', ' ', 'm', 'o', 'x', ' ', 'o', 'p', 'a', 'l']
assert utils.find_spans(raw) == [(0, 14)]
raw = ['u', 'n', 'b', 'a', 'n', ' ', 'm', 'o', 'x', ' ', 'o', 'p', 'a', 'l', '<PAD>']
assert utils.find_spans(raw) == [(0, 14)]
raw = ['<PAD>', 'u', 'n', 'b', 'a', 'n', ' ', 'm', 'o', 'x', ' ', 'o', 'p', 'a', 'l', '<PAD>']
assert utils.find_spans(raw) == [(1, 15)]
raw = ['<PAD>', 'u', 'n', 'b', 'a', 'n', ' ', 'm', 'o', 'x', ' ', 'o', 'p', 'a', 'l']
assert utils.find_spans(raw) == [(1, 15)]
raw = ['<PAD>', 'u', 'n', 'b', 'a', 'n', '<PAD>', 'm', 'o', 'x', ' ', 'o', 'p', 'a', 'l']
assert utils.find_spans(raw) == [(1, 6), (7, 15)]
def check_offsets(doc, expected_offsets):
"""
    Compare the start_char and end_char of the tokens in the doc with the given list of lists of offsets
"""
assert len(doc.sentences) == len(expected_offsets)
for sentence, offsets in zip(doc.sentences, expected_offsets):
assert len(sentence.tokens) == len(offsets)
for token, offset in zip(sentence.tokens, offsets):
assert token.start_char == offset[0]
assert token.end_char == offset[1]
def test_match_tokens_with_text():
"""
Test the conversion of pretokenized text to Document
"""
doc = utils.match_tokens_with_text([["This", "is", "a", "test"]], "Thisisatest")
expected_offsets = [[(0, 4), (4, 6), (6, 7), (7, 11)]]
check_offsets(doc, expected_offsets)
doc = utils.match_tokens_with_text([["This", "is", "a", "test"], ["unban", "mox", "opal", "!"]], "Thisisatest unban mox opal!")
expected_offsets = [[(0, 4), (4, 6), (6, 7), (7, 11)],
[(13, 18), (19, 22), (24, 28), (28, 29)]]
check_offsets(doc, expected_offsets)
with pytest.raises(ValueError):
doc = utils.match_tokens_with_text([["This", "is", "a", "test"]], "Thisisatestttt")
with pytest.raises(ValueError):
doc = utils.match_tokens_with_text([["This", "is", "a", "test"]], "Thisisates")
with pytest.raises(ValueError):
doc = utils.match_tokens_with_text([["This", "iz", "a", "test"]], "Thisisatest")
def test_long_paragraph():
"""
Test the tokenizer's capacity to break text up into smaller chunks
"""
pipeline = Pipeline("en", dir=TEST_MODELS_DIR, processors="tokenize")
tokenizer = pipeline.processors['tokenize']
raw_text = "TIL not to ask a date to dress up as Smurfette on a first date. " * 100
# run a test to make sure the chunk operation is called
# if not, the test isn't actually testing what we need to test
batches = data.DataLoader(tokenizer.config, input_text=raw_text, vocab=tokenizer.vocab, evaluation=True, dictionary=tokenizer.trainer.dictionary)
batches.advance_old_batch = None
with pytest.raises(TypeError):
_, _, _, document = utils.output_predictions(None, tokenizer.trainer, batches, tokenizer.vocab, None, 3000,
orig_text=raw_text,
no_ssplit=tokenizer.config.get('no_ssplit', False))
# a new DataLoader should not be crippled as the above one was
batches = data.DataLoader(tokenizer.config, input_text=raw_text, vocab=tokenizer.vocab, evaluation=True, dictionary=tokenizer.trainer.dictionary)
_, _, _, document = utils.output_predictions(None, tokenizer.trainer, batches, tokenizer.vocab, None, 3000,
orig_text=raw_text,
no_ssplit=tokenizer.config.get('no_ssplit', False))
document = doc.Document(document, raw_text)
assert len(document.sentences) == 100
|
94f6aab814bee48baf46b22d495984614908af44
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/bfs求无权图的最短路径/Group Points-bfs波纹扩展.py
|
48a3b08ec438b41ab50f35b2f6abcec12659508d
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
Group Points-bfs波纹扩展.py
|
# Assuming you can group any point a and b if the Euclidean distance between them is ≤ k,
# return the total number of disjoint groups.
# Group points by distance: any two points within distance <= k share a group; return the number of groups.
from collections import deque
# n ≤ 1,000
class Solution:
def solve(self, points, k):
def distance(x1, y1, x2, y2):
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def bfs(x1, y1):
if (x1, y1) in visited:
return
visited.add((x1, y1))
queue = deque([(x1, y1)])
while queue:
x1, y1 = queue.popleft()
for x2, y2 in points:
                    if (x2, y2) not in visited:
if distance(x1, y1, x2, y2) <= k:
queue.append((x2, y2))
visited.add((x2, y2))
visited = set()
count = 0
# O(n^2)
for (x, y) in points:
            if (x, y) not in visited:
bfs(x, y)
count += 1
return count
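A quick check of the grouping above; the points are arbitrary illustration values:
# (0, 0) and (1, 0) are within distance 2 of each other; (10, 10) is isolated.
points = [(0, 0), (1, 0), (10, 10)]
assert Solution().solve(points, 2) == 2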
|
7b9e573a96dcd5b93698c268a4810dc3a1279ddb
|
23f8bccbd30267bf43ea11e4093d79865d8af917
|
/Chapter01/Ch01_Code/First_GUI.py
|
a0732b3c5e4cc311cbe7946a2f1322b9b2ef6524
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-GUI-Programming-Cookbook-Second-Edition
|
1450d6903178865c9928d3bc57c99c623944b575
|
1ca9866be88b4191a5a951cbc4ead7aad903f226
|
refs/heads/master
| 2023-02-05T11:22:21.293438
| 2023-01-30T09:54:00
| 2023-01-30T09:54:00
| 91,304,266
| 343
| 197
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
First_GUI.py
|
'''
May 2017
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
# Create instance
win = tk.Tk()
# Add a title
win.title("Python GUI")
#======================
# Start GUI
#======================
win.mainloop()
|
99f31a646ad44e28ead5f9d7aaaefe4b946888d9
|
0869d7edac80e8aebe951682a2cc311a083eade3
|
/Python/tdw/FBOutput/QuitSignal.py
|
27fe7859ef589c5991e52d017a98d45b923cae86
|
[
"BSD-2-Clause"
] |
permissive
|
threedworld-mit/tdw
|
7d5b4453832647733ff91ad7a7ce7ec2320454c1
|
9df96fba455b327bb360d8dd5886d8754046c690
|
refs/heads/master
| 2023-09-01T11:45:28.132298
| 2023-08-31T16:13:30
| 2023-08-31T16:13:30
| 245,492,977
| 427
| 75
|
BSD-2-Clause
| 2023-09-14T17:36:12
| 2020-03-06T18:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 932
|
py
|
QuitSignal.py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
class QuitSignal(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsQuitSignal(cls, buf, offset):
n = tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset, buf, offset)
x = QuitSignal()
x.Init(buf, n + offset)
return x
# QuitSignal
def Init(self, buf, pos):
self._tab = tdw.flatbuffers.table.Table(buf, pos)
# QuitSignal
def Ok(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(tdw.flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def QuitSignalStart(builder): builder.StartObject(1)
def QuitSignalAddOk(builder, ok): builder.PrependBoolSlot(0, ok, 0)
def QuitSignalEnd(builder): return builder.EndObject()
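A build-and-read round trip through the generated accessors; this sketch assumes tdw's vendored flatbuffers exposes the standard Builder API:
builder = tdw.flatbuffers.Builder(0)
QuitSignalStart(builder)
QuitSignalAddOk(builder, True)
builder.Finish(QuitSignalEnd(builder))
buf = builder.Output()
signal = QuitSignal.GetRootAsQuitSignal(buf, 0)
assert signal.Ok() is True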
|
a708cfeb8209a5d2322ab259fec60c12a21e1041
|
360328d098a74581d0822fba489dd15e0d4e7ab3
|
/src/richie/plugins/section/forms.py
|
a38d9dcb22adcd039f4aff061276cf465f792985
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
openfun/richie
|
0cef545486267bfb40e75e5fb2ce2a74f85a53ff
|
f2d46fc46b271eb3b4d565039a29c15ba15f027c
|
refs/heads/master
| 2023-08-31T23:51:37.714179
| 2023-08-29T15:25:04
| 2023-08-29T15:48:39
| 111,388,461
| 238
| 96
|
MIT
| 2023-09-13T12:48:53
| 2017-11-20T09:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
forms.py
|
"""
Section plugin forms
"""
from django import forms
from djangocms_attributes_field.widgets import AttributesWidget
from djangocms_text_ckeditor.widgets import TextEditorWidget
from .models import Section
CKEDITOR_CONFIGURATION_NAME = "CKEDITOR_INLINE_BOLD_CONFIGURATION"
class SectionForm(forms.ModelForm):
"""
Plugin form used to fill its content from frontend admin.
"""
class Meta:
"""
Form meta attributes
"""
model = Section
widgets = {
"title": TextEditorWidget(configuration=CKEDITOR_CONFIGURATION_NAME),
"attributes": AttributesWidget(),
}
fields = {
"title",
"template",
"attributes",
}
|
a374c8569460eff57d8282f20d4a40f6b990da90
|
0933f9ecf49ed89db35cee051a64648886f13e40
|
/tests/test_move.py
|
8eb1af752e01a6f04df491f75e001f614dc22ecc
|
[
"MIT"
] |
permissive
|
PyFilesystem/pyfilesystem2
|
63da155692594d0405dd237db7d66be243658249
|
8ed9dc495d8ba2f83fbb2a1145d34d92e13644be
|
refs/heads/master
| 2023-09-01T17:05:54.176292
| 2022-10-18T10:59:07
| 2022-10-18T10:59:07
| 70,920,962
| 1,956
| 254
|
MIT
| 2023-08-24T20:00:22
| 2016-10-14T15:05:27
|
Python
|
UTF-8
|
Python
| false
| false
| 8,991
|
py
|
test_move.py
|
from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
from parameterized import parameterized, parameterized_class
import fs.move
from fs import open_fs
from fs.errors import FSError, ResourceReadOnly
from fs.path import join
from fs.wrap import read_only
@parameterized_class(("preserve_time",), [(True,), (False,)])
class TestMoveCheckTime(unittest.TestCase):
def test_move_fs(self):
namespaces = ("details", "modified")
src_fs = open_fs("mem://")
src_fs.makedirs("foo/bar")
src_fs.touch("test.txt")
src_fs.touch("foo/bar/baz.txt")
src_file1_info = src_fs.getinfo("test.txt", namespaces)
src_file2_info = src_fs.getinfo("foo/bar/baz.txt", namespaces)
dst_fs = open_fs("mem://")
dst_fs.create("test.txt")
dst_fs.setinfo("test.txt", {"details": {"modified": 1000000}})
fs.move.move_fs(src_fs, dst_fs, preserve_time=self.preserve_time)
self.assertTrue(src_fs.isempty("/"))
self.assertTrue(dst_fs.isdir("foo/bar"))
self.assertTrue(dst_fs.isfile("test.txt"))
self.assertTrue(dst_fs.isfile("foo/bar/baz.txt"))
if self.preserve_time:
dst_file1_info = dst_fs.getinfo("test.txt", namespaces)
dst_file2_info = dst_fs.getinfo("foo/bar/baz.txt", namespaces)
self.assertEqual(dst_file1_info.modified, src_file1_info.modified)
self.assertEqual(dst_file2_info.modified, src_file2_info.modified)
def test_move_file(self):
namespaces = ("details", "modified")
with open_fs("mem://") as src_fs, open_fs("mem://") as dst_fs:
src_fs.writetext("source.txt", "Source")
src_fs_file_info = src_fs.getinfo("source.txt", namespaces)
fs.move.move_file(
src_fs,
"source.txt",
dst_fs,
"dest.txt",
preserve_time=self.preserve_time,
)
self.assertFalse(src_fs.exists("source.txt"))
self.assertEqual(dst_fs.readtext("dest.txt"), "Source")
if self.preserve_time:
dst_fs_file_info = dst_fs.getinfo("dest.txt", namespaces)
self.assertEqual(src_fs_file_info.modified, dst_fs_file_info.modified)
def test_move_dir(self):
namespaces = ("details", "modified")
src_fs = open_fs("mem://")
src_fs.makedirs("foo/bar")
src_fs.touch("test.txt")
src_fs.touch("foo/bar/baz.txt")
src_file2_info = src_fs.getinfo("foo/bar/baz.txt", namespaces)
dst_fs = open_fs("mem://")
dst_fs.create("test.txt")
dst_fs.setinfo("test.txt", {"details": {"modified": 1000000}})
fs.move.move_dir(src_fs, "/foo", dst_fs, "/", preserve_time=self.preserve_time)
self.assertFalse(src_fs.exists("foo"))
self.assertTrue(src_fs.isfile("test.txt"))
self.assertTrue(dst_fs.isdir("bar"))
self.assertTrue(dst_fs.isfile("bar/baz.txt"))
if self.preserve_time:
dst_file2_info = dst_fs.getinfo("bar/baz.txt", namespaces)
self.assertEqual(dst_file2_info.modified, src_file2_info.modified)
class TestMove(unittest.TestCase):
def test_move_file_tempfs(self):
with open_fs("temp://") as src, open_fs("temp://") as dst:
src_dir = src.makedir("Some subfolder")
src_dir.writetext("file.txt", "Content")
dst_dir = dst.makedir("dest dir")
fs.move.move_file(src_dir, "file.txt", dst_dir, "target.txt")
self.assertFalse(src.exists("Some subfolder/file.txt"))
self.assertEqual(dst.readtext("dest dir/target.txt"), "Content")
def test_move_file_fs_urls(self):
# create a temp dir to work on
with open_fs("temp://") as tmp:
path = tmp.getsyspath("/")
tmp.makedir("subdir_src")
tmp.writetext("subdir_src/file.txt", "Content")
tmp.makedir("subdir_dst")
fs.move.move_file(
"osfs://" + join(path, "subdir_src"),
"file.txt",
"osfs://" + join(path, "subdir_dst"),
"target.txt",
)
self.assertFalse(tmp.exists("subdir_src/file.txt"))
self.assertEqual(tmp.readtext("subdir_dst/target.txt"), "Content")
def test_move_file_same_fs_read_only_source(self):
with open_fs("temp://") as tmp:
path = tmp.getsyspath("/")
tmp.writetext("file.txt", "Content")
src = read_only(open_fs(path))
dst = tmp.makedir("sub")
with self.assertRaises(ResourceReadOnly):
fs.move.move_file(src, "file.txt", dst, "target_file.txt")
self.assertTrue(src.exists("file.txt"))
self.assertFalse(
dst.exists("target_file.txt"), "file should not have been copied over"
)
def test_move_file_read_only_mem_source(self):
with open_fs("mem://") as src, open_fs("mem://") as dst:
src.writetext("file.txt", "Content")
dst_sub = dst.makedir("sub")
src_ro = read_only(src)
with self.assertRaises(ResourceReadOnly):
fs.move.move_file(src_ro, "file.txt", dst_sub, "target.txt")
self.assertTrue(src.exists("file.txt"))
self.assertFalse(
dst_sub.exists("target.txt"), "file should not have been copied over"
)
def test_move_file_read_only_mem_dest(self):
with open_fs("mem://") as src, open_fs("mem://") as dst:
src.writetext("file.txt", "Content")
dst_ro = read_only(dst)
with self.assertRaises(ResourceReadOnly):
fs.move.move_file(src, "file.txt", dst_ro, "target.txt")
self.assertTrue(src.exists("file.txt"))
self.assertFalse(
dst_ro.exists("target.txt"), "file should not have been copied over"
)
@parameterized.expand([("temp", "temp://"), ("mem", "mem://")])
def test_move_file_overwrite(self, _, fs_url):
# we use TempFS and MemoryFS in order to make sure the optimized code path
        # behaves like the regular one (TempFS tests the optimized code path).
with open_fs(fs_url) as src, open_fs(fs_url) as dst:
src.writetext("file.txt", "source content")
dst.writetext("target.txt", "target content")
self.assertTrue(src.exists("file.txt"))
self.assertFalse(src.exists("target.txt"))
self.assertFalse(dst.exists("file.txt"))
self.assertTrue(dst.exists("target.txt"))
fs.move.move_file(src, "file.txt", dst, "target.txt")
self.assertFalse(src.exists("file.txt"))
self.assertFalse(src.exists("target.txt"))
self.assertFalse(dst.exists("file.txt"))
self.assertTrue(dst.exists("target.txt"))
            self.assertEqual(dst.readtext("target.txt"), "source content")
@parameterized.expand([("temp", "temp://"), ("mem", "mem://")])
def test_move_file_overwrite_itself(self, _, fs_url):
# we use TempFS and MemoryFS in order to make sure the optimized code path
        # behaves like the regular one (TempFS tests the optimized code path).
with open_fs(fs_url) as tmp:
tmp.writetext("file.txt", "content")
fs.move.move_file(tmp, "file.txt", tmp, "file.txt")
self.assertTrue(tmp.exists("file.txt"))
            self.assertEqual(tmp.readtext("file.txt"), "content")
@parameterized.expand([("temp", "temp://"), ("mem", "mem://")])
def test_move_file_overwrite_itself_relpath(self, _, fs_url):
# we use TempFS and MemoryFS in order to make sure the optimized code path
        # behaves like the regular one (TempFS tests the optimized code path).
with open_fs(fs_url) as tmp:
new_dir = tmp.makedir("dir")
new_dir.writetext("file.txt", "content")
fs.move.move_file(tmp, "dir/../dir/file.txt", tmp, "dir/file.txt")
self.assertTrue(tmp.exists("dir/file.txt"))
            self.assertEqual(tmp.readtext("dir/file.txt"), "content")
@parameterized.expand([(True,), (False,)])
def test_move_file_cleanup_on_error(self, cleanup):
with open_fs("mem://") as src, open_fs("mem://") as dst:
src.writetext("file.txt", "Content")
with mock.patch.object(src, "remove") as mck:
mck.side_effect = FSError
with self.assertRaises(FSError):
fs.move.move_file(
src,
"file.txt",
dst,
"target.txt",
cleanup_dst_on_error=cleanup,
)
self.assertTrue(src.exists("file.txt"))
self.assertEqual(not dst.exists("target.txt"), cleanup)
|
f1b3cf162d773bb7bf271401d6529930ff5ff093
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayDataAiserviceJunengLoanQueryModel.py
|
2efd594c97c348bfc49473c123efc03e8c5f2163
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
AlipayDataAiserviceJunengLoanQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataAiserviceJunengLoanQueryModel(object):
def __init__(self):
self._extension_info = None
self._hashed_cert_no = None
self._institution_uuid = None
self._request_uuid = None
self._user_feature = None
@property
def extension_info(self):
return self._extension_info
@extension_info.setter
def extension_info(self, value):
self._extension_info = value
@property
def hashed_cert_no(self):
return self._hashed_cert_no
@hashed_cert_no.setter
def hashed_cert_no(self, value):
self._hashed_cert_no = value
@property
def institution_uuid(self):
return self._institution_uuid
@institution_uuid.setter
def institution_uuid(self, value):
self._institution_uuid = value
@property
def request_uuid(self):
return self._request_uuid
@request_uuid.setter
def request_uuid(self, value):
self._request_uuid = value
@property
def user_feature(self):
return self._user_feature
@user_feature.setter
def user_feature(self, value):
self._user_feature = value
def to_alipay_dict(self):
params = dict()
if self.extension_info:
if hasattr(self.extension_info, 'to_alipay_dict'):
params['extension_info'] = self.extension_info.to_alipay_dict()
else:
params['extension_info'] = self.extension_info
if self.hashed_cert_no:
if hasattr(self.hashed_cert_no, 'to_alipay_dict'):
params['hashed_cert_no'] = self.hashed_cert_no.to_alipay_dict()
else:
params['hashed_cert_no'] = self.hashed_cert_no
if self.institution_uuid:
if hasattr(self.institution_uuid, 'to_alipay_dict'):
params['institution_uuid'] = self.institution_uuid.to_alipay_dict()
else:
params['institution_uuid'] = self.institution_uuid
if self.request_uuid:
if hasattr(self.request_uuid, 'to_alipay_dict'):
params['request_uuid'] = self.request_uuid.to_alipay_dict()
else:
params['request_uuid'] = self.request_uuid
if self.user_feature:
if hasattr(self.user_feature, 'to_alipay_dict'):
params['user_feature'] = self.user_feature.to_alipay_dict()
else:
params['user_feature'] = self.user_feature
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataAiserviceJunengLoanQueryModel()
if 'extension_info' in d:
o.extension_info = d['extension_info']
if 'hashed_cert_no' in d:
o.hashed_cert_no = d['hashed_cert_no']
if 'institution_uuid' in d:
o.institution_uuid = d['institution_uuid']
if 'request_uuid' in d:
o.request_uuid = d['request_uuid']
if 'user_feature' in d:
o.user_feature = d['user_feature']
return o
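The to_alipay_dict/from_alipay_dict pair above round-trips plain values; a minimal sketch with illustrative data:
m = AlipayDataAiserviceJunengLoanQueryModel()
m.request_uuid = "req-001"      # illustrative value
m.hashed_cert_no = "abcd1234"   # illustrative value
d = m.to_alipay_dict()          # {'hashed_cert_no': 'abcd1234', 'request_uuid': 'req-001'}
assert AlipayDataAiserviceJunengLoanQueryModel.from_alipay_dict(d).request_uuid == "req-001"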
|
396a45708f2a7aede1000e7735cefec7e02aa63f
|
b54db4f80d50fac2e880fa32312d5e39fd71cdf0
|
/torchelie/recipes/cut.py
|
99e2514db26e30a1f7de05a0519bde1dab205140
|
[
"MIT"
] |
permissive
|
Vermeille/Torchelie
|
91fde0145d67a3b50472db4f1429937fe277e45b
|
3b09ea9a4cfa195aa78dcac676aab1c43815bd53
|
refs/heads/master
| 2023-08-17T08:26:44.549037
| 2023-08-14T15:49:16
| 2023-08-14T15:49:16
| 196,767,053
| 124
| 14
|
MIT
| 2023-08-21T12:22:42
| 2019-07-13T21:14:50
|
Python
|
UTF-8
|
Python
| false
| false
| 15,645
|
py
|
cut.py
|
import torch
import torchelie as tch
import torchelie.utils as tu
from torchelie.recipes.gan import GANRecipe
import torchvision.transforms as TF
import torchelie.loss.gan.standard as gan_loss
from torchelie.loss.gan.penalty import zero_gp
from torchelie.datasets.pix2pix import UnlabeledImages
from torchelie.models import *
import torch.nn as nn
"""
MtF:
torchelie.recipes.unpaired
--r0-D 0.01
--r0-M 0.1
--consistency 1
"""
@tu.experimental
class GradientPenaltyM:
def __init__(self, gamma):
self.gamma = gamma
self.iters = 0
self.last_norm = float('nan')
def do(self, model, fake_dst, src, real_dst, objective_norm: float):
fake_dst = fake_dst.detach()
src = src.detach()
t = torch.rand(fake_dst.shape[0], 1, 1, 1, device=fake_dst.device)
fake_dst = t * fake_dst + (1 - t) * real_dst
fake_dst.requires_grad_(True)
src.requires_grad_(True)
out = model(fake_dst, src)['matches'].sum()
g = torch.autograd.grad(outputs=out,
inputs=fake_dst,
create_graph=True,
only_inputs=True)[0]
g_norm = g.pow(2).sum(dim=(1, 2, 3)).add_(1e-8).sqrt()
return (g_norm - objective_norm).pow(2).mean(), g_norm.mean().item()
def __call__(self, model, fake_dst, src, real_dst):
if self.iters < 100 or self.iters % 4 == 0:
real_dst = real_dst.detach()
fake_dst = fake_dst.detach()
gp, g_norm = self.do(model, fake_dst, src, real_dst, 0)
# Sync the gradient on the next backward
if torch.any(torch.isnan(gp)):
gp.detach_()
else:
(4 * self.gamma * gp).backward()
self.last_norm = g_norm
self.iters += 1
return self.last_norm
class Matcher(nn.Module):
@tu.experimental
def __init__(self, n_scales=3):
super().__init__()
proj_size = 256
self.n_scales = n_scales
self.nets = nn.ModuleDict()
self.proj_As = nn.ModuleDict()
self.proj_Bs = nn.ModuleDict()
for i in range(n_scales):
net = patch34().remove_batchnorm()
net.classifier = nn.Sequential()
net.to_equal_lr()
proj_A = nn.Sequential(
nn.LeakyReLU(0.2, False),
tu.kaiming(tnn.Conv1x1(proj_size, proj_size), dynamic=True))
proj_B = nn.Identity()
self.nets[str(i)] = net
self.proj_As[str(i)] = proj_A
self.proj_Bs[str(i)] = proj_B
    def barlow(self, f1, f2):
        # Per-location cross-batch similarity: the translated image's features
        # should match the features of its own source within the batch (identity labels).
f1 = F.normalize(f1, dim=1)
f2 = F.normalize(f2, dim=1)
n, c, h, w = f1.shape
out = torch.bmm(
f1.permute(2, 3, 0, 1).reshape(-1, n, c),
f2.permute(2, 3, 0, 1).reshape(-1, n, c).permute(0, 2, 1))
out = out.view(h, w, n, n).permute(2, 3, 0, 1)
labels = torch.eye(n, device=out.device)
labels = labels.view(n, n, 1, 1).expand(n, n, h, w)
return out, F.smooth_l1_loss(out, labels, beta=0.1)
def forward(self, fake, ins):
total_loss = 0
outs = []
all_labels = []
for scale_order in range(self.n_scales):
scale = 2**scale_order
fake_scale = F.interpolate(fake,
scale_factor=1 / scale,
mode='bilinear')
ins_scale = F.interpolate(ins,
scale_factor=1 / scale,
mode='bilinear')
f1 = self.proj_As[str(scale_order)](
self.nets[str(scale_order)](fake_scale))
f2 = self.proj_Bs[str(scale_order)](
self.nets[str(scale_order)](ins_scale))
N, c, h, w = f1.shape
labels = torch.arange(N, device=f1.device)
labels = labels.view(N, 1, 1).expand(N, h, w)
out, loss = self.barlow(f1, f2)
total_loss += loss
outs.append(out.reshape(out.shape[0], out.shape[1], -1))
all_labels.append(labels.reshape(labels.shape[0], -1))
outs = torch.cat(outs, dim=2)
return {
'matches': outs,
'loss': total_loss,
'labels': torch.cat(all_labels, dim=1)
}
def get_dataset(typ: str, path: str, train: bool, size: int):
if typ == 'images':
return UnlabeledImages(
path,
TF.Compose([
TF.Resize(size),
TF.CenterCrop(size),
TF.RandomHorizontalFlip(),
TF.ToTensor(),
]))
if typ == 'celeba':
return celeba(
path, train,
TF.Compose([
TF.Resize(size),
TF.CenterCrop(size),
TF.RandomHorizontalFlip(),
TF.ToTensor(),
]))
@tu.experimental
def celeba(path, train: bool, tfm=None):
from torchvision.datasets import CelebA
positive = True
if path[:4] == 'not-':
positive = False
path = path[4:]
celeba = CelebA('~/.torch/celeba',
download=True,
target_type=[],
split='train' if train else 'test')
male_idx = celeba.attr_names.index(path)
files = [
f'~/.torch/celeba/celeba/img_align_celeba/{celeba.filename[i]}'
for i in range(len(celeba))
if celeba.attr[i, male_idx] == (1 if positive else 0)
]
return tch.datasets.pix2pix.ImagesPaths(files, tfm)
@tu.experimental
def train(rank, world_size, opts):
G = pix2pix_128_dev()
G.to_instance_norm()
def to_adain(m):
if isinstance(m, nn.InstanceNorm2d):
return tnn.FiLM2d(m.num_features, 256)
return m
tnn.edit_model(G, to_adain)
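    # The edit above swaps every InstanceNorm2d for a FiLM2d layer, turning the
    # pix2pix generator into an AdaIN-style conditional model whose feature
    # maps are modulated by the 256-d noise vector passed as G's second input.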
tnn.utils.net_to_equal_lr(G, leak=0.2)
D = patch34().remove_batchnorm()
tnn.utils.net_to_equal_lr(D, leak=0.2)
D = MultiScaleDiscriminator(D)
M = Matcher()
if rank == 0:
print(G)
print(D)
print(M)
G = torch.nn.parallel.DistributedDataParallel(G.to(rank), [rank], rank)
D = torch.nn.parallel.DistributedDataParallel(D.to(rank), [rank], rank)
M = torch.nn.parallel.DistributedDataParallel(M.to(rank), [rank], rank)
SIZE = 128
ds_A = get_dataset(opts.data_A[0], opts.data_A[1], True, SIZE)
ds_B = get_dataset(opts.data_B[0], opts.data_B[1], True, SIZE)
ds = tch.datasets.RandomPairsDataset(ds_A, ds_B)
ds_test_A = get_dataset(opts.data_test[0], opts.data_test[1], False, SIZE)
ds_test_B = get_dataset(opts.data_B[0], opts.data_B[1], False, SIZE)
ds_test = tch.datasets.RandomPairsDataset(ds_test_A, ds_test_B)
if rank == 0:
print(ds)
print(ds_test)
ds = torch.utils.data.DataLoader(ds,
8,
num_workers=4,
drop_last=True,
shuffle=True,
pin_memory=True)
ds_test = torch.utils.data.DataLoader(ds_test,
128,
num_workers=4,
drop_last=True,
shuffle=True,
pin_memory=True)
def G_fun(batch) -> dict:
x, y = batch
D.train()
M.eval()
out = G(x * 2 - 1, torch.randn(x.shape[0], 256, device=x.device))
out_d = out.detach()
out_d.requires_grad_()
with D.no_sync():
loss = gan_loss.generated(D(out_d * 2 - 1))
loss.backward()
if opts.consistency != 0:
with M.no_sync():
clf_loss = opts.consistency * M(out_d * 2 - 1,
x * 2 - 1)['loss']
clf_loss.backward()
out.backward(out_d.grad)
return {'G_loss': loss.item()}
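    # G_fun uses a detach-and-reinject trick: both the adversarial loss and the
    # consistency loss are backpropagated into out_d.grad on a detached copy of
    # G's output, then a single out.backward(out_d.grad) call pushes the
    # accumulated gradient through G, so G's graph is traversed only once.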
class GradientPenalty:
def __init__(self, gamma):
self.gamma = gamma
self.iters = 0
self.last_norm = float('nan')
def __call__(self, model, real, fake):
if self.iters < 100 or self.iters % 4 == 0:
real = real.detach()
fake = fake.detach()
gp, g_norm = zero_gp(model, real, fake)
# Sync the gradient on the next backward
if torch.any(torch.isnan(gp)):
gp.detach_()
else:
(4 * self.gamma * gp).backward()
self.last_norm = g_norm
self.iters += 1
return self.last_norm
gradient_penalty = GradientPenalty(opts.r0_D)
gradient_penalty_M = GradientPenaltyM(opts.r0_M)
def D_fun(batch) -> dict:
x, y = batch
with G.no_sync():
with torch.no_grad():
out = G(x * 2 - 1, torch.randn(x.shape[0], 256,
device=x.device))
fake = out * 2 - 1
real = y * 2 - 1
with D.no_sync():
prob_fake = D(fake)
fake_correct = prob_fake.detach().lt(0).int().eq(1).sum()
fake_loss = gan_loss.fake(prob_fake)
fake_loss.backward()
with D.no_sync():
g_norm = gradient_penalty(D, real, fake)
prob_real = D(real)
real_correct = prob_real.detach().gt(0).int().eq(1).sum()
real_loss = gan_loss.real(prob_real)
real_loss.backward()
if opts.consistency != 0:
with M.no_sync():
match_g_norm = gradient_penalty_M(M, fake, x * 2 - 1, real)
else:
match_g_norm = 0
M_out = M(fake, x * 2 - 1)
matches = M_out['matches']
labels = M_out['labels']
match_correct = matches.argmax(1).eq(labels).float().mean()
loss = M_out['loss']
loss.backward()
        return {
            'out': out,
            'fake_loss': fake_loss.item(),
            'prob_fake': torch.sigmoid(prob_fake).mean().item(),
            'prob_real': torch.sigmoid(prob_real).mean().item(),
            'real_loss': real_loss.item(),
            'g_norm': g_norm,
            'D-correct': (fake_correct + real_correct) / (2 * prob_fake.numel()),
            'match_correct': match_correct,
            'match_g_norm': match_g_norm,
        }
def test_fun(batch):
x, y = batch
G.train()
with G.no_sync():
out = torch.cat([
G(
xx * 2 - 1,
tch.distributions.sample_truncated_normal(
xx.shape[0], 256).to(xx.device))
for xx in torch.split(x, 32)
],
dim=0)
return {'out': out}
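    # test_fun draws latents from a truncated normal rather than a plain normal
    # (the usual truncation trick), trading sample diversity for fidelity in
    # the FID/KID/precision/recall metrics computed by GANMetrics below.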
recipe = GANRecipe(G,
D,
G_fun,
D_fun,
test_fun,
ds,
test_loader=ds_test,
test_every=5000,
log_every=100,
checkpoint='main_adain' if rank == 0 else None,
visdom_env='main_adain' if rank == 0 else None)
recipe.register('M', M)
recipe.callbacks.add_callbacks([
tch.callbacks.Optimizer(
tch.optim.Lookahead(
tch.optim.RAdamW(D.parameters(),
lr=2e-3,
betas=(0., 0.99),
weight_decay=0))),
tch.callbacks.Optimizer(
tch.optim.Lookahead(
tch.optim.RAdamW(M.parameters(),
lr=2e-3,
betas=(0.9, 0.99),
weight_decay=0))),
tch.callbacks.Log('out', 'out'),
tch.callbacks.Log('batch.0', 'x'),
tch.callbacks.Log('batch.1', 'y'),
tch.callbacks.WindowedMetricAvg('fake_loss', 'fake_loss'),
tch.callbacks.WindowedMetricAvg('real_loss', 'real_loss'),
tch.callbacks.WindowedMetricAvg('prob_fake', 'prob_fake'),
tch.callbacks.WindowedMetricAvg('prob_real', 'prob_real'),
tch.callbacks.WindowedMetricAvg('D-correct', 'D-correct'),
tch.callbacks.WindowedMetricAvg('match_correct', 'match_correct'),
tch.callbacks.Log('g_norm', 'g_norm'),
tch.callbacks.Log('match_g_norm', 'match_g_norm'),
])
recipe.G_loop.callbacks.add_callbacks([
tch.callbacks.Optimizer(
tch.optim.Lookahead(
tch.optim.RAdamW(G.parameters(),
lr=2e-3,
betas=(0., 0.99),
weight_decay=0))),
])
recipe.test_loop.callbacks.add_callbacks([
tch.callbacks.GANMetrics('batch.1', 'out', device=rank),
tch.callbacks.Log('kid', 'kid'),
tch.callbacks.Log('fid', 'fid'),
tch.callbacks.Log('precision', 'precision'),
tch.callbacks.Log('recall', 'recall'),
tch.callbacks.Log('out', 'test_out'),
tch.callbacks.Log('batch.0', 'test_x'),
])
recipe.to(rank)
if opts.from_ckpt is not None:
recipe.load_state_dict(torch.load(opts.from_ckpt, map_location='cpu'))
tu.unfreeze(D)
tu.unfreeze(G)
tu.unfreeze(M)
recipe.run(200)
def run(opts):
G = pix2pix_128()
G.to_instance_norm()
tnn.utils.net_to_equal_lr(G, leak=0.2)
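    # Note: train() builds pix2pix_128_dev and converts its InstanceNorm2d
    # layers to FiLM2d before training, while this function constructs a plain
    # pix2pix_128; a checkpoint produced by train() may therefore need the same
    # edit_model conversion before the load_state_dict below will succeed.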
G.load_state_dict(torch.load(opts.from_ckpt, map_location='cpu')['G'])
import torchvision.transforms as TF
from PIL import Image
tfm = TF.Compose([
TF.Resize(128),
TF.CenterCrop(128),
TF.ToTensor(),
TF.Normalize([0.5] * 3, [0.5] * 3),
])
img = tfm(Image.open(opts.src).convert('RGB'))
img = torch.stack([img, img], dim=0)
TF.functional.to_pil_image(G(img, torch.randn(2, 256))[0]).save(opts.dst)
def para_run(opts):
return tu.parallel_run(train, opts=opts)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser('train')
train_parser.add_argument('--data-A',
required=True,
type=lambda x: x.split(':'))
train_parser.add_argument('--data-B',
required=True,
type=lambda x: x.split(':'))
train_parser.add_argument('--data-test',
required=True,
type=lambda x: x.split(':'))
train_parser.add_argument('--r0-D', default=0.0001, type=float)
train_parser.add_argument('--r0-M', default=0.0001, type=float)
train_parser.add_argument('--consistency', default=0.01, type=float)
train_parser.add_argument('--from-ckpt')
train_parser.set_defaults(func=para_run)
run_parser = subparsers.add_parser('run')
run_parser.add_argument('--from-ckpt', required=True)
run_parser.add_argument('--src', required=True)
run_parser.add_argument('--dst', required=True)
run_parser.set_defaults(func=run)
opts = parser.parse_args()
opts.func(opts)
|
948060d7849c6810f6a7327481577b000ecfe31c
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoMuon/MuonSeedGenerator/python/MuonSeed_cfi.py
|
1bdc38e49c3ec10659c41bf11597ac4fc5d2885e
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
MuonSeed_cfi.py
|
import FWCore.ParameterSet.Config as cms
from RecoMuon.TrackingTools.MuonServiceProxy_cff import *
from RecoMuon.MuonSeedGenerator.ptSeedParameterization_cfi import *
from RecoMuon.MuonSeedGenerator.MuonSeedPtScale_cfi import *
MuonSeed = cms.EDProducer("MuonSeedProducer",
ptSeedParameterization,
MuonServiceProxy,
dphiScale,
# Parameters for seed creation in overlap region
maxDeltaEtaOverlap = cms.double(0.08),
# Flag for internal debugging
DebugMuonSeed = cms.bool(False),
minimumSeedPt = cms.double(5.0),
# The following parameters are only used in the new seed generator !
# Parameters for seed creation in endcap region
minCSCHitsPerSegment = cms.int32(4),
maxDeltaPhiDT = cms.double(0.3),
maxDeltaPhiOverlap = cms.double(0.25),
# Parameters for seed creation in barrel region
minDTHitsPerSegment = cms.int32(2),
maxPhiResolutionDT = cms.double(0.03),
DTSegmentLabel = cms.InputTag("dt4DSegments"),
SeedPtSystematics = cms.double(0.1),
maximumSeedPt = cms.double(3000.0),
# Minimum and maximum Pt for seeds
defaultSeedPt = cms.double(25.0),
CSCSegmentLabel = cms.InputTag("cscSegments"),
# this means 1/5 of length in ME1/a
maxEtaResolutionCSC = cms.double(0.06),
# enable the DT chamber
EnableDTMeasurement = cms.bool(True),
# Resolution power for distinguishing between 2 muon seeds (suppression of combinatorics)
# this means 1/20th of MB0
maxEtaResolutionDT = cms.double(0.02),
maxDeltaEtaDT = cms.double(0.3),
# this is a 5th of a chamber width
maxPhiResolutionCSC = cms.double(0.03),
maxDeltaEtaCSC = cms.double(0.2),
maxDeltaPhiCSC = cms.double(0.5),
# enable the CSC chamber
EnableCSCMeasurement = cms.bool(True)
)
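# Hypothetical usage sketch (standard CMSSW convention, not part of this cfi):
# a process configuration imports this module and overrides parameters with
# clone(), e.g.
#   from RecoMuon.MuonSeedGenerator.MuonSeed_cfi import MuonSeed
#   myMuonSeed = MuonSeed.clone(minimumSeedPt = 2.0)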
|
4e9111121c9209f5a43a30be0cda0a236cc9be12
|
73f4f74f678fadee409560b78ffacb7aec38c545
|
/Tests/interop/net/loadorder/t2h.py
|
6f37ad3cbdc7d0754436e7280e648fe6c204bc39
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
IronLanguages/ironpython3
|
14ec38566d7c27675215042d72e38f6a979011ab
|
e8ed79bd7f0f33eb2af1a538dd7e98767c86c211
|
refs/heads/master
| 2023-09-03T03:36:51.590171
| 2023-09-02T19:02:51
| 2023-09-02T19:02:51
| 17,266,066
| 2,396
| 349
|
Apache-2.0
| 2023-09-09T01:46:11
| 2014-02-27T21:50:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
t2h.py
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from iptest.assert_util import *
add_clr_assemblies("loadorder_2")
# namespace First {
# public class Nongeneric1 {
# public static string Flag = typeof(Nongeneric1).FullName;
# }
# }
import First
from First import *
add_clr_assemblies("loadorder_2h")
# // generic type, which has same namespace, different name from First.Nongeneric1
# namespace First {
# public class Nongeneric2<T> {
# public static string Flag = typeof(Nongeneric2<>).FullName;
# }
# }
AreEqual(First.Nongeneric2[str].Flag, "First.Nongeneric2`1") # no need to import First again
AreEqual(First.Nongeneric1.Flag, "First.Nongeneric1")
AreEqual(Nongeneric1.Flag, "First.Nongeneric1")
AssertError(NameError, lambda: Nongeneric2)
from First import *
AreEqual(First.Nongeneric2[str].Flag, "First.Nongeneric2`1")
AreEqual(First.Nongeneric1.Flag, "First.Nongeneric1")
AreEqual(Nongeneric1.Flag, "First.Nongeneric1")
AreEqual(Nongeneric2[float].Flag, "First.Nongeneric2`1")
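# Net effect demonstrated above: attribute access through the First module sees
# newly loaded types immediately, while names bound by an earlier
# `from First import *` are only refreshed by re-running the wildcard import.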
|
43c8a214b91f78e9c85ef1121357a6f44739d023
|
902eb0e21020148d164e5a49694980315213188b
|
/tests/test_sendgrid_backend.py
|
823d10991bb0b225414aff8a8325334236ec13c4
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
anymail/django-anymail
|
4ca8dfff50ea2a2987b389693e46748e7717367d
|
42dbfcf2c45f38a004a1f576dad38e7ad5d554cb
|
refs/heads/main
| 2023-09-01T06:52:35.147257
| 2023-08-25T19:19:05
| 2023-08-25T19:35:08
| 53,549,881
| 1,556
| 150
|
NOASSERTION
| 2023-08-25T19:35:09
| 2016-03-10T02:55:09
|
Python
|
UTF-8
|
Python
| false
| false
| 51,095
|
py
|
test_sendgrid_backend.py
|
from base64 import b64decode, b64encode
from calendar import timegm
from datetime import date, datetime
from decimal import Decimal
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from unittest.mock import patch
from django.core import mail
from django.test import SimpleTestCase, override_settings, tag
from django.utils.timezone import (
get_fixed_timezone,
override as override_current_timezone,
)
from anymail.exceptions import (
AnymailAPIError,
AnymailConfigurationError,
AnymailSerializationError,
AnymailUnsupportedFeature,
AnymailWarning,
)
from anymail.message import attach_inline_image_file
from .mock_requests_backend import (
RequestsBackendMockAPITestCase,
SessionSharingTestCases,
)
from .utils import (
SAMPLE_IMAGE_FILENAME,
AnymailTestMixin,
sample_image_content,
sample_image_path,
)
@tag("sendgrid")
@override_settings(
EMAIL_BACKEND="anymail.backends.sendgrid.EmailBackend",
ANYMAIL={"SENDGRID_API_KEY": "test_api_key"},
)
class SendGridBackendMockAPITestCase(RequestsBackendMockAPITestCase):
# SendGrid v3 success responses are empty:
DEFAULT_RAW_RESPONSE = b""
# SendGrid v3 uses '202 Accepted' for success (in most cases):
DEFAULT_STATUS_CODE = 202
def setUp(self):
super().setUp()
# Patch uuid4 to generate predictable anymail_ids for testing
patch_uuid4 = patch(
"anymail.backends.sendgrid.uuid.uuid4",
side_effect=["mocked-uuid-%d" % n for n in range(1, 5)],
)
patch_uuid4.start()
self.addCleanup(patch_uuid4.stop)
# Simple message useful for many tests
self.message = mail.EmailMultiAlternatives(
"Subject", "Text Body", "from@example.com", ["to@example.com"]
)
@tag("sendgrid")
class SendGridBackendStandardEmailTests(SendGridBackendMockAPITestCase):
"""Test backend support for Django standard email features"""
def test_send_mail(self):
"""Test basic API for simple send"""
mail.send_mail(
"Subject here",
"Here is the message.",
"from@sender.example.com",
["to@example.com"],
fail_silently=False,
)
self.assert_esp_called("https://api.sendgrid.com/v3/mail/send")
http_headers = self.get_api_call_headers()
self.assertEqual(http_headers["Authorization"], "Bearer test_api_key")
self.assertEqual(http_headers["Content-Type"], "application/json")
data = self.get_api_call_json()
self.assertEqual(data["subject"], "Subject here")
self.assertEqual(
data["content"], [{"type": "text/plain", "value": "Here is the message."}]
)
self.assertEqual(data["from"], {"email": "from@sender.example.com"})
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "to@example.com"}],
# make sure the backend assigned the anymail_id
# for event tracking and notification
"custom_args": {"anymail_id": "mocked-uuid-1"},
}
],
)
def test_name_addr(self):
"""Make sure RFC2822 name-addr format (with display-name) is allowed
(Test both sender and recipient addresses)
"""
msg = mail.EmailMessage(
"Subject",
"Message",
"From Name <from@example.com>",
["Recipient #1 <to1@example.com>", "to2@example.com"],
cc=["Carbon Copy <cc1@example.com>", "cc2@example.com"],
bcc=["Blind Copy <bcc1@example.com>", "bcc2@example.com"],
)
msg.send()
data = self.get_api_call_json()
self.assertEqual(
data["from"], {"email": "from@example.com", "name": "From Name"}
)
# single message (single "personalization") sent to all those recipients
# (note workaround for SendGrid v3 API bug quoting display-name
# in personalizations)
self.assertEqual(len(data["personalizations"]), 1)
self.assertEqual(
data["personalizations"][0]["to"],
[
{"name": '"Recipient #1"', "email": "to1@example.com"},
{"email": "to2@example.com"},
],
)
self.assertEqual(
data["personalizations"][0]["cc"],
[
{"name": '"Carbon Copy"', "email": "cc1@example.com"},
{"email": "cc2@example.com"},
],
)
self.assertEqual(
data["personalizations"][0]["bcc"],
[
{"name": '"Blind Copy"', "email": "bcc1@example.com"},
{"email": "bcc2@example.com"},
],
)
def test_email_message(self):
email = mail.EmailMessage(
"Subject",
"Body goes here",
"from@example.com",
["to1@example.com", "Also To <to2@example.com>"],
bcc=["bcc1@example.com", "Also BCC <bcc2@example.com>"],
cc=["cc1@example.com", "Also CC <cc2@example.com>"],
headers={
"Reply-To": "another@example.com",
"X-MyHeader": "my value",
# should override backend msgid:
"Message-ID": "<mycustommsgid@sales.example.com>",
},
)
email.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [
{"email": "to1@example.com"},
{"email": "to2@example.com", "name": '"Also To"'},
],
"cc": [
{"email": "cc1@example.com"},
{"email": "cc2@example.com", "name": '"Also CC"'},
],
"bcc": [
{"email": "bcc1@example.com"},
{"email": "bcc2@example.com", "name": '"Also BCC"'},
],
# make sure custom Message-ID also added to custom_args
"custom_args": {"anymail_id": "mocked-uuid-1"},
}
],
)
self.assertEqual(data["from"], {"email": "from@example.com"})
self.assertEqual(data["subject"], "Subject")
self.assertEqual(
data["content"], [{"type": "text/plain", "value": "Body goes here"}]
)
self.assertEqual(data["reply_to_list"], [{"email": "another@example.com"}])
self.assertEqual(
data["headers"],
{
"X-MyHeader": "my value",
"Message-ID": "<mycustommsgid@sales.example.com>",
},
)
def test_html_message(self):
text_content = "This is an important message."
html_content = "<p>This is an <strong>important</strong> message.</p>"
email = mail.EmailMultiAlternatives(
"Subject", text_content, "from@example.com", ["to@example.com"]
)
email.attach_alternative(html_content, "text/html")
email.send()
data = self.get_api_call_json()
# SendGrid requires content in text, html order:
self.assertEqual(len(data["content"]), 2)
self.assertEqual(
data["content"][0], {"type": "text/plain", "value": text_content}
)
self.assertEqual(
data["content"][1], {"type": "text/html", "value": html_content}
)
# Don't accidentally send the html part as an attachment:
self.assertNotIn("attachments", data)
def test_html_only_message(self):
html_content = "<p>This is an <strong>important</strong> message.</p>"
email = mail.EmailMessage(
"Subject", html_content, "from@example.com", ["to@example.com"]
)
email.content_subtype = "html" # Main content is now text/html
email.send()
data = self.get_api_call_json()
self.assertEqual(len(data["content"]), 1)
self.assertEqual(
data["content"][0], {"type": "text/html", "value": html_content}
)
def test_extra_headers(self):
self.message.extra_headers = {
"X-Custom": "string",
"X-Num": 123,
"Reply-To": '"Do Not Reply" <noreply@example.com>',
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data["headers"]["X-Custom"], "string")
# number converted to string (undocumented SendGrid requirement):
self.assertEqual(data["headers"]["X-Num"], "123")
# Reply-To must be moved to separate param
self.assertNotIn("Reply-To", data["headers"])
self.assertEqual(
data["reply_to_list"],
[{"name": "Do Not Reply", "email": "noreply@example.com"}],
)
def test_extra_headers_serialization_error(self):
self.message.extra_headers = {"X-Custom": Decimal(12.5)}
with self.assertRaisesMessage(AnymailSerializationError, "Decimal"):
self.message.send()
def test_reply_to(self):
self.message.reply_to = [
'"Reply recipient" <reply@example.com',
"reply2@example.com",
]
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["reply_to_list"],
[
{"name": "Reply recipient", "email": "reply@example.com"},
{"email": "reply2@example.com"},
],
)
self.assertNotIn("reply_to", data) # not allowed with reply_to_list
def test_attachments(self):
text_content = "* Item one\n* Item two\n* Item three"
self.message.attach(
filename="test.txt", content=text_content, mimetype="text/plain"
)
# Should guess mimetype if not provided...
png_content = b"PNG\xb4 pretend this is the contents of a png file"
self.message.attach(filename="test.png", content=png_content)
# Should work with a MIMEBase object (also tests no filename)...
pdf_content = b"PDF\xb4 pretend this is valid pdf data"
mimeattachment = MIMEBase("application", "pdf")
mimeattachment.set_payload(pdf_content)
self.message.attach(mimeattachment)
self.message.send()
data = self.get_api_call_json()
self.assertEqual(len(data["attachments"]), 3)
attachments = data["attachments"]
self.assertEqual(
attachments[0],
{
"filename": "test.txt",
"content": b64encode(text_content.encode("utf-8")).decode("ascii"),
"type": "text/plain",
},
)
self.assertEqual(
attachments[1],
{
"filename": "test.png",
"content": b64encode(png_content).decode("ascii"),
"type": "image/png", # (type inferred from filename)
},
)
self.assertEqual(
attachments[2],
{
"filename": "", # no filename -- but param is required
"content": b64encode(pdf_content).decode("ascii"),
"type": "application/pdf",
},
)
def test_unicode_attachment_correctly_decoded(self):
self.message.attach(
"Une pièce jointe.html", "<p>\u2019</p>", mimetype="text/html"
)
self.message.send()
attachment = self.get_api_call_json()["attachments"][0]
self.assertEqual(attachment["filename"], "Une pièce jointe.html")
self.assertEqual(
b64decode(attachment["content"]).decode("utf-8"), "<p>\u2019</p>"
)
def test_embedded_images(self):
image_filename = SAMPLE_IMAGE_FILENAME
image_path = sample_image_path(image_filename)
image_data = sample_image_content(image_filename)
cid = attach_inline_image_file(self.message, image_path) # Read from a png file
html_content = (
'<p>This has an <img src="cid:%s" alt="inline" /> image.</p>' % cid
)
self.message.attach_alternative(html_content, "text/html")
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["attachments"][0],
{
"filename": image_filename,
"content": b64encode(image_data).decode("ascii"),
"type": "image/png", # (type inferred from filename)
"disposition": "inline",
"content_id": cid,
},
)
def test_attached_images(self):
image_filename = SAMPLE_IMAGE_FILENAME
image_path = sample_image_path(image_filename)
image_data = sample_image_content(image_filename)
# option 1: attach as a file
self.message.attach_file(image_path)
# option 2: construct the MIMEImage and attach it directly
image = MIMEImage(image_data)
self.message.attach(image)
self.message.send()
image_data_b64 = b64encode(image_data).decode("ascii")
data = self.get_api_call_json()
self.assertEqual(
data["attachments"][0],
{
"filename": image_filename, # the named one
"content": image_data_b64,
"type": "image/png",
},
)
self.assertEqual(
data["attachments"][1],
{
"filename": "", # the unnamed one
"content": image_data_b64,
"type": "image/png",
},
)
def test_multiple_html_alternatives(self):
# SendGrid's v3 API allows all kinds of content alternatives.
# It's unclear whether this would permit multiple text/html parts
# (the API docs warn that "If included, text/plain and text/html must be
# the first indices of the [content] array in this order"), but Anymail
# generally passes whatever the API structure supports -- deferring any
# limitations to the ESP.
self.message.body = "Text body"
self.message.attach_alternative("<p>First html is OK</p>", "text/html")
self.message.attach_alternative(
"<p>And maybe second html, too</p>", "text/html"
)
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["content"],
[
{"type": "text/plain", "value": "Text body"},
{"type": "text/html", "value": "<p>First html is OK</p>"},
{"type": "text/html", "value": "<p>And maybe second html, too</p>"},
],
)
def test_non_html_alternative(self):
self.message.body = "Text body"
self.message.attach_alternative("{'maybe': 'allowed'}", "application/json")
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["content"],
[
{"type": "text/plain", "value": "Text body"},
{"type": "application/json", "value": "{'maybe': 'allowed'}"},
],
)
def test_api_failure(self):
self.set_mock_response(status_code=400)
with self.assertRaisesMessage(AnymailAPIError, "SendGrid API response 400"):
mail.send_mail("Subject", "Body", "from@example.com", ["to@example.com"])
# Make sure fail_silently is respected
self.set_mock_response(status_code=400)
sent = mail.send_mail(
"Subject",
"Body",
"from@example.com",
["to@example.com"],
fail_silently=True,
)
self.assertEqual(sent, 0)
def test_api_error_includes_details(self):
"""AnymailAPIError should include ESP's error message"""
# JSON error response:
error_response = b"""{"errors":[
{"message":"Helpful explanation from SendGrid",
"field":"subject","help":null},
{"message":"Another error","field":null,"help":null}
]}"""
self.set_mock_response(status_code=400, raw=error_response)
with self.assertRaises(AnymailAPIError) as cm:
self.message.send()
err = cm.exception
self.assertIn("Helpful explanation from SendGrid", str(err))
self.assertIn("Another error", str(err))
# Non-JSON error response:
self.set_mock_response(status_code=500, raw=b"Ack! Bad proxy!")
with self.assertRaisesMessage(AnymailAPIError, "Ack! Bad proxy!"):
self.message.send()
# No content in the error response:
self.set_mock_response(status_code=502, raw=None)
with self.assertRaises(AnymailAPIError):
self.message.send()
@tag("sendgrid")
class SendGridBackendAnymailFeatureTests(SendGridBackendMockAPITestCase):
"""Test backend support for Anymail added features"""
def test_envelope_sender(self):
# SendGrid does not have a way to change envelope sender.
self.message.envelope_sender = "anything@bounces.example.com"
with self.assertRaisesMessage(AnymailUnsupportedFeature, "envelope_sender"):
self.message.send()
def test_metadata(self):
self.message.metadata = {"user_id": "12345", "items": 6, "float": 98.6}
self.message.send()
data = self.get_api_call_json()
# remove anymail_id we added for tracking:
data["custom_args"].pop("anymail_id", None)
self.assertEqual(
data["custom_args"],
{
"user_id": "12345",
"items": "6", # int converted to a string,
"float": "98.6", # float converted to a string (watch binary rounding!)
},
)
def test_send_at(self):
utc_plus_6 = get_fixed_timezone(6 * 60)
utc_minus_8 = get_fixed_timezone(-8 * 60)
with override_current_timezone(utc_plus_6):
# Timezone-aware datetime converted to UTC:
self.message.send_at = datetime(2016, 3, 4, 5, 6, 7, tzinfo=utc_minus_8)
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["send_at"], timegm((2016, 3, 4, 13, 6, 7))
) # 05:06 UTC-8 == 13:06 UTC
# Timezone-naive datetime assumed to be Django current_timezone
self.message.send_at = datetime(
2022, 10, 11, 12, 13, 14, 567
) # microseconds should get stripped
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["send_at"], timegm((2022, 10, 11, 6, 13, 14))
) # 12:13 UTC+6 == 06:13 UTC
# Date-only treated as midnight in current timezone
self.message.send_at = date(2022, 10, 22)
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["send_at"], timegm((2022, 10, 21, 18, 0, 0))
) # 00:00 UTC+6 == 18:00-1d UTC
# POSIX timestamp
self.message.send_at = 1651820889 # 2022-05-06 07:08:09 UTC
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data["send_at"], 1651820889)
def test_tags(self):
self.message.tags = ["receipt", "repeat-user"]
self.message.send()
data = self.get_api_call_json()
self.assertCountEqual(data["categories"], ["receipt", "repeat-user"])
def test_tracking(self):
# Test one way...
self.message.track_clicks = False
self.message.track_opens = True
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data["tracking_settings"]["click_tracking"], {"enable": False})
self.assertEqual(data["tracking_settings"]["open_tracking"], {"enable": True})
# ...and the opposite way
self.message.track_clicks = True
self.message.track_opens = False
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data["tracking_settings"]["click_tracking"], {"enable": True})
self.assertEqual(data["tracking_settings"]["open_tracking"], {"enable": False})
def test_template_id(self):
self.message.template_id = "5997fcf6-2b9f-484d-acd5-7e9a99f0dc1f"
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data["template_id"], "5997fcf6-2b9f-484d-acd5-7e9a99f0dc1f")
def test_template_id_with_empty_body(self):
# v2 API required *some* text and html in message to render those template
# bodies, so the v2 backend set those to " " when necessary.
# But per v3 docs:
# "If you use a template that contains content and a subject (either text or
# html), you do not need to specify those in the respective personalizations
# or message level parameters."
# So make sure we aren't adding body content where not needed:
message = mail.EmailMessage(
from_email="from@example.com", to=["to@example.com"]
)
message.template_id = "5997fcf6-2b9f-484d-acd5-7e9a99f0dc1f"
message.send()
data = self.get_api_call_json()
self.assertNotIn("content", data) # neither text nor html body
self.assertNotIn("subject", data)
def test_merge_data(self):
# A template_id starting with "d-" indicates you are using SendGrid's newer
# (non-legacy) "dynamic" transactional templates
self.message.template_id = "d-5a963add2ec84305813ff860db277d7a"
self.message.from_email = "from@example.com"
self.message.to = [
"alice@example.com",
"Bob <bob@example.com>",
"celia@example.com",
]
# cc gets applied to *each* recipient in a merge:
self.message.cc = ["cc@example.com"]
self.message.merge_data = {
"alice@example.com": {"name": "Alice", "group": "Developers"},
"bob@example.com": {"name": "Bob"}, # and leave group undefined
# and no data for celia@example.com
}
self.message.merge_global_data = {
"group": "Users",
"site": "ExampleCo",
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
"cc": [{"email": "cc@example.com"}], # all recipients get the cc
"custom_args": {"anymail_id": "mocked-uuid-1"},
"dynamic_template_data": {
"name": "Alice",
"group": "Developers",
"site": "ExampleCo",
},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"cc": [{"email": "cc@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-2"},
"dynamic_template_data": {
"name": "Bob",
"group": "Users",
"site": "ExampleCo",
},
},
{
"to": [{"email": "celia@example.com"}],
"cc": [{"email": "cc@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-3"},
"dynamic_template_data": {"group": "Users", "site": "ExampleCo"},
},
],
)
self.assertNotIn("sections", data) # 'sections' not used with dynamic templates
def test_explicit_dynamic_template(self):
# undocumented esp_extra['use_dynamic_template']
# can be used to force dynamic/legacy params
self.message.merge_data = {"to@example.com": {"test": "data"}}
self.message.template_id = "apparently-not-dynamic" # doesn't start with "d-"
self.message.esp_extra = {"use_dynamic_template": True}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "to@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-1"},
"dynamic_template_data": {"test": "data"},
}
],
)
self.message.template_id = "d-apparently-not-legacy"
self.message.esp_extra = {
"use_dynamic_template": False,
"merge_field_format": "<%{}%>",
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "to@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-2"},
"substitutions": {"<%test%>": "data"},
}
],
)
def test_merge_data_global_only(self):
# a template with only global data can be used to send the same message
# to multiple recipients (non-batch)
self.message.template_id = "d-5a963add2ec84305813ff860db277d7a"
self.message.merge_global_data = {"test": "data"}
self.message.to = ["one@example.com", "two@example.com"]
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [ # not batch
{"email": "one@example.com"},
{"email": "two@example.com"},
],
"custom_args": {"anymail_id": "mocked-uuid-1"},
"dynamic_template_data": {"test": "data"},
}
],
)
def test_legacy_merge_data(self):
# unless a new "dynamic template" is specified, Anymail assumes the legacy
# "substitutions" format for merge data
self.message.from_email = "from@example.com"
self.message.to = [
"alice@example.com",
"Bob <bob@example.com>",
"celia@example.com",
]
# cc gets applied to *each* recipient in a merge:
self.message.cc = ["cc@example.com"]
# SendGrid template_id is not required to use merge.
# You can just supply (legacy) template content as the message (e.g.):
self.message.body = "Hi :name. Welcome to :group at :site."
self.message.merge_data = {
# You must either include merge field delimiters in the keys
# (':name' rather than just 'name') as shown here, or use one of the
# merge_field_format options shown in the test cases below
"alice@example.com": {":name": "Alice", ":group": "Developers"},
"bob@example.com": {":name": "Bob"}, # and leave :group undefined
# and no data for celia@example.com
}
self.message.merge_global_data = {
":group": "Users",
":site": "ExampleCo",
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
"cc": [{"email": "cc@example.com"}], # all recipients get the cc
"custom_args": {"anymail_id": "mocked-uuid-1"},
"substitutions": {
":name": "Alice",
":group": "Developers",
":site": "ExampleCo", # merge_global_data merged
},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"cc": [{"email": "cc@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-2"},
"substitutions": {
":name": "Bob",
":group": "Users",
":site": "ExampleCo",
},
},
{
"to": [{"email": "celia@example.com"}],
"cc": [{"email": "cc@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-3"},
"substitutions": {":group": "Users", ":site": "ExampleCo"},
},
],
)
# 'sections' no longer used for merge_global_data:
self.assertNotIn("sections", data)
@override_settings(
ANYMAIL_SENDGRID_MERGE_FIELD_FORMAT=":{}" # :field as shown in SG examples
)
def test_legacy_merge_field_format_setting(self):
# Provide merge field delimiters in settings.py
self.message.to = ["alice@example.com", "Bob <bob@example.com>"]
self.message.merge_data = {
"alice@example.com": {"name": "Alice", "group": "Developers"},
"bob@example.com": {"name": "Bob"}, # and leave group undefined
}
self.message.merge_global_data = {"site": "ExampleCo"}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-1"},
"substitutions": {
":name": "Alice",
":group": "Developers", # keys changed to :field
":site": "ExampleCo",
},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"custom_args": {"anymail_id": "mocked-uuid-2"},
"substitutions": {":name": "Bob", ":site": "ExampleCo"},
},
],
)
def test_legacy_merge_field_format_esp_extra(self):
# Provide merge field delimiters for an individual message
self.message.to = ["alice@example.com", "Bob <bob@example.com>"]
self.message.merge_data = {
"alice@example.com": {"name": "Alice", "group": "Developers"},
"bob@example.com": {"name": "Bob"}, # and leave group undefined
}
self.message.merge_global_data = {"site": "ExampleCo"}
# match Mandrill/MailChimp *|field|* delimiters:
self.message.esp_extra = {"merge_field_format": "*|{}|*"}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-1"},
"substitutions": {
"*|name|*": "Alice",
"*|group|*": "Developers",
"*|site|*": "ExampleCo",
},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"custom_args": {"anymail_id": "mocked-uuid-2"},
"substitutions": {"*|name|*": "Bob", "*|site|*": "ExampleCo"},
},
],
)
# Make sure our esp_extra merge_field_format doesn't get sent to SendGrid API:
self.assertNotIn("merge_field_format", data)
def test_legacy_warn_if_no_merge_field_delimiters(self):
self.message.to = ["alice@example.com"]
self.message.merge_data = {
"alice@example.com": {"name": "Alice", "group": "Developers"},
}
with self.assertWarnsRegex(AnymailWarning, r"SENDGRID_MERGE_FIELD_FORMAT"):
self.message.send()
def test_legacy_warn_if_no_global_merge_field_delimiters(self):
self.message.merge_global_data = {"site": "ExampleCo"}
with self.assertWarnsRegex(AnymailWarning, r"SENDGRID_MERGE_FIELD_FORMAT"):
self.message.send()
def test_merge_metadata(self):
self.message.to = ["alice@example.com", "Bob <bob@example.com>"]
self.message.merge_metadata = {
"alice@example.com": {"order_id": 123},
"bob@example.com": {"order_id": 678, "tier": "premium"},
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
# anymail_id added to other custom_args
"custom_args": {"anymail_id": "mocked-uuid-1", "order_id": "123"},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"custom_args": {
"anymail_id": "mocked-uuid-2",
"order_id": "678",
"tier": "premium",
},
},
],
)
def test_metadata_with_merge_metadata(self):
# Per SendGrid docs: "personalizations[x].custom_args will be merged
# with message level custom_args, overriding any conflicting keys."
# So there's no need to merge global metadata with per-recipient merge_metadata
# (like we have to for template merge_global_data and merge_data).
self.message.to = ["alice@example.com", "Bob <bob@example.com>"]
self.message.metadata = {"tier": "basic", "batch": "ax24"}
self.message.merge_metadata = {
"alice@example.com": {"order_id": 123},
"bob@example.com": {"order_id": 678, "tier": "premium"},
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-1", "order_id": "123"},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"custom_args": {
"anymail_id": "mocked-uuid-2",
"order_id": "678",
"tier": "premium",
},
},
],
)
self.assertEqual(data["custom_args"], {"tier": "basic", "batch": "ax24"})
def test_merge_metadata_with_merge_data(self):
# (using dynamic templates)
self.message.to = [
"alice@example.com",
"Bob <bob@example.com>",
"celia@example.com",
]
# cc gets applied to *each* recipient in a merge:
self.message.cc = ["cc@example.com"]
self.message.template_id = "d-5a963add2ec84305813ff860db277d7a"
self.message.merge_data = {
"alice@example.com": {"name": "Alice", "group": "Developers"},
"bob@example.com": {"name": "Bob"}
# and no data for celia@example.com
}
self.message.merge_global_data = {
"group": "Users",
"site": "ExampleCo",
}
self.message.merge_metadata = {
"alice@example.com": {"order_id": 123},
"bob@example.com": {"order_id": 678, "tier": "premium"},
# and no metadata for celia@example.com
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
"cc": [{"email": "cc@example.com"}], # all recipients get the cc
"dynamic_template_data": {
"name": "Alice",
"group": "Developers",
"site": "ExampleCo",
},
"custom_args": {"anymail_id": "mocked-uuid-1", "order_id": "123"},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"cc": [{"email": "cc@example.com"}],
"dynamic_template_data": {
"name": "Bob",
"group": "Users",
"site": "ExampleCo",
},
"custom_args": {
"anymail_id": "mocked-uuid-2",
"order_id": "678",
"tier": "premium",
},
},
{
"to": [{"email": "celia@example.com"}],
"cc": [{"email": "cc@example.com"}],
"dynamic_template_data": {"group": "Users", "site": "ExampleCo"},
"custom_args": {"anymail_id": "mocked-uuid-3"},
},
],
)
def test_merge_metadata_with_legacy_template(self):
self.message.to = [
"alice@example.com",
"Bob <bob@example.com>",
"celia@example.com",
]
# cc gets applied to *each* recipient in a merge:
self.message.cc = ["cc@example.com"]
self.message.template_id = "5a963add2ec84305813ff860db277d7a"
self.message.esp_extra = {"merge_field_format": ":{}"}
self.message.merge_data = {
"alice@example.com": {"name": "Alice", "group": "Developers"},
"bob@example.com": {"name": "Bob"}
# and no data for celia@example.com
}
self.message.merge_global_data = {
"group": "Users",
"site": "ExampleCo",
}
self.message.merge_metadata = {
"alice@example.com": {"order_id": 123},
"bob@example.com": {"order_id": 678, "tier": "premium"},
# and no metadata for celia@example.com
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "alice@example.com"}],
"cc": [{"email": "cc@example.com"}], # all recipients get the cc
"custom_args": {"anymail_id": "mocked-uuid-1", "order_id": "123"},
"substitutions": {
":name": "Alice",
":group": "Developers",
":site": "ExampleCo",
},
},
{
"to": [{"email": "bob@example.com", "name": '"Bob"'}],
"cc": [{"email": "cc@example.com"}],
"custom_args": {
"anymail_id": "mocked-uuid-2",
"order_id": "678",
"tier": "premium",
},
"substitutions": {
":name": "Bob",
":group": "Users",
":site": "ExampleCo",
},
},
{
"to": [{"email": "celia@example.com"}],
"cc": [{"email": "cc@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-3"},
"substitutions": {":group": "Users", ":site": "ExampleCo"},
},
],
)
@override_settings(
ANYMAIL_SENDGRID_GENERATE_MESSAGE_ID=False # else we force custom_args
)
def test_default_omits_options(self):
"""Make sure by default we don't send any ESP-specific options.
Options not specified by the caller should be omitted entirely from
the API call (*not* sent as False or empty). This ensures
that your ESP account settings apply by default.
"""
self.message.send()
data = self.get_api_call_json()
self.assertNotIn("asm", data)
self.assertNotIn("attachments", data)
self.assertNotIn("batch_id", data)
self.assertNotIn("categories", data)
self.assertNotIn("custom_args", data)
self.assertNotIn("headers", data)
self.assertNotIn("ip_pool_name", data)
self.assertNotIn("mail_settings", data)
self.assertNotIn("reply_to", data)
self.assertNotIn("reply_to_list", data)
self.assertNotIn("sections", data)
self.assertNotIn("send_at", data)
self.assertNotIn("template_id", data)
self.assertNotIn("tracking_settings", data)
for personalization in data["personalizations"]:
self.assertNotIn("custom_args", personalization)
self.assertNotIn("dynamic_template_data", personalization)
self.assertNotIn("headers", personalization)
self.assertNotIn("send_at", personalization)
self.assertNotIn("substitutions", personalization)
def test_esp_extra(self):
self.message.tags = ["tag"]
self.message.track_clicks = True
self.message.esp_extra = {
"ip_pool_name": "transactional",
"asm": { # subscription management
"group_id": 1,
},
"tracking_settings": {
"subscription_tracking": {
"enable": True,
"substitution_tag": "[unsubscribe_url]",
},
},
}
self.message.send()
data = self.get_api_call_json()
# merged from esp_extra:
self.assertEqual(data["ip_pool_name"], "transactional")
self.assertEqual(data["asm"], {"group_id": 1})
self.assertEqual(
data["tracking_settings"]["subscription_tracking"],
{"enable": True, "substitution_tag": "[unsubscribe_url]"},
)
# make sure we didn't overwrite Anymail message options:
self.assertEqual(data["categories"], ["tag"])
self.assertEqual(data["tracking_settings"]["click_tracking"], {"enable": True})
    def test_esp_extra_personalizations(self):
self.message.to = ["First recipient <first@example.com>", "second@example.com"]
self.message.merge_data = {} # force separate messages for each 'to'
# esp_extra['personalizations'] dict
# merges with message-derived personalizations
self.message.esp_extra = {"personalizations": {"future_feature": "works"}}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "first@example.com", "name": '"First recipient"'}],
"custom_args": {"anymail_id": "mocked-uuid-1"},
"future_feature": "works",
},
{
"to": [{"email": "second@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-2"},
"future_feature": "works", # merged into *every* recipient
},
],
)
# but esp_extra['personalizations'] list just overrides entire personalizations
# (for backwards compatibility)
self.message.esp_extra = {
"personalizations": [
{"to": [{"email": "custom@example.com"}], "future_feature": "works"}
]
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"],
[
{
"to": [{"email": "custom@example.com"}],
"custom_args": {"anymail_id": "mocked-uuid-3"},
"future_feature": "works",
},
],
)
# noinspection PyUnresolvedReferences
def test_send_attaches_anymail_status(self):
"""The anymail_status should be attached to the message when it is sent"""
# the DEFAULT_RAW_RESPONSE above is the *only* success response SendGrid
# returns, so no need to override it here
msg = mail.EmailMessage(
"Subject",
"Message",
"from@example.com",
["to1@example.com"],
)
sent = msg.send()
self.assertEqual(sent, 1)
self.assertEqual(msg.anymail_status.status, {"queued"})
self.assertEqual(msg.anymail_status.message_id, "mocked-uuid-1")
self.assertEqual(
msg.anymail_status.recipients["to1@example.com"].status, "queued"
)
self.assertEqual(
msg.anymail_status.recipients["to1@example.com"].message_id, "mocked-uuid-1"
)
self.assertEqual(
msg.anymail_status.esp_response.content, self.DEFAULT_RAW_RESPONSE
)
def test_batch_recipients_get_unique_message_ids(self):
"""In a batch send, each recipient should get a distinct own message_id"""
msg = mail.EmailMessage(
"Subject",
"Message",
"from@example.com",
["to1@example.com", "Someone Else <to2@example.com>"],
cc=["cc@example.com"],
)
msg.merge_data = {} # force batch send
msg.send()
self.assertEqual(
msg.anymail_status.message_id, {"mocked-uuid-1", "mocked-uuid-2"}
)
self.assertEqual(
msg.anymail_status.recipients["to1@example.com"].message_id, "mocked-uuid-1"
)
self.assertEqual(
msg.anymail_status.recipients["to2@example.com"].message_id, "mocked-uuid-2"
)
# cc's (and bcc's) get copied for all batch recipients,
# but we can only communicate one message_id:
self.assertEqual(
msg.anymail_status.recipients["cc@example.com"].message_id, "mocked-uuid-2"
)
@override_settings(ANYMAIL_SENDGRID_GENERATE_MESSAGE_ID=False)
def test_disable_generate_message_id(self):
msg = mail.EmailMessage(
"Subject",
"Message",
"from@example.com",
["to1@example.com"],
)
msg.send()
self.assertIsNone(msg.anymail_status.message_id)
self.assertIsNone(msg.anymail_status.recipients["to1@example.com"].message_id)
# noinspection PyUnresolvedReferences
def test_send_failed_anymail_status(self):
"""If the send fails, anymail_status should contain initial values"""
self.set_mock_response(status_code=500)
sent = self.message.send(fail_silently=True)
self.assertEqual(sent, 0)
self.assertIsNone(self.message.anymail_status.status)
self.assertIsNone(self.message.anymail_status.message_id)
self.assertEqual(self.message.anymail_status.recipients, {})
self.assertIsNone(self.message.anymail_status.esp_response)
def test_json_serialization_errors(self):
"""Try to provide more information about non-json-serializable data"""
self.message.metadata = {"total": Decimal("19.99")}
with self.assertRaises(AnymailSerializationError) as cm:
self.message.send()
err = cm.exception
self.assertIsInstance(err, TypeError) # compatibility with json.dumps
# our added context:
self.assertIn("Don't know how to send this data to SendGrid", str(err))
# original message:
self.assertRegex(str(err), r"Decimal.*is not JSON serializable")
@override_settings(ANYMAIL_SENDGRID_WORKAROUND_NAME_QUOTE_BUG=False)
def test_undocumented_workaround_name_quote_bug_setting(self):
mail.send_mail(
"Subject",
"Body",
'"Sender, Inc." <from@example.com',
['"Recipient, Ltd." <to@example.com>'],
)
data = self.get_api_call_json()
self.assertEqual(
data["personalizations"][0]["to"][0],
{
"email": "to@example.com",
"name": "Recipient, Ltd.", # no extra quotes on name
},
)
self.assertEqual(
data["from"], {"email": "from@example.com", "name": "Sender, Inc."}
)
@tag("sendgrid")
class SendGridBackendRecipientsRefusedTests(SendGridBackendMockAPITestCase):
"""
Should raise AnymailRecipientsRefused when *all* recipients are rejected or invalid
"""
# SendGrid doesn't check email bounce or complaint lists at time of send --
# it always just queues the message. You'll need to listen for the "rejected"
# and "failed" events to detect refused recipients.
pass # not applicable to this backend
@tag("sendgrid")
class SendGridBackendSessionSharingTestCase(
SessionSharingTestCases, SendGridBackendMockAPITestCase
):
"""Requests session sharing tests"""
pass # tests are defined in SessionSharingTestCases
@tag("sendgrid")
@override_settings(EMAIL_BACKEND="anymail.backends.sendgrid.EmailBackend")
class SendGridBackendImproperlyConfiguredTests(AnymailTestMixin, SimpleTestCase):
"""Test ESP backend without required settings in place"""
def test_missing_auth(self):
with self.assertRaisesRegex(AnymailConfigurationError, r"\bSENDGRID_API_KEY\b"):
mail.send_mail("Subject", "Message", "from@example.com", ["to@example.com"])
@tag("sendgrid")
@override_settings(EMAIL_BACKEND="anymail.backends.sendgrid.EmailBackend")
class SendGridBackendDisallowsV2Tests(AnymailTestMixin, SimpleTestCase):
"""Using v2-API-only features should cause errors with v3 backend"""
@override_settings(
ANYMAIL={"SENDGRID_USERNAME": "sg_username", "SENDGRID_PASSWORD": "sg_password"}
)
def test_user_pass_auth(self):
"""Make sure v2-only USERNAME/PASSWORD auth raises error"""
with self.assertRaisesMessage(
AnymailConfigurationError,
"SendGrid v3 API doesn't support username/password auth;"
" Please change to API key.",
):
mail.send_mail("Subject", "Message", "from@example.com", ["to@example.com"])
@override_settings(ANYMAIL={"SENDGRID_API_KEY": "test_api_key"})
def test_esp_extra_smtpapi(self):
"""x-smtpapi in the esp_extra indicates a desire to use the v2 api"""
message = mail.EmailMessage(
"Subject", "Body", "from@example.com", ["to@example.com"]
)
message.esp_extra = {"x-smtpapi": {"asm_group_id": 1}}
with self.assertRaisesMessage(
AnymailConfigurationError,
"You are attempting to use SendGrid v2 API-style x-smtpapi params with the"
" SendGrid v3 API. Please update your `esp_extra` to the new API.",
):
message.send()
|
3192092bafe801f2567c97f878c7bfc730bfd0b7
|
1178107b71343db8970c6474a49b4df50f3b801e
|
/dataviva/api/stats/profile_helper.py
|
0d6cbaa32590a06be17e6b10883fef3661f5f800
|
[
"MIT"
] |
permissive
|
DataViva/dataviva-site
|
fae24d3dffc29f980b7e989aec268a6be8c9ef68
|
4e6e861493138b3f296b601866fbb1e7a6c1746d
|
refs/heads/master
| 2023-09-01T17:39:26.928666
| 2023-08-10T12:56:40
| 2023-08-10T12:56:40
| 10,696,967
| 130
| 53
|
MIT
| 2023-09-14T12:08:09
| 2013-06-14T20:12:26
|
HTML
|
UTF-8
|
Python
| false
| false
| 19,924
|
py
|
profile_helper.py
|
from dataviva.api.stats.util import parse_year
from sqlalchemy import desc
from dataviva import __year_range__
from dataviva.api.attrs.models import University, Course_hedu, Course_sc, Stat, Bs
from dataviva.api.attrs.models import Yb, Ybs, Bra, Hs, Cbo, Cnae, Wld
from dataviva.api.secex.models import Ymbp, Ymbw, Ympw, Ymb, Ymp, Ymw
from dataviva.api.rais.models import Ybi, Ybo, Yio, Yb_rais, Yi, Yo
from dataviva.api.hedu.models import Yu, Yuc, Ybu, Yc_hedu
from dataviva.api.sc.models import Ybc_sc, Yc_sc
from dataviva.utils.title_case import title_case
from flask.ext.babel import gettext
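# The helpers used throughout this module -- make_stat, get_top_stat,
# get_stat_val and batch_stats -- are assumed to be defined later in this file.
# Their shapes, inferred from the call sites below, are roughly:
#   make_stat(key, group, title, profile=None, value=None, desc=None,
#             year=None, mode=None) -> dict describing one stat entry
#   get_top_stat(Model, group_col, value_col, AttrModel, filters)
#       -> (attr_instance, top_value) or None
#   get_stat_val(Model, column_or_columns, filters) -> value(s) or None
#   batch_stats(Model, attr_id, stat_ids, year=None) -> [(Stat, value), ...]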
def bra_stats(pobj, rais_year, secex_year):
stats = []
gen_title = gettext("General Stats")
key = "general"
if pobj.id != "all":
stat_ids = ["pop", "gini", "life_exp", "hdi", "gdp", "gdp_pc", "pop_density"]
stats_year = parse_year(__year_range__["stats"][-1].split("-")[0])
results = batch_stats(Ybs, pobj.id, stat_ids, stats_year)
for stat, val in results:
stats.append(make_stat(key, gen_title, stat.name(), desc=val, year=stats_year, mode=stat.id))
group = u'{1} {0}'.format(gettext("Employment Stats"), rais_year)
key = "rais"
if pobj.id == "all":
filters = [Yi.year == rais_year, Yi.cnae_id_len == 6]
result = get_top_stat(Yi, Yi.cnae_id, Yi.num_jobs, Cnae, filters)
else:
filters = [Ybi.year == rais_year, Ybi.bra_id == pobj.id, Ybi.cnae_id_len == 6]
result = get_top_stat(Ybi, Ybi.cnae_id, Ybi.num_jobs, Cnae, filters)
if result:
profile, value = result
stat = make_stat(key, group, gettext("Top Industry by Employment"), profile=profile, value=value, mode="num_jobs")
stats.append(stat)
if pobj.id == "all":
filters = [Yo.year == rais_year, Yo.cbo_id_len == 4]
profile, value = get_top_stat(Yo, Yo.cbo_id, Yo.num_jobs, Cbo, filters)
else:
filters = [Ybo.year == rais_year, Ybo.bra_id == pobj.id, Ybo.cbo_id_len == 4]
profile, value = get_top_stat(Ybo, Ybo.cbo_id, Ybo.num_jobs, Cbo, filters)
stat = make_stat(key, group, gettext("Top Occupation by Employment"), profile=profile, value=value, mode="num_jobs")
stats.append(stat)
if pobj.id != "all":
value = get_stat_val(Yb_rais, Yb_rais.wage, [Yb_rais.year == rais_year, Yb_rais.bra_id == pobj.id])
stats.append(make_stat(key, group, gettext("Total Monthly Wage"), desc=value, mode="wage"))
group = u'{1} {0}'.format(gettext("Trade Stats"), secex_year)
key = "secex"
if pobj.id == "all":
filters = [Ymp.year == secex_year, Ymp.month == 0, Ymp.hs_id_len == 6]
result = get_top_stat(Ymp, Ymp.hs_id, Ymp.export_val, Hs, filters)
else:
filters = [Ymbp.year == secex_year, Ymbp.month == 0, Ymbp.bra_id == pobj.id, Ymbp.hs_id_len == 6]
result = get_top_stat(Ymbp, Ymbp.hs_id, Ymbp.export_val, Hs, filters)
if result:
profile, value = result
stat = make_stat(key, group, gettext("Top Product by Export Value"), profile=profile, value=value, mode="export_val")
stats.append(stat)
if pobj.id == "all":
filters = [Ymw.year == secex_year, Ymw.month == 0, Ymw.wld_id_len == 5]
result = get_top_stat(Ymw, Ymw.wld_id, Ymw.export_val, Wld, filters)
else:
filters = [Ymbw.year == secex_year, Ymbw.month == 0, Ymbw.bra_id == pobj.id, Ymbw.wld_id_len == 5]
result = get_top_stat(Ymbw, Ymbw.wld_id, Ymbw.export_val, Wld, filters)
if result:
        profile, value = result
stats.append(make_stat(key, group, gettext("Top Destination by Export Value"), profile=profile, value=value, mode="export_val"))
if pobj.id != "all":
filters = [Ymb.year == secex_year, Ymb.month == 0, Ymb.bra_id == pobj.id]
result = get_stat_val(Ymb, [Ymb.export_val, Ymb.import_val], filters)
if result:
export_val, import_val = result
stats.append(make_stat(key, group, gettext("Total Exports"), desc=export_val, mode="export_val"))
stats.append(make_stat(key, group, gettext("Total Imports"), desc=import_val, mode="export_val"))
key = "general"
if len(pobj.id) > 1:
geo = Bra.query.get(pobj.id[:1])
stats.append(make_stat(key, gen_title, gettext("Region"), profile=geo))
if len(pobj.id) > 3:
geo = Bra.query.get(pobj.id[:3])
stats.append(make_stat(key, gen_title, gettext("State"), profile=geo))
if len(pobj.id) > 5:
geo = Bra.query.get(pobj.id[:5])
stats.append(make_stat(key, gen_title, gettext("Mesoregion"), profile=geo))
if len(pobj.id) > 7:
geo = Bra.query.get(pobj.id[:7])
stats.append(make_stat(key, gen_title, gettext("Microregion"), profile=geo))
if len(pobj.id) == 9:
stat_ids = ["airport", "airport_dist", "seaport", "seaport_dist", "area", "capital_dist", "neighbors"]
results = batch_stats(Bs, pobj.id, stat_ids)
for stat, val in results:
if stat.id == "neighbors":
bras = Bra.query.filter(Bra.id.in_(val.split(","))).all()
val = ", ".join([u"<a href='{}'>{}</a>".format(b.url(), b.name()) for b in bras])
stats.append(make_stat(key, gen_title, stat.name(), desc=val, mode=stat.id))
return stats
def cnae_stats(pobj, rais_year):
stats = []
five_years_ago = rais_year - 5
group = u'{1} {0}'.format(gettext("Employment Stats"), rais_year)
key = "rais"
filters = [Ybi.year == rais_year, Ybi.cnae_id == pobj.id, Ybi.bra_id_len == 9]
profile, value = get_top_stat(Ybi, Ybi.bra_id, Ybi.num_jobs, Bra, filters)
stats.append(make_stat(key, group, gettext("Top Municipality by Employment"), profile=profile, value=value, mode="num_jobs"))
filters = [Yio.year == rais_year, Yio.cnae_id == pobj.id, Yio.cbo_id_len == 4]
profile, value = get_top_stat(Yio, Yio.cbo_id, Yio.num_jobs, Cbo, filters)
stats.append(make_stat(key, group, gettext("Top Occupation by Employment"), profile=profile, value=value, mode="num_jobs"))
filters = [Yi.year == rais_year, Yi.cnae_id == pobj.id]
wage, wage_avg, num_est, age_avg = get_stat_val(Yi, [Yi.wage, Yi.wage_avg, Yi.num_est, Yi.age_avg], filters)
stats.append(make_stat(key, group, gettext("Total Monthly Wage"), desc=wage, mode="wage"))
stats.append(make_stat(key, group, gettext("Average Monthly Wage"), desc=wage_avg, mode="wage"))
stats.append(make_stat(key, group, gettext("Total Establishments"), desc=num_est, mode="num_est"))
stats.append(make_stat(key, group, gettext("Average Employee Age"), desc=age_avg, mode="age"))
group = u'{1} {0}'.format(gettext("Employment Stats"), five_years_ago)
wage, wage_avg = get_stat_val(Yi, [Yi.wage, Yi.wage_avg], filters)
filters = [Yi.year == five_years_ago, Yi.cnae_id == pobj.id]
wage, wage_avg = get_stat_val(Yi, [Yi.wage, Yi.wage_avg], filters)
stats.append(make_stat(key, group, gettext("Total Monthly Wage"), desc=wage, mode="wage"))
stats.append(make_stat(key, group, gettext("Average Monthly Wage"), desc=wage_avg, mode="wage"))
return stats
def cbo_stats(pobj, rais_year):
stats = []
five_years_ago = rais_year - 5
group = u'{1} {0}'.format(gettext("Employment Stats"), rais_year)
key = "rais"
filters = [Ybo.year == rais_year, Ybo.cbo_id == pobj.id, Ybo.bra_id_len == 9]
profile, value = get_top_stat(Ybo, Ybo.bra_id, Ybo.num_jobs, Bra, filters)
stats.append(make_stat(key, group, gettext("Top Municipality by Employment"), profile=profile, value=value, mode="num_jobs"))
filters = [Yio.year == rais_year, Yio.cbo_id == pobj.id, Yio.cnae_id_len == 6]
profile, value = get_top_stat(Yio, Yio.cnae_id, Yio.num_jobs, Cnae, filters)
stats.append(make_stat(key, group, gettext("Top Industry by Employment"), profile=profile, value=value, mode="num_jobs"))
filters = [Yo.year == rais_year, Yo.cbo_id == pobj.id]
res = get_stat_val(Yo, [Yo.wage, Yo.wage_avg, Yo.age_avg], filters)
if res:
wage, wage_avg, age_avg = res
stats.append(make_stat(key, group, gettext("Total Monthly Wage"), desc=wage, mode="wage"))
stats.append(make_stat(key, group, gettext("Average Monthly Wage"), desc=wage_avg, mode="wage"))
stats.append(make_stat(key, group, gettext("Average Employee Age"), desc=age_avg, mode="age"))
group = u'{1} {0}'.format(gettext("Employment Stats"), five_years_ago)
filters = [Yo.year == five_years_ago, Yo.cbo_id == pobj.id]
res = get_stat_val(Yo, [Yo.wage, Yo.wage_avg], filters)
if res:
wage, wage_avg = res
stats.append(make_stat(key, group, gettext("Total Monthly Wage"), desc=wage, mode="wage"))
stats.append(make_stat(key, group, gettext("Average Monthly Wage"), desc=wage_avg, mode="wage"))
return stats
def hs_stats(pobj, secex_year):
    stats = []
group = u'{1} {0}'.format(gettext("Trade Stats"), secex_year)
key = "secex"
five_years_ago = secex_year - 5
filters = [Ymbp.year == secex_year, Ymbp.month == 0, Ymbp.hs_id == pobj.id, Ymbp.bra_id_len == 9]
top_stat = get_top_stat(Ymbp, Ymbp.bra_id, Ymbp.export_val, Bra, filters)
if top_stat:
stats.append(make_stat(key, group, gettext("Top Municipality by Exports"), profile=top_stat[0], value=top_stat[1], mode="export_val"))
filters = [Ympw.year == secex_year, Ympw.month == 0, Ympw.hs_id == pobj.id, Ympw.wld_id_len == 5]
top_stat = get_top_stat(Ympw, Ympw.wld_id, Ympw.export_val, Wld, filters)
if top_stat:
stats.append(make_stat(key, group, gettext("Top Country by Exports"), profile=top_stat[0], value=top_stat[1], mode="export_val"))
filters = [Ymp.year == secex_year, Ymp.month == 0, Ymp.hs_id == pobj.id]
stat_val = get_stat_val(Ymp, [Ymp.export_val_growth, Ymp.export_val_growth_5, Ymp.export_val, Ymp.import_val], filters)
if stat_val:
g1, g5, total_exports, total_imports = stat_val
stats.append(make_stat(key, group, gettext("Nominal Annual Growth Rate (1 year)"), desc=g1, mode="export_val_growth"))
stats.append(make_stat(key, group, gettext("Nominal Annual Growth Rate (5 year)"), desc=g5, mode="export_val_growth"))
stats.append(make_stat(key, group, gettext("Total Exports"), desc=total_exports, mode="export_val"))
stats.append(make_stat(key, group, gettext("Total Imports"), desc=total_imports, mode="import_val"))
group = u'{1} {0}'.format(gettext("Trade Stats"), five_years_ago)
filters = [Ymp.year == five_years_ago, Ymp.month == 0, Ymp.hs_id == pobj.id]
total_exports = get_stat_val(Ymp, Ymp.export_val, filters)
stats.append(make_stat(key, group, gettext("Total Exports"), desc=total_exports, mode="export_val"))
return stats
def wld_stats(pobj, secex_year):
stats = []
dataset = "secex"
five_years_ago = secex_year - 5
group = u'{1} {0}'.format(gettext("Trade Stats"), secex_year)
key = "secex"
filters = [Ymbw.year == secex_year, Ymbw.month == 0, Ymbw.wld_id == pobj.id, Ymbw.bra_id_len == 9]
profile, value = get_top_stat(Ymbw, Ymbw.bra_id, Ymbw.export_val, Bra, filters)
stats.append(make_stat(key, group, gettext("Top Destination by Export Value"), profile=profile, value=value, mode="export_val"))
filters = [Ympw.year == secex_year, Ympw.month == 0, Ympw.wld_id == pobj.id, Ympw.hs_id_len == 6]
profile, value = get_top_stat(Ympw, Ympw.hs_id, Ympw.export_val, Hs, filters)
stat = make_stat(key, group, gettext("Top Product by Export Value"), profile=profile, value=value, mode="export_val")
stats.append(stat)
filters = [Ymw.year == secex_year, Ymw.month == 0, Ymw.wld_id == pobj.id]
g1e, g5e, total_exports, g1i, g5i, total_imports, eci = get_stat_val(Ymw, [Ymw.export_val_growth, Ymw.export_val_growth_5, Ymw.export_val, Ymw.import_val_growth, Ymw.import_val_growth_5, Ymw.import_val, Ymw.eci], filters)
stats.append(make_stat(key, group, gettext("Total Imports"), desc=total_imports, mode="import_val"))
stats.append(make_stat(key, group, gettext("Nominal Annual Growth Rate (1 year)"), desc=g1i, mode="import_val_growth"))
stats.append(make_stat(key, group, gettext("Nominal Annual Growth Rate (5 year)"), desc=g5i, mode="import_val_growth"))
stats.append(make_stat(key, group, gettext("Total Exports"), desc=total_exports, mode="export_val"))
stats.append(make_stat(key, group, gettext("Nominal Annual Growth Rate (1 year)"), desc=g1e, mode="export_val_growth"))
stats.append(make_stat(key, group, gettext("Nominal Annual Growth Rate (5 year)"), desc=g5e, mode="export_val_growth"))
stats.append(make_stat(key, group, gettext("Economic Complexity"), desc=eci, mode="eci"))
group = u'{1} {0}'.format(gettext("Trade Stats"), five_years_ago)
filters = [Ymw.year == five_years_ago, Ymw.month == 0, Ymw.wld_id == pobj.id]
total_exports, total_imports, eci = get_stat_val(Ymw, [Ymw.export_val, Ymw.import_val, Ymw.eci], filters)
stats.append(make_stat(key, group, gettext("Total Exports"), desc=total_exports, mode="export_val"))
stats.append(make_stat(key, group, gettext("Total Imports"), desc=total_imports, mode="import_val"))
stats.append(make_stat(key, group, gettext("Economic Complexity"), desc=eci, mode="eci"))
return stats
def university_stats(pobj, hedu_year):
stats = []
gen_title = gettext("General Stats")
key = "general"
filters = [Ybu.year == hedu_year, Ybu.university_id == pobj.id, Ybu.bra_id_len == 9, Ybu.bra_id != "0xx000007"]
campuses, num_campuses = get_top_stats(Ybu, Ybu.bra_id, Ybu.enrolled, Bra, filters, max=5)
if num_campuses:
val = ", ".join([u"<a href='{}'>{}</a>".format(c[0].url(), c[0].name()) for c in campuses])
if num_campuses > len(campuses):
val += "<br /> +{} more".format(num_campuses-len(campuses))
if num_campuses > 1:
stats.append(make_stat(key, gen_title, gettext("Top Campuses"), desc=val))
else:
stats.append(make_stat(key, gen_title, gettext("Location"), desc=val))
stats.append(make_stat(key, gen_title, gettext("Administrative Dependency"), desc=pobj.school_type()))
group = u'{1} {0}'.format(gettext("Enrollment Stats"), hedu_year)
key = "hedu"
filters = [Yuc.year == hedu_year, Yuc.university_id == pobj.id, Yuc.course_hedu_id_len == 6]
profile, value = get_top_stat(Yuc, Yuc.course_hedu_id, Yuc.enrolled, Course_hedu, filters)
stats.append(make_stat(key, group, gettext("Top Major by Enrollment"), profile=profile, value=value, mode="enrolled"))
filters = [Yu.year == hedu_year, Yu.university_id == pobj.id]
    stat_vals = get_stat_val(Yu, [Yu.enrolled, Yu.graduates], filters)
if stat_vals:
enrolled, graduates = stat_vals
stats.append(make_stat(key, group, gettext("Total Enrollment"), desc=enrolled, mode="enrolled"))
stats.append(make_stat(key, group, gettext("Total Graduates"), desc=graduates, mode="graduates"))
return stats
def course_hedu_stats(pobj, hedu_year):
stats = []
group = u'{1} {0}'.format(gettext("Enrollment Stats"), hedu_year)
key = "hedu"
filters = [Yuc.year == hedu_year, Yuc.course_hedu_id == pobj.id] # -- no nesting for university_ids
top_stat = get_top_stat(Yuc, Yuc.university_id, Yuc.enrolled, University, filters)
if top_stat:
stats.append(make_stat(key, group, gettext("Top University by Enrollment"), profile=top_stat[0], value=top_stat[1], mode="enrolled"))
filters = [Yc_hedu.year == hedu_year, Yc_hedu.course_hedu_id == pobj.id]
    stat_val = get_stat_val(Yc_hedu, [Yc_hedu.enrolled, Yc_hedu.graduates], filters)
if stat_val:
enrolled, graduates = stat_val
stats.append(make_stat(key, group, gettext("Total Enrollment"), desc=enrolled, mode="enrolled"))
stats.append(make_stat(key, group, gettext("Total Graduates"), desc=graduates, mode="graduates"))
return stats
def course_sc_stats(pobj):
stats = []
sc_year = parse_year(__year_range__["sc"][-1].split("-")[0])
group = u'{1} {0}'.format(gettext("Enrollment Stats"), sc_year)
key = "hedu"
filters = [Ybc_sc.year == sc_year, Ybc_sc.course_sc_id == pobj.id, Ybc_sc.bra_id_len == 9]
profile, value = get_top_stat(Ybc_sc, Ybc_sc.bra_id, Ybc_sc.enrolled, Bra, filters)
stats.append(make_stat(key, group, gettext("Top Municipality by Enrollment"), profile=profile, value=value, mode="enrolled"))
filters = [Yc_sc.year == sc_year, Yc_sc.course_sc_id == pobj.id]
    enrolled, age = get_stat_val(Yc_sc, [Yc_sc.enrolled, Yc_sc.age], filters)
stats.append(make_stat(key, group, gettext("Total Enrollment"), desc=enrolled, mode="enrolled"))
stats.append(make_stat(key, group, gettext("Average Age"), desc=age, mode="graduates"))
return stats
def compute_stats(pobj):
attr_type = pobj.__class__.__name__.lower()
stats = []
if attr_type == "wld" and pobj.id == "all":
attr_type = "bra"
rais_year = parse_year(__year_range__["rais"][-1].split("-")[0])
secex_year = parse_year(__year_range__["secex"][-1].split("-")[0])
hedu_year = parse_year(__year_range__["hedu"][-1].split("-")[0])
if attr_type == "bra":
return bra_stats(pobj, rais_year, secex_year)
elif attr_type == "cnae":
return cnae_stats(pobj, rais_year)
elif attr_type == "cbo":
return cbo_stats(pobj, rais_year)
elif attr_type == "hs":
return hs_stats(pobj, secex_year)
elif attr_type == "wld":
return wld_stats(pobj, secex_year)
elif attr_type == "university":
return university_stats(pobj, hedu_year)
elif attr_type == "course_hedu":
return course_hedu_stats(pobj, hedu_year)
elif attr_type == "course_sc":
return course_sc_stats(pobj)
return stats
def batch_stats(Tbl, bra_id, stat_ids, year=None):
filters = [Tbl.stat_id.in_(stat_ids), Tbl.bra_id == bra_id]
if year:
filters.append(Tbl.year == year)
results = Tbl.query.join(Stat).with_entities(Stat, Tbl.stat_val).filter(*filters).all()
return results
def get_stat_val(Tbl, metric_col, filters):
    if not isinstance(metric_col, list):
q = Tbl.query.with_entities(metric_col).filter(*filters)
res = q.first()
if res:
return res[0]
else:
return None
else:
q = Tbl.query.with_entities(*metric_col).filter(*filters)
return q.first()
def get_top_stat(Tbl, show_col, metric_col, Profile, filters):
q = Tbl.query.with_entities(show_col, metric_col).filter(*filters)
res = q.order_by(desc(metric_col)).first()
if res:
pk, val = res
profile = Profile.query.get(pk)
return profile, val
return (None, None)
def get_top_stats(Tbl, show_col, metric_col, Profile, filters, max=5):
q = Tbl.query.with_entities(show_col, metric_col).filter(*filters)
res = q.order_by(desc(metric_col)).all()
if res:
top_stats = []
for r in res[:max]:
pk, val = r
profile = Profile.query.get(pk)
top_stats.append((profile, val))
return top_stats, len(res)
return ([], 0)
def make_stat(key, group, name, desc=None, value=None, url=None, mode=None, year=None, profile=None):
if year:
name += " ({})".format(year)
if profile:
url = profile.url()
desc = profile.name()
if not value:
if not desc:
desc = "-"
    return {
        "group": group,
        "key": key,
        "name": title_case(name),
        "url": url,
        "desc": desc,
        "value": value,
        "mode": mode
    }
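def _make_stat_example():
    # Illustrative sketch (not wired into any view): every profile stat is a
    # plain dict built by make_stat(), so templates can group rows by "group"
    # and "key" and link out via "url" when a profile object was supplied.
    # The key and desc values here are arbitrary.
    stat = make_stat("rais", u"Employment Stats 2013", "total monthly wage",
                     desc="R$ 1.2B", mode="wage")
    assert stat["key"] == "rais" and stat["desc"] == "R$ 1.2B"
    return stat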
|
5645e4102cb457b7ab9fb3e9ffcdeb5d579a261d
|
39b021eabbb8e3be1734cf92fd641965a796b0eb
|
/examples/qm7/qm7_tensorgraph_GraphConv.py
|
0a1dfd42a60d0c26cf7a04571f149b72ae17bddb
|
[
"MIT"
] |
permissive
|
deepchem/deepchem
|
066cbf42316b2f6bec0166727e0264a485d5266f
|
ee6e67ebcf7bf04259cf13aff6388e2b791fea3d
|
refs/heads/master
| 2023-09-02T01:32:17.860111
| 2023-08-31T18:49:00
| 2023-08-31T18:49:00
| 43,098,215
| 4,876
| 1,905
|
MIT
| 2023-09-14T19:10:44
| 2015-09-24T23:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
qm7_tensorgraph_GraphConv.py
|
"""
Script that trains GraphConv models on qm7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
# Load QM7 dataset
tasks, datasets, transformers = dc.molnet.load_qm7(featurizer='GraphConv',
move_mean=True)
train_dataset, valid_dataset, test_dataset = datasets
# Define evaluation metrics
metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
# Batch size of models
batch_size = 64
model = dc.models.GraphConvModel(len(tasks),
batch_size=batch_size,
learning_rate=0.001,
mode="regression")
# Fit the model
model.fit(train_dataset, nb_epoch=50)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores: %s" % train_scores)
print("Validation scores: %s" % valid_scores)
|
2f22c2621f6ffd74a3193536877f5bcff6d1fdeb
|
f88a5ad8af044f7956a9d03d57f6ed6610dc9246
|
/pyowm/utils/geo.py
|
255326f2362e80d0ebaa370df8c2b3a4790917c0
|
[
"MIT"
] |
permissive
|
csparpa/pyowm
|
9664089d8dd792489882696f194d9659146d95a1
|
3be796cc60fd2cac1fe1fba005dc4c7f650debcf
|
refs/heads/master
| 2023-08-15T17:47:31.064245
| 2023-06-24T11:50:42
| 2023-06-24T11:50:42
| 12,535,703
| 855
| 240
|
MIT
| 2023-08-30T14:24:56
| 2013-09-02T08:43:51
|
Python
|
UTF-8
|
Python
| false
| false
| 13,168
|
py
|
geo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import geojson
import json
import math
EARTH_RADIUS_KM = 6378.1
# utilities
def assert_is_lat(val):
"""
    Checks if the given value is a feasible decimal latitude
    :param val: value to be checked
    :type val: int or float
:returns: `None`
:raises: *ValueError* if value is out of latitude boundaries, *AssertionError* if type is wrong
"""
assert type(val) is float or type(val) is int, "Value must be a number"
if val < -90.0 or val > 90.0:
raise ValueError("Latitude value must be between -90 and 90")
def assert_is_lon(val):
"""
    Checks if the given value is a feasible decimal longitude
    :param val: value to be checked
    :type val: int or float
:returns: `None`
:raises: *ValueError* if value is out of longitude boundaries, *AssertionError* if type is wrong
"""
assert type(val) is float or type(val) is int, "Value must be a number"
if val < -180.0 or val > 180.0:
raise ValueError("Longitude value must be between -180 and 180")
# classes
class Geometry:
"""
Abstract parent class for geotypes
"""
def geojson(self):
"""
Returns a GeoJSON string representation of this geotype, compliant to
RFC 7946 (https://tools.ietf.org/html/rfc7946)
:return: str
"""
raise NotImplementedError()
def to_dict(self):
"""
Returns a dict representation of this geotype
:return: dict
"""
raise NotImplementedError()
class Point(Geometry):
"""
A Point geotype. Represents a single geographic point
    :param lon: decimal longitude for the geopoint
    :type lon: int or float
    :param lat: decimal latitude for the geopoint
    :type lat: int or float
    :returns: a *Point* instance
    :raises: *ValueError* when coordinates are out of the valid range
"""
def __init__(self, lon, lat):
assert_is_lon(lon)
assert_is_lat(lat)
self._geom = geojson.Point((lon, lat))
@property
def lon(self):
return self._geom['coordinates'][0]
@property
def lat(self):
return self._geom['coordinates'][1]
def bounding_square_polygon(self, inscribed_circle_radius_km=10.0):
"""
Returns a square polygon (bounding box) that circumscribes the circle having this geopoint as centre and
having the specified radius in kilometers.
The polygon's points calculation is based on theory exposed by: http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
by Jan Philip Matuschek, owner of the intellectual property of such material.
In short:
- locally to the geopoint, the Earth's surface is approximated to a sphere with radius = Earth's radius
- the calculation works fine also when the bounding box contains the Earth's poles and the 180 deg meridian
:param inscribed_circle_radius_km: the radius of the inscribed circle, defaults to 10 kms
:type inscribed_circle_radius_km: int or float
:return: a `pyowm.utils.geo.Polygon` instance
"""
assert isinstance(inscribed_circle_radius_km, (int, float))
assert inscribed_circle_radius_km > 0., 'Radius must be greater than zero'
# turn metric distance to radians on the approximated local sphere
rad_distance = float(inscribed_circle_radius_km) / EARTH_RADIUS_KM
        # calculating min/max lat for bounding box (values are in radians here)
        bb_min_lat_rad = self.lat * math.pi/180. - rad_distance
        bb_max_lat_rad = self.lat * math.pi/180. + rad_distance
        # now checking for poles...
        if bb_min_lat_rad > math.radians(-90) and bb_max_lat_rad < math.radians(90):  # no poles in the bounding box
            delta_lon = math.asin(math.sin(rad_distance) / math.cos(math.radians(self.lat)))
            bb_min_lon_rad = math.radians(self.lon) - delta_lon
            if bb_min_lon_rad < math.radians(-180):
                bb_min_lon_rad += 2 * math.pi
            bb_max_lon_rad = math.radians(self.lon) + delta_lon
            if bb_max_lon_rad > math.radians(180):
                bb_max_lon_rad -= 2 * math.pi
        else:  # a pole is contained in the bounding box
            bb_min_lat_rad = max(bb_min_lat_rad, math.radians(-90))
            bb_max_lat_rad = min(bb_max_lat_rad, math.radians(90))
            bb_min_lon_rad = math.radians(-180)
            bb_max_lon_rad = math.radians(180)
        # turn back from radians to decimal degrees
        bb_min_lat = bb_min_lat_rad * 180./math.pi
        bb_max_lat = bb_max_lat_rad * 180./math.pi
        bb_min_lon = bb_min_lon_rad * 180./math.pi
        bb_max_lon = bb_max_lon_rad * 180./math.pi
return Polygon([[
[bb_min_lon, bb_max_lat],
[bb_max_lon, bb_max_lat],
[bb_max_lon, bb_min_lat],
[bb_min_lon, bb_min_lat],
[bb_min_lon, bb_max_lat]
]])
def geojson(self):
return geojson.dumps(self._geom)
def to_dict(self):
return json.loads(self.geojson())
@classmethod
def from_dict(cls, the_dict):
"""
Builds a Point instance out of a geoJSON compliant dict
:param the_dict: the geoJSON dict
:return: `pyowm.utils.geo.Point` instance
"""
geom = geojson.loads(json.dumps(the_dict))
result = Point(0, 0)
result._geom = geom
return result
def __repr__(self):
return "<%s.%s - lon=%s, lat=%s>" % (__name__, self.__class__.__name__, self.lon, self.lat)
class MultiPoint(Geometry):
"""
A MultiPoint geotype. Represents a set of geographic points
:param list_of_tuples: list of tuples, each one being the decimal (lon, lat) coordinates of a geopoint
:type list_of_tuples: list
:returns: a *MultiPoint* instance
"""
def __init__(self, list_of_tuples):
if not list_of_tuples:
raise ValueError("A MultiPoint cannot be empty")
for t in list_of_tuples:
assert_is_lon(t[0])
assert_is_lat(t[1])
self._geom = geojson.MultiPoint(list_of_tuples)
@classmethod
def from_points(cls, iterable_of_points):
"""
Creates a MultiPoint from an iterable collection of `pyowm.utils.geo.Point` instances
:param iterable_of_points: iterable whose items are `pyowm.utils.geo.Point` instances
:type iterable_of_points: iterable
:return: a *MultiPoint* instance
"""
return MultiPoint([(p.lon, p.lat) for p in iterable_of_points])
@property
def longitudes(self):
"""
List of decimal longitudes of this MultiPoint instance
        :return: list of float
"""
return [coords[0] for coords in self._geom['coordinates']]
@property
def latitudes(self):
"""
List of decimal latitudes of this MultiPoint instance
        :return: list of float
"""
return [coords[1] for coords in self._geom['coordinates']]
def geojson(self):
return geojson.dumps(self._geom)
def to_dict(self):
return json.loads(self.geojson())
@classmethod
def from_dict(cls, the_dict):
"""
Builds a MultiPoint instance out of a geoJSON compliant dict
:param the_dict: the geoJSON dict
:return: `pyowm.utils.geo.MultiPoint` instance
"""
geom = geojson.loads(json.dumps(the_dict))
result = MultiPoint([(0, 0), (0, 0)])
result._geom = geom
return result
class Polygon(Geometry):
"""
    A Polygon geotype. Each Polygon is made up by one or more lines: a line represents a set of connected geographic
    points and is conveyed by a list of points, the last one of which must coincide with its very first one.
    As said, Polygons can also be made up by multiple lines (therefore, Polygons with "holes" are allowed)
    :param list_of_lists: list of lists, each sublist being a line and being composed by tuples - each one being the
        decimal (lon, lat) couple of a geopoint. The last point specified MUST coincide with the first one specified
    :type list_of_lists: list
    :returns: a *Polygon* instance
    :raises: *ValueError* when the last point and the first point do not coincide or when no points are specified at all
"""
def __init__(self, list_of_lists):
for l in list_of_lists:
for t in l:
assert_is_lon(t[0])
assert_is_lat(t[1])
if not list_of_lists:
raise ValueError("A Polygon cannot be empty")
first, last = list_of_lists[0][0], list_of_lists[0][-1]
if first != last:
raise ValueError("The start and end point of Polygon must coincide")
self._geom = geojson.Polygon(list_of_lists)
def geojson(self):
return geojson.dumps(self._geom)
def to_dict(self):
return json.loads(self.geojson())
@property
def points(self):
"""
Returns the list of *Point* instances representing the points of the polygon
:return: list of *Point* objects
"""
feature = geojson.Feature(geometry=self._geom)
points_coords = list(geojson.utils.coords(feature))
return [Point(p[0], p[1]) for p in points_coords]
@classmethod
def from_dict(cls, the_dict):
"""
Builds a Polygon instance out of a geoJSON compliant dict
:param the_dict: the geoJSON dict
:return: `pyowm.utils.geo.Polygon` instance
"""
geom = geojson.loads(json.dumps(the_dict))
result = Polygon([[[0, 0], [0, 0]]])
result._geom = geom
return result
@classmethod
def from_points(cls, list_of_lists):
"""
Creates a *Polygon* instance out of a list of lists, each sublist being populated with
`pyowm.utils.geo.Point` instances
        :param list_of_lists: list of lists of `pyowm.utils.geo.Point` instances
        :type list_of_lists: list
:returns: a *Polygon* instance
"""
result = []
for l in list_of_lists:
curve = [(point.lon, point.lat) for point in l]
result.append(curve)
return Polygon(result)
class MultiPolygon(Geometry):
"""
    A MultiPolygon geotype. Each MultiPolygon represents a set of (possibly disjoint) Polygons. Each MultiPolygon is
    composed by an iterable whose elements are the list of lists defining a Polygon geotype. Please refer to the
    `pyowm.utils.geo.Polygon` documentation for details
    :param iterable_of_list_of_lists: iterable whose elements are list of lists of tuples
    :type iterable_of_list_of_lists: iterable
    :returns: a *MultiPolygon* instance
    :raises: *ValueError* when the last point and the first point of any Polygon do not coincide or when no points are specified at all
"""
def __init__(self, iterable_of_list_of_lists):
if not iterable_of_list_of_lists:
raise ValueError("A MultiPolygon cannot be empty")
for list_of_lists in iterable_of_list_of_lists:
Polygon(list_of_lists)
self._geom = geojson.MultiPolygon(iterable_of_list_of_lists)
def geojson(self):
return geojson.dumps(self._geom)
def to_dict(self):
return json.loads(self.geojson())
@classmethod
def from_dict(cls, the_dict):
"""
        Builds a MultiPolygon instance out of a geoJSON compliant dict
:param the_dict: the geoJSON dict
:return: `pyowm.utils.geo.MultiPolygon` instance
"""
geom = geojson.loads(json.dumps(the_dict))
result = MultiPolygon([
[[[0, 0], [0, 0]]],
[[[1, 1], [1, 1]]]
])
result._geom = geom
return result
@classmethod
def from_polygons(cls, iterable_of_polygons):
"""
Creates a *MultiPolygon* instance out of an iterable of Polygon geotypes
        :param iterable_of_polygons: list of `pyowm.utils.geo.Polygon` instances
:type iterable_of_polygons: iterable
:returns: a *MultiPolygon* instance
"""
return MultiPolygon([polygon.to_dict()['coordinates'] for polygon in iterable_of_polygons])
class GeometryBuilder:
@classmethod
def build(cls, the_dict):
"""
Builds a `pyowm.utils.geo.Geometry` subtype based on the geoJSON geometry type specified on the input dictionary
:param the_dict: a geoJSON compliant dict
:return: a `pyowm.utils.geo.Geometry` subtype instance
        :raises `ValueError` if the geometry type cannot be recognized
"""
assert isinstance(the_dict, dict), 'Geometry must be a dict'
geom_type = the_dict.get('type', None)
if geom_type == 'Point':
return Point.from_dict(the_dict)
elif geom_type == 'MultiPoint':
return MultiPoint.from_dict(the_dict)
elif geom_type == 'Polygon':
return Polygon.from_dict(the_dict)
elif geom_type == 'MultiPolygon':
return MultiPolygon.from_dict(the_dict)
else:
raise ValueError('Unable to build a GeoType object: unrecognized geometry type')
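def _geometry_builder_example():
    # Illustrative sketch (coordinates are arbitrary): the builder dispatches
    # on the geoJSON "type" field and the result round-trips cleanly through
    # to_dict().
    the_dict = {"type": "Point", "coordinates": [12.4964, 41.9028]}
    geom = GeometryBuilder.build(the_dict)
    assert isinstance(geom, Point)
    assert geom.to_dict() == the_dict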
|
aaa0965df6411422d01d9a3295b20642113c3ae5
|
bec0318d90a31fed9c35ec2a35a1b9f8f9988834
|
/sktime_dl/networks/_lstm.py
|
699e8ccfc69bc33b42f9ee13da263306e42a6c54
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime-dl
|
8e64fe91726b2705db8c94c50302cdc28206cc89
|
b565b7499f58f43da7314f1bf26eccce94e88134
|
refs/heads/master
| 2023-08-12T07:53:32.899908
| 2022-05-06T18:55:01
| 2022-05-06T18:55:01
| 198,628,357
| 586
| 78
|
BSD-3-Clause
| 2023-03-25T01:18:15
| 2019-07-24T12:08:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,161
|
py
|
_lstm.py
|
__author__ = "Withington"
from tensorflow import keras
from sktime_dl.networks._network import BaseDeepNetwork
class LSTMNetwork(BaseDeepNetwork):
""" Long Short-Term Memory (LSTM)
Adapted from the implementation of Brownlee, J. (2018)
https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
"""
def __init__(self):
self.random_state = None
        # `units` must be set (e.g. by the owning estimator) before
        # build_network() is called; its two entries size the stacked LSTM layers
        self.units = None
def build_network(self, input_shape, **kwargs):
"""
        Construct a network and return its input and output layers
        Parameters
        ----------
input_shape : tuple
The shape of the data fed into the input layer
Returns
-------
input_layer : a keras layer
output_layer : a keras layer
"""
input_layer = keras.layers.Input(input_shape)
output_layer = keras.layers.LSTM(
units=self.units[0],
activation='relu',
return_sequences=True)(input_layer)
output_layer = keras.layers.LSTM(
units=self.units[1],
activation='relu')(output_layer)
return input_layer, output_layer
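def _example_usage():
    # Illustrative sketch (the (100, 1) input shape and [50, 50] units are
    # arbitrary): populate `units`, build the layer graph, and wrap it in a
    # Keras model; in sktime-dl the owning estimator performs these steps.
    network = LSTMNetwork()
    network.units = [50, 50]
    input_layer, output_layer = network.build_network((100, 1))
    return keras.models.Model(inputs=input_layer, outputs=output_layer)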
|
cfcf62879df161b02cafbd18a922fdd3b71fa347
|
e993a7972529f60210d9dd6d7c4097c62c37bcdf
|
/model/smpl.py
|
587f5419601a74df92c1e37263b28d4aa6a7c0a9
|
[
"MIT"
] |
permissive
|
GuyTevet/motion-diffusion-model
|
64756013105a80ea2a3180a73ac86519b361e53b
|
8139dda55d90a58aa5a257ebf159b2ecfb78c632
|
refs/heads/main
| 2023-09-01T05:00:14.156745
| 2023-06-06T23:42:33
| 2023-06-06T23:42:33
| 543,082,997
| 2,302
| 265
|
MIT
| 2023-08-29T09:27:54
| 2022-09-29T11:24:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,858
|
py
|
smpl.py
|
# This code is based on https://github.com/Mathux/ACTOR.git
import numpy as np
import torch
import contextlib
from smplx import SMPLLayer as _SMPLLayer
from smplx.lbs import vertices2joints
# action2motion_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 21, 24, 38]
# change 0 and 8
action2motion_joints = [8, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 12, 13, 14, 21, 24, 38]
from utils.config import SMPL_MODEL_PATH, JOINT_REGRESSOR_TRAIN_EXTRA
JOINTSTYPE_ROOT = {"a2m": 0, # action2motion
"smpl": 0,
"a2mpl": 0, # set(smpl, a2m)
"vibe": 8} # 0 is the 8 position: OP MidHip below
JOINT_MAP = {
'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,
'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,
'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,
'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,
'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,
'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,
'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,
'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,
'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,
'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,
'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,
'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,
'Neck (LSP)': 47, 'Top of Head (LSP)': 48,
'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,
'Spine (H36M)': 51, 'Jaw (H36M)': 52,
'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,
'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27
}
JOINT_NAMES = [
'OP Nose', 'OP Neck', 'OP RShoulder',
'OP RElbow', 'OP RWrist', 'OP LShoulder',
'OP LElbow', 'OP LWrist', 'OP MidHip',
'OP RHip', 'OP RKnee', 'OP RAnkle',
'OP LHip', 'OP LKnee', 'OP LAnkle',
'OP REye', 'OP LEye', 'OP REar',
'OP LEar', 'OP LBigToe', 'OP LSmallToe',
'OP LHeel', 'OP RBigToe', 'OP RSmallToe', 'OP RHeel',
'Right Ankle', 'Right Knee', 'Right Hip',
'Left Hip', 'Left Knee', 'Left Ankle',
'Right Wrist', 'Right Elbow', 'Right Shoulder',
'Left Shoulder', 'Left Elbow', 'Left Wrist',
'Neck (LSP)', 'Top of Head (LSP)',
'Pelvis (MPII)', 'Thorax (MPII)',
'Spine (H36M)', 'Jaw (H36M)',
'Head (H36M)', 'Nose', 'Left Eye',
'Right Eye', 'Left Ear', 'Right Ear'
]
# adapted from VIBE/SPIN to output smpl_joints, vibe joints and action2motion joints
class SMPL(_SMPLLayer):
""" Extension of the official SMPL implementation to support more joints """
def __init__(self, model_path=SMPL_MODEL_PATH, **kwargs):
kwargs["model_path"] = model_path
# remove the verbosity for the 10-shapes beta parameters
with contextlib.redirect_stdout(None):
super(SMPL, self).__init__(**kwargs)
J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA)
self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
vibe_indexes = np.array([JOINT_MAP[i] for i in JOINT_NAMES])
a2m_indexes = vibe_indexes[action2motion_joints]
smpl_indexes = np.arange(24)
a2mpl_indexes = np.unique(np.r_[smpl_indexes, a2m_indexes])
self.maps = {"vibe": vibe_indexes,
"a2m": a2m_indexes,
"smpl": smpl_indexes,
"a2mpl": a2mpl_indexes}
def forward(self, *args, **kwargs):
smpl_output = super(SMPL, self).forward(*args, **kwargs)
extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
all_joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
output = {"vertices": smpl_output.vertices}
        for joints_type, indexes in self.maps.items():
            output[joints_type] = all_joints[:, indexes]
return output
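def _smpl_example():
    # Illustrative sketch (assumes the SMPL model files referenced by
    # SMPL_MODEL_PATH and JOINT_REGRESSOR_TRAIN_EXTRA are available locally,
    # and that the smplx layer accepts a no-argument forward with a default
    # batch of one neutral pose): a forward pass returns the mesh vertices
    # plus every supported joint set.
    smpl = SMPL()
    output = smpl()
    assert output["vertices"].shape[1] == 6890  # SMPL mesh resolution
    assert output["smpl"].shape[1] == 24        # canonical SMPL joints
    return output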
|
b2b78dd278496162c6045beb121c59caf0275c36
|
c703b8ac3b5545857f6c95efa2d61eaf7a664021
|
/iPERCore/tools/utils/geometry/rotations.py
|
d4baadd74e390eed8a5577fa3b8601a363594475
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
iPERDance/iPERCore
|
d29681d229b3098b3517b1abf4f7ea65f579de73
|
fcf9a18ffd66bf3fdd3eea4153a3bc4785131848
|
refs/heads/main
| 2023-07-30T15:04:15.835396
| 2023-04-12T14:21:23
| 2023-04-12T14:21:23
| 313,664,064
| 2,520
| 339
|
Apache-2.0
| 2023-05-12T03:26:52
| 2020-11-17T15:36:25
|
Python
|
UTF-8
|
Python
| false
| false
| 18,451
|
py
|
rotations.py
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import torch
from torch.nn import functional as F
import numpy as np
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
"""Convert 3x4 rotation matrix to 4d quaternion vector
This algorithm is based on algorithm described in
https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201
Args:
rotation_matrix (Tensor): the rotation matrix to convert.
Return:
Tensor: the rotation in quaternion
Shape:
- Input: :math:`(N, 3, 4)`
- Output: :math:`(N, 4)`
Example:
>>> input = torch.rand(4, 3, 4) # Nx3x4
>>> output = tgm.rotation_matrix_to_quaternion(input) # Nx4
"""
if not torch.is_tensor(rotation_matrix):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(rotation_matrix)))
if len(rotation_matrix.shape) > 3:
raise ValueError(
"Input size must be a three dimensional tensor. Got {}".format(
rotation_matrix.shape))
if not rotation_matrix.shape[-2:] == (3, 4):
raise ValueError(
"Input size must be a N x 3 x 4 tensor. Got {}".format(
rotation_matrix.shape))
rmat_t = torch.transpose(rotation_matrix, 1, 2)
mask_d2 = (rmat_t[:, 2, 2] < eps).float()
mask_d0_d1 = (rmat_t[:, 0, 0] > rmat_t[:, 1, 1]).float()
mask_d0_nd1 = (rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]).float()
t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
t0_rep = t0.repeat(4, 1).t()
t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
t1_rep = t1.repeat(4, 1).t()
t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
t2_rep = t2.repeat(4, 1).t()
t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
t3_rep = t3.repeat(4, 1).t()
mask_c0 = mask_d2 * mask_d0_d1
mask_c1 = mask_d2 * (1 - mask_d0_d1)
mask_c2 = (1 - mask_d2) * mask_d0_nd1
mask_c3 = (1 - mask_d2) * (1 - mask_d0_nd1)
mask_c0 = mask_c0.view(-1, 1).type_as(q0)
mask_c1 = mask_c1.view(-1, 1).type_as(q1)
mask_c2 = mask_c2.view(-1, 1).type_as(q2)
mask_c3 = mask_c3.view(-1, 1).type_as(q3)
q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa
t2_rep * mask_c2 + t3_rep * mask_c3) # noqa
q *= 0.5
return q
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
"""Convert quaternion vector to angle axis of rotation.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
quaternion (torch.Tensor): tensor with quaternions.
Return:
torch.Tensor: tensor with angle axis of rotation.
Shape:
- Input: :math:`(*, 4)` where `*` means, any number of dimensions
- Output: :math:`(*, 3)`
Example:
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = tgm.quaternion_to_angle_axis(quaternion) # Nx3
"""
if not torch.is_tensor(quaternion):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
.format(quaternion.shape))
# unpack input and compute conversion
q1: torch.Tensor = quaternion[..., 1]
q2: torch.Tensor = quaternion[..., 2]
q3: torch.Tensor = quaternion[..., 3]
sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
cos_theta: torch.Tensor = quaternion[..., 0]
two_theta: torch.Tensor = 2.0 * torch.where(
cos_theta < 0.0,
torch.atan2(-sin_theta, -cos_theta),
torch.atan2(sin_theta, cos_theta))
k_pos: torch.Tensor = two_theta / sin_theta
k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
angle_axis[..., 0] += q1 * k
angle_axis[..., 1] += q2 * k
angle_axis[..., 2] += q3 * k
return angle_axis
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
"""Convert an angle axis to a quaternion.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
angle_axis (torch.Tensor): tensor with angle axis.
Return:
torch.Tensor: tensor with quaternion.
Shape:
- Input: :math:`(*, 3)` where `*` means, any number of dimensions
- Output: :math:`(*, 4)`
Example:
        >>> angle_axis = torch.rand(2, 3) # Nx3
        >>> quaternion = tgm.angle_axis_to_quaternion(angle_axis) # Nx4
"""
if not torch.is_tensor(angle_axis):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(angle_axis)))
if not angle_axis.shape[-1] == 3:
raise ValueError("Input must be a tensor of shape Nx3 or 3. Got {}"
.format(angle_axis.shape))
# unpack input and compute conversion
a0: torch.Tensor = angle_axis[..., 0:1]
a1: torch.Tensor = angle_axis[..., 1:2]
a2: torch.Tensor = angle_axis[..., 2:3]
theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2
theta: torch.Tensor = torch.sqrt(theta_squared)
half_theta: torch.Tensor = theta * 0.5
mask: torch.Tensor = theta_squared > 0.0
ones: torch.Tensor = torch.ones_like(half_theta)
k_neg: torch.Tensor = 0.5 * ones
k_pos: torch.Tensor = torch.sin(half_theta) / theta
k: torch.Tensor = torch.where(mask, k_pos, k_neg)
w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
quaternion: torch.Tensor = torch.zeros_like(angle_axis)
quaternion[..., 0:1] += a0 * k
quaternion[..., 1:2] += a1 * k
quaternion[..., 2:3] += a2 * k
return torch.cat([w, quaternion], dim=-1)
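def _angle_axis_quaternion_round_trip_example():
    # Illustrative sanity check (values are arbitrary): angle-axis ->
    # quaternion -> angle-axis is numerically the identity for rotation
    # angles below pi.
    angle_axis = torch.tensor([[0.1, -0.2, 0.3]])
    recovered = quaternion_to_angle_axis(angle_axis_to_quaternion(angle_axis))
    assert torch.allclose(angle_axis, recovered, atol=1e-5)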
def rotation_matrix_to_angle_axis(rotation_matrix):
"""Convert 3x4 rotation matrix to Rodrigues vector
Args:
rotation_matrix (Tensor): rotation matrix.
Returns:
Tensor: Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 4)`
- Output: :math:`(N, 3)`
Example:
        >>> input = torch.rand(2, 3, 4) # Nx3x4
>>> output = tgm.rotation_matrix_to_angle_axis(input) # Nx3
"""
# todo add check that matrix is a valid rotation matrix
quaternion = rotation_matrix_to_quaternion(rotation_matrix)
return quaternion_to_angle_axis(quaternion)
def angle_axis_to_rotation_matrix(angle_axis):
"""Convert 3d vector of axis-angle rotation to 4x4 rotation matrix
Args:
angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
Tensor: tensor of 4x4 rotation matrices.
Shape:
- Input: :math:`(N, 3)`
- Output: :math:`(N, 4, 4)`
Example:
>>> input = torch.rand(1, 3) # Nx3
>>> output = tgm.angle_axis_to_rotation_matrix(input) # Nx4x4
"""
def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
# We want to be careful to only evaluate the square root if the
# norm of the angle_axis vector is greater than zero. Otherwise
# we get a division by zero.
k_one = 1.0
theta = torch.sqrt(theta2)
wxyz = angle_axis / (theta + eps)
wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
r00 = cos_theta + wx * wx * (k_one - cos_theta)
r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
r11 = cos_theta + wy * wy * (k_one - cos_theta)
r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
r22 = cos_theta + wz * wz * (k_one - cos_theta)
rotation_matrix = torch.cat(
[r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
return rotation_matrix.view(-1, 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
rotation_matrix = torch.cat(
[k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
return rotation_matrix.view(-1, 3, 3)
# stolen from ceres/rotation.h
_angle_axis = torch.unsqueeze(angle_axis, dim=1)
theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
theta2 = torch.squeeze(theta2, dim=1)
# compute rotation matrices
rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
# create mask to handle both cases
eps = 1e-6
mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
mask_pos = (mask).type_as(theta2)
mask_neg = (mask == False).type_as(theta2) # noqa
# create output pose matrix
batch_size = angle_axis.shape[0]
rotation_matrix = torch.eye(4).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 4, 4).repeat(batch_size, 1, 1)
# fill output matrix with masked values
rotation_matrix[..., :3, :3] = \
mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
return rotation_matrix # Nx4x4
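def _rotation_matrix_round_trip_example():
    # Illustrative sanity check (values are arbitrary): axis-angle -> 4x4
    # rotation matrix -> axis-angle. rotation_matrix_to_angle_axis expects an
    # Nx3x4 input, so the homogeneous 4x4 output is cropped to its first
    # three rows.
    angle_axis = torch.tensor([[0.0, 0.5, -0.25]])
    rotmat = angle_axis_to_rotation_matrix(angle_axis)  # Nx4x4
    recovered = rotation_matrix_to_angle_axis(rotmat[:, :3, :])  # Nx3
    assert torch.allclose(angle_axis, recovered, atol=1e-4)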
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
def rotvec_to_rotmat(rot_vecs):
"""Convert axis-angle representation to rotation matrix.
Args:
rot_vecs: size = [B, 3]
Returns:
        Rotation matrix corresponding to the axis-angle vectors -- size = [B, 3, 3]
"""
l1norm = torch.norm(rot_vecs + 1e-8, p=2, dim=1)
angle = torch.unsqueeze(l1norm, -1)
normalized = torch.div(rot_vecs, angle)
angle = angle * 0.5
v_cos = torch.cos(angle)
v_sin = torch.sin(angle)
quat = torch.cat([v_cos, v_sin * normalized], dim=1)
return quat_to_rotmat(quat)
def rotmat_to_rotvec(rotmat):
"""
Args:
rotmat (torch.Tensor): (n, 3, 3)
Returns:
rotvec (torch.Tensor): (n, 3)
"""
bs = rotmat.shape[0]
pad_hom = torch.tensor([0, 0, 1], dtype=torch.float32, device=rotmat.device).view(1, 3, 1).expand(bs, -1, -1)
pred_rotmat_hom = torch.cat([rotmat, pad_hom], dim=-1)
rotvec = rotation_matrix_to_angle_axis(pred_rotmat_hom).contiguous().view(bs, -1)
rotvec[torch.isnan(rotvec)] = 0.0
return rotvec
def quat_to_rotmat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: size = [B, 4] 4 <===>(w, x, y, z)
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = quat
norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w * x, w * y, w * z
xy, xz, yz = x * y, x * z, y * z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def rot6d_to_rotmat(x):
"""Convert 6D rotation representation to 3x3 rotation matrix.
Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
Input:
(B,6) Batch of 6-D rotation representations
Output:
(B,3,3) Batch of corresponding rotation matrices
"""
x = x.view(-1, 3, 2)
a1 = x[:, :, 0]
a2 = x[:, :, 1]
b1 = F.normalize(a1)
b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
b3 = torch.cross(b1, b2)
return torch.stack((b1, b2, b3), dim=-1)
def rotmat_to_rot6d(rotmat):
"""Convert 6D rotation representation to 3x3 rotation matrix.
Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
Input:
(B,3,3) Batch of 6-D rotation representations
Output:
(B,6) Batch of corresponding rotation matrices
"""
rot6d = rotmat[:, :, 0:2]
rot6d = rot6d.reshape(-1, 6)
return rot6d
def rotvec_to_rot6d(rotvec):
"""
Args:
rotvec (torch.Tensor): (n, 3)
Returns:
rot6d (torch.Tensor): (n, 6)
"""
rotmat = rotvec_to_rotmat(rotvec)
rot6d = rotmat_to_rot6d(rotmat)
return rot6d
def rot6d_to_rotvec(rot6d):
"""
Args:
rot6d (torch.Tensor): (n, 6)
Returns:
rotvec (torch.Tensor): (n, 3)
"""
rotmat = rot6d_to_rotmat(rot6d)
rotvec = rotmat_to_rotvec(rotmat)
return rotvec
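def _rot6d_round_trip_example():
    # Illustrative sanity check (values are arbitrary) for the continuous 6D
    # representation of Zhou et al. (CVPR 2019): rotvec -> rot6d -> rotvec
    # recovers the input rotation.
    rotvec = torch.tensor([[0.3, -0.1, 0.2]])
    recovered = rot6d_to_rotvec(rotvec_to_rot6d(rotvec))
    assert torch.allclose(rotvec, recovered, atol=1e-4)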
def rotvec_to_rot6d_np(rotvec):
"""
Args:
rotvec (np.ndarray): (n, 3)
Returns:
rot6d (np.ndarray): (n, 6)
"""
from scipy.spatial.transform.rotation import Rotation as R
rotmat = R.from_rotvec(rotvec).as_matrix()
rotmat = torch.from_numpy(rotmat)
rot6d = rotmat_to_rot6d(rotmat)
rot6d = rot6d.numpy()
return rot6d
def perspective_projection(points, rotation, translation,
focal_length, camera_center):
"""
This function computes the perspective projection of a set of points.
Input:
points (bs, N, 3): 3D points
rotation (bs, 3, 3): Camera rotation
translation (bs, 3): Camera translation
focal_length (bs,) or scalar: Focal length
camera_center (bs, 2): Camera center
"""
batch_size = points.shape[0]
K = torch.zeros([batch_size, 3, 3], device=points.device)
K[:, 0, 0] = focal_length
K[:, 1, 1] = focal_length
K[:, 2, 2] = 1.
K[:, :-1, -1] = camera_center
# Transform points
points = torch.einsum('bij,bkj->bki', rotation, points)
points = points + translation.unsqueeze(1)
# Apply perspective distortion
projected_points = points / points[:, :, -1].unsqueeze(-1)
# Apply camera intrinsics
projected_points = torch.einsum('bij,bkj->bki', K, projected_points)
return projected_points[:, :, :-1]
def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000, img_size=224):
"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
Input:
S: (25, 3) 3D joint locations
joints: (25, 3) 2D joint locations and confidence
Returns:
(3,) camera translation vector
"""
num_joints = S.shape[0]
# focal length
f = np.array([focal_length, focal_length])
# optical center
center = np.array([img_size / 2., img_size / 2.])
# transformations
Z = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1)
XY = np.reshape(S[:, 0:2], -1)
O = np.tile(center, num_joints)
F = np.tile(f, num_joints)
weight2 = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1)
# least squares
Q = np.array([F * np.tile(np.array([1, 0]), num_joints), F * np.tile(np.array([0, 1]), num_joints),
O - np.reshape(joints_2d, -1)]).T
c = (np.reshape(joints_2d, -1) - O) * Z - F * XY
# weighted least squares
W = np.diagflat(weight2)
Q = np.dot(W, Q)
c = np.dot(W, c)
# square matrix
A = np.dot(Q.T, Q)
b = np.dot(Q.T, c)
# solution
trans = np.linalg.solve(A, b)
return trans
def estimate_translation(S, joints_2d, focal_length=5000., img_size=224.):
"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
Input:
S: (B, 49, 3) 3D joint locations
joints: (B, 49, 3) 2D joint locations and confidence
Returns:
(B, 3) camera translation vectors
"""
device = S.device
# Use only joints 25:49 (GT joints)
S = S[:, 25:, :].cpu().numpy()
joints_2d = joints_2d[:, 25:, :].cpu().numpy()
joints_conf = joints_2d[:, :, -1]
joints_2d = joints_2d[:, :, :-1]
trans = np.zeros((S.shape[0], 3), dtype=np.float32)
# Find the translation for each example in the batch
for i in range(S.shape[0]):
S_i = S[i]
joints_i = joints_2d[i]
conf_i = joints_conf[i]
trans[i] = estimate_translation_np(S_i, joints_i, conf_i, focal_length=focal_length, img_size=img_size)
return torch.from_numpy(trans).to(device)
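def _estimate_translation_example():
    # Illustrative sanity check with synthetic, noise-free data (all values
    # arbitrary): project 3D joints with a known camera translation, then
    # recover that translation from the 2D projections via the weighted
    # least-squares solve above.
    rng = np.random.RandomState(0)
    focal_length, img_size = 5000., 224.
    S = rng.randn(25, 3)
    gt_trans = np.array([0.1, -0.05, 40.0])
    X = S + gt_trans
    joints_2d = focal_length * X[:, :2] / X[:, 2:] + img_size / 2.
    conf = np.ones(25)
    est = estimate_translation_np(S, joints_2d, conf,
                                  focal_length=focal_length, img_size=img_size)
    assert np.allclose(est, gt_trans, atol=1e-3)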
|
bac67c1500413befa3d5fd688e98c2569c43c9c8
|
467be8fc9c975638fcb7a64d098e1526fd1c96f0
|
/dlint/linters/bad_subprocess_use.py
|
8db788355662d2d8e3046f84bbf29f7b2bb89050
|
[
"BSD-3-Clause"
] |
permissive
|
dlint-py/dlint
|
ed8d2ca0446914fceded654a2b810b7f8ad0d9d3
|
307b301cd9e280dcd7a7f9d5edfda3d58e4855f5
|
refs/heads/master
| 2023-04-13T08:54:52.987469
| 2023-04-10T19:27:01
| 2023-04-10T19:27:15
| 232,599,661
| 154
| 16
|
BSD-3-Clause
| 2023-03-09T21:21:19
| 2020-01-08T15:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
bad_subprocess_use.py
|
#!/usr/bin/env python
from .helpers import bad_kwarg_use
from .. import tree
class BadSubprocessUseLinter(bad_kwarg_use.BadKwargUseLinter):
"""This linter looks for use of the "shell=True" kwarg when using the
"subprocess" module.
"If the shell is invoked explicitly, via shell=True, it is the
application's responsibility to ensure that all whitespace and
metacharacters are quoted appropriately to avoid shell injection
vulnerabilities."
https://docs.python.org/3.6/library/subprocess.html#security-considerations
"""
off_by_default = False
_code = 'DUO116'
_error_tmpl = 'DUO116 use of "shell=True" is insecure in "subprocess" module'
@property
def kwargs(self):
def present_and_not_false(call, kwarg_name):
return (
tree.kwarg_present(call, kwarg_name)
and not tree.kwarg_false(call, kwarg_name)
)
return [
{
"module_path": "subprocess.call",
"kwarg_name": "shell",
"predicate": present_and_not_false,
},
{
"module_path": "subprocess.check_call",
"kwarg_name": "shell",
"predicate": present_and_not_false,
},
{
"module_path": "subprocess.check_output",
"kwarg_name": "shell",
"predicate": present_and_not_false,
},
{
"module_path": "subprocess.Popen",
"kwarg_name": "shell",
"predicate": present_and_not_false,
},
{
"module_path": "subprocess.run",
"kwarg_name": "shell",
"predicate": present_and_not_false,
},
]
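# Illustrative examples of what DUO116 flags versus allows (a sketch, not part
# of the linter itself; `user_input` is a hypothetical untrusted string):
#
#     import subprocess
#     subprocess.run("ls " + user_input, shell=True)  # flagged: shell injection risk
#     subprocess.run(["ls", user_input])              # ok: argument list, no shell
#     subprocess.run("ls", shell=False)               # ok: shell explicitly disabled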
|
7fe1d194e916c5b6f42a6cb0e6dfcc6a712846df
|
f487532281c1c6a36a5c62a29744d8323584891b
|
/sdk/python/pulumi_azure/appservice/get_function_app_host_keys.py
|
4621200bf0762a5971db1f49c1d6cc4faab1ba57
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure
|
a8f8f21c46c802aecf1397c737662ddcc438a2db
|
c16962e5c4f5810efec2806b8bb49d0da960d1ea
|
refs/heads/master
| 2023-08-25T00:17:05.290397
| 2023-08-24T06:11:55
| 2023-08-24T06:11:55
| 103,183,737
| 129
| 57
|
Apache-2.0
| 2023-09-13T05:44:10
| 2017-09-11T20:19:15
|
Java
|
UTF-8
|
Python
| false
| false
| 8,852
|
py
|
get_function_app_host_keys.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetFunctionAppHostKeysResult',
'AwaitableGetFunctionAppHostKeysResult',
'get_function_app_host_keys',
'get_function_app_host_keys_output',
]
@pulumi.output_type
class GetFunctionAppHostKeysResult:
"""
A collection of values returned by getFunctionAppHostKeys.
"""
def __init__(__self__, blobs_extension_key=None, default_function_key=None, durabletask_extension_key=None, event_grid_extension_config_key=None, id=None, name=None, primary_key=None, resource_group_name=None, signalr_extension_key=None, webpubsub_extension_key=None):
if blobs_extension_key and not isinstance(blobs_extension_key, str):
raise TypeError("Expected argument 'blobs_extension_key' to be a str")
pulumi.set(__self__, "blobs_extension_key", blobs_extension_key)
if default_function_key and not isinstance(default_function_key, str):
raise TypeError("Expected argument 'default_function_key' to be a str")
pulumi.set(__self__, "default_function_key", default_function_key)
if durabletask_extension_key and not isinstance(durabletask_extension_key, str):
raise TypeError("Expected argument 'durabletask_extension_key' to be a str")
pulumi.set(__self__, "durabletask_extension_key", durabletask_extension_key)
if event_grid_extension_config_key and not isinstance(event_grid_extension_config_key, str):
raise TypeError("Expected argument 'event_grid_extension_config_key' to be a str")
pulumi.set(__self__, "event_grid_extension_config_key", event_grid_extension_config_key)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if signalr_extension_key and not isinstance(signalr_extension_key, str):
raise TypeError("Expected argument 'signalr_extension_key' to be a str")
pulumi.set(__self__, "signalr_extension_key", signalr_extension_key)
if webpubsub_extension_key and not isinstance(webpubsub_extension_key, str):
raise TypeError("Expected argument 'webpubsub_extension_key' to be a str")
pulumi.set(__self__, "webpubsub_extension_key", webpubsub_extension_key)
@property
@pulumi.getter(name="blobsExtensionKey")
def blobs_extension_key(self) -> str:
"""
Function App resource's Blobs Extension system key.
"""
return pulumi.get(self, "blobs_extension_key")
@property
@pulumi.getter(name="defaultFunctionKey")
def default_function_key(self) -> str:
"""
Function App resource's default function key.
"""
return pulumi.get(self, "default_function_key")
@property
@pulumi.getter(name="durabletaskExtensionKey")
def durabletask_extension_key(self) -> str:
"""
Function App resource's Durable Task Extension system key.
"""
return pulumi.get(self, "durabletask_extension_key")
@property
@pulumi.getter(name="eventGridExtensionConfigKey")
def event_grid_extension_config_key(self) -> str:
"""
Function App resource's Event Grid Extension Config system key.
"""
return pulumi.get(self, "event_grid_extension_config_key")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
Function App resource's secret key
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="signalrExtensionKey")
def signalr_extension_key(self) -> str:
"""
Function App resource's SignalR Extension system key.
"""
return pulumi.get(self, "signalr_extension_key")
@property
@pulumi.getter(name="webpubsubExtensionKey")
def webpubsub_extension_key(self) -> str:
"""
Function App resource's Web PubSub Extension system key.
"""
return pulumi.get(self, "webpubsub_extension_key")
class AwaitableGetFunctionAppHostKeysResult(GetFunctionAppHostKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFunctionAppHostKeysResult(
blobs_extension_key=self.blobs_extension_key,
default_function_key=self.default_function_key,
durabletask_extension_key=self.durabletask_extension_key,
event_grid_extension_config_key=self.event_grid_extension_config_key,
id=self.id,
name=self.name,
primary_key=self.primary_key,
resource_group_name=self.resource_group_name,
signalr_extension_key=self.signalr_extension_key,
webpubsub_extension_key=self.webpubsub_extension_key)
def get_function_app_host_keys(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFunctionAppHostKeysResult:
"""
Use this data source to fetch the Host Keys of an existing Function App
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.appservice.get_function_app_host_keys(name="example-function",
resource_group_name=azurerm_resource_group["example"]["name"])
```
:param str name: The name of the Function App.
:param str resource_group_name: The name of the Resource Group where the Function App exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:appservice/getFunctionAppHostKeys:getFunctionAppHostKeys', __args__, opts=opts, typ=GetFunctionAppHostKeysResult).value
return AwaitableGetFunctionAppHostKeysResult(
blobs_extension_key=pulumi.get(__ret__, 'blobs_extension_key'),
default_function_key=pulumi.get(__ret__, 'default_function_key'),
durabletask_extension_key=pulumi.get(__ret__, 'durabletask_extension_key'),
event_grid_extension_config_key=pulumi.get(__ret__, 'event_grid_extension_config_key'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
primary_key=pulumi.get(__ret__, 'primary_key'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
signalr_extension_key=pulumi.get(__ret__, 'signalr_extension_key'),
webpubsub_extension_key=pulumi.get(__ret__, 'webpubsub_extension_key'))
@_utilities.lift_output_func(get_function_app_host_keys)
def get_function_app_host_keys_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFunctionAppHostKeysResult]:
"""
Use this data source to fetch the Host Keys of an existing Function App
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.appservice.get_function_app_host_keys(name="example-function",
resource_group_name=azurerm_resource_group["example"]["name"])
```
:param str name: The name of the Function App.
:param str resource_group_name: The name of the Resource Group where the Function App exists.
"""
...
blob_id: 8153f1c55a36630d38dbeb7ae40100973b31ffed | directory_id: 5f1881006aaf4f3c2515f375ad29c15fd6612de2 | content_id: 72cfd74acaa21b51c8cdcd979a394eceb3c1b59d
path: /poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/precision.py
detected_licenses: [MIT, LicenseRef-scancode-generic-cla, LicenseRef-scancode-proprietary-license, LicenseRef-scancode-free-unknown, LicenseRef-scancode-unknown-license-reference, LGPL-2.1-or-later, Apache-2.0, LicenseRef-scancode-public-domain, BSD-2-Clause] | license_type: permissive
repo_name: microsoft/ContextualSP | branch_name: refs/heads/master
snapshot_id: 4edb598d40f683f9a1143b92a9d24e1066d51ec4 | revision_id: 4198ebce942f4afe7ddca6a96ab6f4464ade4518
visit_date: 2023-08-02T22:08:40.503853 | revision_date: 2023-07-14T07:22:50 | committer_date: 2023-07-14T07:22:50
github_id: 255,534,819 | star_events_count: 332 | fork_events_count: 70
gha_license_id: MIT | gha_event_created_at: 2023-07-25T19:23:48 | gha_created_at: 2020-04-14T07:01:54 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,813 | extension: py | filename: precision.py
"""Precision for ranking."""
import numpy as np
from matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
class Precision(RankingMetric):
"""Precision metric."""
ALIAS = 'precision'
def __init__(self, k: int = 1, threshold: float = 0.):
"""
:class:`PrecisionMetric` constructor.
:param k: Number of results to consider.
:param threshold: the label threshold of relevance degree.
"""
self._k = k
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS}@{self._k}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate precision@k.
Example:
>>> y_true = [0, 0, 0, 1]
>>> y_pred = [0.2, 0.4, 0.3, 0.1]
>>> Precision(k=1)(y_true, y_pred)
0.0
>>> Precision(k=2)(y_true, y_pred)
0.0
>>> Precision(k=4)(y_true, y_pred)
0.25
>>> Precision(k=5)(y_true, y_pred)
0.2
        :param y_true: The ground truth label of each document.
        :param y_pred: The predicted scores of each document.
        :return: Precision @ k
        :raises ValueError: if k is not greater than 0.
"""
        if self._k <= 0:
            raise ValueError(f"k must be greater than 0. "
                             f"{self._k} received.")
coupled_pair = sort_and_couple(y_true, y_pred)
precision = 0.0
for idx, (label, score) in enumerate(coupled_pair):
if idx >= self._k:
break
if label > self._threshold:
precision += 1.
return precision / self._k
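One behavior worth noting from the doctests above: the denominator is always k, even when fewer than k documents exist, which is why k=5 over four documents yields 0.2 rather than 0.25. A minimal sketch (made-up scores; assumes Precision is importable from this module):
```python
# Hypothetical usage sketch; the labels and scores are made up.
import numpy as np
from matchzoo.metrics.precision import Precision

y_true = np.array([0, 0, 0, 1])
y_pred = np.array([0.2, 0.4, 0.3, 0.1])

print(Precision(k=4)(y_true, y_pred))  # 0.25 -- one relevant doc in the top 4
print(Precision(k=5)(y_true, y_pred))  # 0.2  -- still one relevant doc, divided by k=5
```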
blob_id: 5b7bace80ca5eaf6e3891913ddf352a920d22461 | directory_id: 811f4cdb25e26f3b27640aaa2e2bca93e660d2d7 | content_id: 5bae4841f55335ffa031e1bbf522e9c6b963f877
path: /src/anomalib/utils/callbacks/model_loader.py
detected_licenses: [CC-BY-SA-4.0, CC-BY-SA-3.0, CC-BY-NC-SA-4.0, Python-2.0, Apache-2.0] | license_type: permissive
repo_name: openvinotoolkit/anomalib | branch_name: refs/heads/main
snapshot_id: 4467dfc392398845e816387267cdf979ff76fe15 | revision_id: 4abfa93dcfcb98771bc768b334c929ff9a02ce8b
visit_date: 2023-09-03T16:49:05.019269 | revision_date: 2023-08-28T14:22:19 | committer_date: 2023-08-28T14:22:19
github_id: 423,775,360 | star_events_count: 2,325 | fork_events_count: 454
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T11:21:33 | gha_created_at: 2021-11-02T09:11:38 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,009 | extension: py | filename: model_loader.py
"""Callback that loads model weights from the state dict."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import logging
import torch
from pytorch_lightning import Callback, Trainer
from anomalib.models.components import AnomalyModule
logger = logging.getLogger(__name__)
class LoadModelCallback(Callback):
"""Callback that loads the model weights from the state dict."""
def __init__(self, weights_path) -> None:
self.weights_path = weights_path
def setup(self, trainer: Trainer, pl_module: AnomalyModule, stage: str | None = None) -> None:
"""Call when inference begins.
Loads the model weights from ``weights_path`` into the PyTorch module.
"""
del trainer, stage # These variables are not used.
logger.info("Loading the model from %s", self.weights_path)
pl_module.load_state_dict(torch.load(self.weights_path, map_location=pl_module.device)["state_dict"])
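For context, a minimal sketch of how this callback might be wired into a Lightning Trainer; the weights path and the model/datamodule objects are placeholders, not anomalib specifics:
```python
# Hypothetical wiring sketch; the checkpoint path is a placeholder.
from pytorch_lightning import Trainer

trainer = Trainer(callbacks=[LoadModelCallback(weights_path="results/weights/model.ckpt")])
# Weights are loaded in setup(), i.e. before testing or prediction starts:
# trainer.test(model=model, datamodule=datamodule)
```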
blob_id: 0ab597d378e2c7b5cf9c0bbf5a8c3339c448f87b | directory_id: da1721d2783ea4d67ff4e73cee6eee71292f2ef7 | content_id: bb1f2ca23ff81616e0db54856aebdfe6e107448d
path: /toontown/parties/DistributedPartyJukeboxActivity.py
detected_licenses: [BSD-3-Clause] | license_type: permissive
repo_name: open-toontown/open-toontown | branch_name: refs/heads/develop
snapshot_id: bbdeb1b7bf0fb2861eba2df5483738c0112090ca | revision_id: 464c2d45f60551c31397bd03561582804e760b4a
visit_date: 2023-07-07T01:34:31.959657 | revision_date: 2023-05-30T23:49:10 | committer_date: 2023-05-30T23:49:10
github_id: 219,221,570 | star_events_count: 143 | fork_events_count: 104
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-11T09:52:34 | gha_created_at: 2019-11-02T22:24:38 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 455 | extension: py | filename: DistributedPartyJukeboxActivity.py
from toontown.parties.DistributedPartyJukeboxActivityBase import DistributedPartyJukeboxActivityBase
from toontown.parties import PartyGlobals
class DistributedPartyJukeboxActivity(DistributedPartyJukeboxActivityBase):
notify = directNotify.newCategory('DistributedPartyJukeboxActivity')
def __init__(self, cr):
DistributedPartyJukeboxActivityBase.__init__(self, cr, PartyGlobals.ActivityIds.PartyJukebox, PartyGlobals.PhaseToMusicData)
blob_id: 2f0cc1da1916bd293e1deb472f2db3489becfea5 | directory_id: b049a961f100444dde14599bab06a0a4224d869b | content_id: e300e13e2a1fe6d462f3799026d98ba9e7fe9d23
path: /sdk/python/pulumi_azure_native/videoanalyzer/v20211101preview/get_pipeline_topology.py
detected_licenses: [BSD-3-Clause, Apache-2.0] | license_type: permissive
repo_name: pulumi/pulumi-azure-native | branch_name: refs/heads/master
snapshot_id: b390c88beef8381f9a71ab2bed5571e0dd848e65 | revision_id: 4c499abe17ec6696ce28477dde1157372896364e
visit_date: 2023-08-30T08:19:41.564780 | revision_date: 2023-08-28T19:29:04 | committer_date: 2023-08-28T19:29:04
github_id: 172,386,632 | star_events_count: 107 | fork_events_count: 29
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T13:17:00 | gha_created_at: 2019-02-24T20:30:21 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,986 | extension: py | filename: get_pipeline_topology.py
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPipelineTopologyResult',
'AwaitableGetPipelineTopologyResult',
'get_pipeline_topology',
'get_pipeline_topology_output',
]
@pulumi.output_type
class GetPipelineTopologyResult:
"""
    Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from an RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall, a topology is composed of the following:
    - Parameters: list of user-defined parameters that can be referenced across the topology nodes.
- Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
- Processors: list of nodes which perform data analysis or transformations.
- Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
"""
def __init__(__self__, description=None, id=None, kind=None, name=None, parameters=None, processors=None, sinks=None, sku=None, sources=None, system_data=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parameters and not isinstance(parameters, list):
raise TypeError("Expected argument 'parameters' to be a list")
pulumi.set(__self__, "parameters", parameters)
if processors and not isinstance(processors, list):
raise TypeError("Expected argument 'processors' to be a list")
pulumi.set(__self__, "processors", processors)
if sinks and not isinstance(sinks, list):
raise TypeError("Expected argument 'sinks' to be a list")
pulumi.set(__self__, "sinks", sinks)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if sources and not isinstance(sources, list):
raise TypeError("Expected argument 'sources' to be a list")
pulumi.set(__self__, "sources", sources)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
        An optional description of the pipeline topology. It is recommended that the expected use of the topology be described here.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Topology kind.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> Optional[Sequence['outputs.ParameterDeclarationResponse']]:
"""
List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter
def processors(self) -> Optional[Sequence['outputs.EncoderProcessorResponse']]:
"""
List of the topology processor nodes. Processor nodes enable pipeline data to be analyzed, processed or transformed.
"""
return pulumi.get(self, "processors")
@property
@pulumi.getter
def sinks(self) -> Sequence['outputs.VideoSinkResponse']:
"""
List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported.
"""
return pulumi.get(self, "sinks")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
Describes the properties of a SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def sources(self) -> Sequence[Any]:
"""
List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline.
"""
return pulumi.get(self, "sources")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPipelineTopologyResult(GetPipelineTopologyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPipelineTopologyResult(
description=self.description,
id=self.id,
kind=self.kind,
name=self.name,
parameters=self.parameters,
processors=self.processors,
sinks=self.sinks,
sku=self.sku,
sources=self.sources,
system_data=self.system_data,
type=self.type)
def get_pipeline_topology(account_name: Optional[str] = None,
pipeline_topology_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPipelineTopologyResult:
"""
Retrieves a specific pipeline topology by name. If a topology with that name has been previously created, the call will return the JSON representation of that topology.
:param str account_name: The Azure Video Analyzer account name.
:param str pipeline_topology_name: Pipeline topology unique identifier.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['pipelineTopologyName'] = pipeline_topology_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:videoanalyzer/v20211101preview:getPipelineTopology', __args__, opts=opts, typ=GetPipelineTopologyResult).value
return AwaitableGetPipelineTopologyResult(
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
parameters=pulumi.get(__ret__, 'parameters'),
processors=pulumi.get(__ret__, 'processors'),
sinks=pulumi.get(__ret__, 'sinks'),
sku=pulumi.get(__ret__, 'sku'),
sources=pulumi.get(__ret__, 'sources'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_pipeline_topology)
def get_pipeline_topology_output(account_name: Optional[pulumi.Input[str]] = None,
pipeline_topology_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPipelineTopologyResult]:
"""
Retrieves a specific pipeline topology by name. If a topology with that name has been previously created, the call will return the JSON representation of that topology.
:param str account_name: The Azure Video Analyzer account name.
:param str pipeline_topology_name: Pipeline topology unique identifier.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
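A minimal lookup sketch for the data source above; the account, topology, and resource-group names are placeholders:
```python
# Hypothetical usage sketch; all resource names are placeholders.
import pulumi
import pulumi_azure_native.videoanalyzer.v20211101preview as videoanalyzer

topology = videoanalyzer.get_pipeline_topology(
    account_name="example-account",
    pipeline_topology_name="example-topology",
    resource_group_name="example-rg")
pulumi.export("topologyKind", topology.kind)
```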
blob_id: 7e94e4b9db27cdc5de5a57623db8cbf8bd8c2562 | directory_id: 787022de03a2dd6998c1518673830395b389e3df | content_id: 90b05c3c3466bb6bcc51d76e04b79843c185352a
path: /tests/integrationTests/tests/tutorial_09_java_testing/__init__.py
detected_licenses: [BSD-3-Clause, MIT] | license_type: permissive
repo_name: Submitty/Submitty | branch_name: refs/heads/main
snapshot_id: e6b8731656291a025aa77f928eb067bc9a307540 | revision_id: b223d9e952bcdb8664721a55593bc75e0e3c8c4f
visit_date: 2023-08-31T23:56:11.291752 | revision_date: 2023-08-31T19:12:18 | committer_date: 2023-08-31T19:12:18
github_id: 16,236,118 | star_events_count: 592 | fork_events_count: 727
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-13T05:36:08 | gha_created_at: 2014-01-25T17:43:57 | gha_language: PHP
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,926 | extension: py | filename: __init__.py
# Necessary imports. Provides library functions to ease writing tests.
from lib import prebuild, testcase, SUBMITTY_TUTORIAL_DIR
import subprocess
import os
import glob
import shutil
import traceback
############################################################################
# COPY THE ASSIGNMENT FROM THE SAMPLE ASSIGNMENTS DIRECTORIES
SAMPLE_ASSIGNMENT_CONFIG = SUBMITTY_TUTORIAL_DIR + "/examples/09_java_testing/config"
SAMPLE_SUBMISSIONS = SUBMITTY_TUTORIAL_DIR + "/examples/09_java_testing/submissions/"
@prebuild
def initialize(test):
try:
os.mkdir(os.path.join(test.testcase_path, "assignment_config"))
except OSError:
pass
try:
data_path = os.path.join(test.testcase_path, "data")
if os.path.isdir(data_path):
shutil.rmtree(data_path)
os.mkdir(data_path)
except OSError:
pass
subprocess.call(["cp",
os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "config.json"),
os.path.join(test.testcase_path, "assignment_config")])
subprocess.call(["cp",
os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "provided_code", "FactorialTest.java"),
os.path.join(test.testcase_path, "data")])
############################################################################
def cleanup(test):
    # We seem to need to clean up this class file; otherwise it doesn't recompile.
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data/", "*.zip")))
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data/", "Factorial.class")))
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data/", "Factorial.java")))
subprocess.call(["rm"] + ["-rf"] +
glob.glob(os.path.join(test.testcase_path, "data/", "test*")))
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data/grade.txt")))
subprocess.call(["rm"] + ["-f"] +
glob.glob(os.path.join(test.testcase_path, "data/results.json")))
@testcase
def schema_validation(test):
cleanup(test)
config_path = os.path.join(test.testcase_path, 'assignment_config', 'complete_config.json')
try:
test.validate_complete_config(config_path)
except Exception:
traceback.print_exc()
raise
@testcase
def correct(test):
return # TODO: REMOVE THIS!
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "correct.zip"),
os.path.join(test.testcase_path, "data/")])
subprocess.call(["unzip",
"-q", # quiet
"-o", # overwrite files
os.path.join(test.testcase_path, "data/correct.zip"),
"-d", # save to directory
os.path.join(test.testcase_path, "data/")])
test.run_compile()
test.run_run()
test.run_validator()
test.empty_file("test01/STDOUT.txt")
test.diff("test01/STDERR.txt", "not_empty.txt")
test.empty_file("test02/STDOUT.txt")
test.diff("test02/STDERR.txt", "not_empty.txt")
test.junit_diff("test03/STDOUT.txt", "correct_test03_STDOUT.txt")
test.diff("test03/STDERR.txt", "not_empty.txt")
test.empty_file("test03/execute_logfile.txt")
test.diff("grade.txt", "correct_grade.txt", "-b")
test.json_diff("results.json", "correct_results.json")
@testcase
def does_not_compile(test):
return # TODO: REMOVE THIS!
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "does_not_compile.zip"),
os.path.join(test.testcase_path, "data/")])
subprocess.call(["unzip",
"-q", # quiet
"-o", # overwrite files
os.path.join(test.testcase_path, "data/does_not_compile.zip"),
"-d", # save to directory
os.path.join(test.testcase_path, "data/")])
test.run_compile()
test.run_run()
test.run_validator()
test.empty_file("test01/STDOUT.txt")
test.diff("test01/STDERR.txt", "does_not_compile_test01_STDERR.txt")
test.empty_file("test02/STDOUT.txt")
test.diff("test02/STDERR.txt", "does_not_compile_test02_STDERR.txt")
test.junit_diff("test03/STDOUT.txt", "does_not_compile_test03_STDOUT.txt")
test.diff("test03/STDERR.txt", "not_empty.txt")
test.diff("test03/execute_logfile.txt", "exit_status_1.txt")
test.diff("grade.txt", "does_not_compile_grade.txt", "-b")
test.json_diff("results.json", "does_not_compile_results.json")
@testcase
def buggy(test):
return # TODO: REMOVE THIS!
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "buggy.zip"),
os.path.join(test.testcase_path, "data/")])
subprocess.call(["unzip",
"-q", # quiet
"-o", # overwrite files
os.path.join(test.testcase_path, "data/buggy.zip"),
"-d", # save to directory
os.path.join(test.testcase_path, "data/")])
test.run_compile()
test.run_run()
test.run_validator()
test.empty_file("test01/STDOUT.txt")
test.diff("test01/STDERR.txt", "not_empty.txt")
test.empty_file("test02/STDOUT.txt")
test.diff("test02/STDERR.txt", "not_empty.txt")
test.junit_diff("test03/STDOUT.txt", "buggy_test03_STDOUT.txt")
test.diff("test03/STDERR.txt", "not_empty.txt")
test.diff("test03/execute_logfile.txt", "exit_status_1.txt")
test.diff("grade.txt", "buggy_grade.txt", "-b")
test.json_diff("results.json", "buggy_results.json")
@testcase
def still_buggy(test):
return # TODO: REMOVE THIS!
cleanup(test)
subprocess.call(["cp",
os.path.join(SAMPLE_SUBMISSIONS, "still_buggy.zip"),
os.path.join(test.testcase_path, "data/")])
subprocess.call(["unzip",
"-q", # quiet
"-o", # overwrite files
os.path.join(test.testcase_path, "data/still_buggy.zip"),
"-d", # save to directory
os.path.join(test.testcase_path, "data/")])
test.run_compile()
test.run_run()
test.run_validator()
test.empty_file("test01/STDOUT.txt")
test.diff("test01/STDERR.txt", "not_empty.txt")
test.empty_file("test02/STDOUT.txt")
test.diff("test02/STDERR.txt", "not_empty.txt")
test.junit_diff("test03/STDOUT.txt", "still_buggy_test03_STDOUT.txt")
test.diff("test03/STDERR.txt", "not_empty.txt")
test.diff("test03/execute_logfile.txt", "exit_status_1.txt")
test.diff("grade.txt", "still_buggy_grade.txt", "-b")
test.json_diff("results.json", "still_buggy_results.json")
blob_id: ca4af8199f01db37d5f704d63efb943d104c08e3 | directory_id: 2d9957f2c7a6883004b1a801f97eab3b033d9c08 | content_id: bfe7b23927ba760603ad625fece7cee435eac397
path: /pulsar-functions/instance/src/main/python/contextimpl.py
detected_licenses: [Apache-2.0, LicenseRef-scancode-unknown, LicenseRef-scancode-protobuf] | license_type: permissive
repo_name: apache/pulsar | branch_name: refs/heads/master
snapshot_id: ca729cfb8d2c031312d30096e13431b2e29fb9bf | revision_id: 843b8307f44cd5e3a2d59ab93cc6b766f0c4ce0f
visit_date: 2023-08-31T23:53:41.323458 | revision_date: 2023-08-31T18:37:00 | committer_date: 2023-08-31T18:37:00
github_id: 62,117,812 | star_events_count: 11,865 | fork_events_count: 3,546
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T12:13:23 | gha_created_at: 2016-06-28T07:00:03 | gha_language: Java
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,937 | extension: py | filename: contextimpl.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""contextimpl.py: ContextImpl class that implements the Context interface
"""
import re
import time
import os
import json
import log
import pulsar
import util
from prometheus_client import Summary
from function_stats import Stats
from functools import partial
Log = log.Log
class ContextImpl(pulsar.Context):
# add label to indicate user metric
user_metrics_label_names = Stats.metrics_label_names + ["metric"]
def __init__(self, instance_config, logger, pulsar_client, user_code, consumers,
secrets_provider, metrics_labels, state_context, stats):
self.instance_config = instance_config
self.log = logger
self.pulsar_client = pulsar_client
self.user_code_dir = os.path.dirname(user_code)
self.consumers = consumers
self.secrets_provider = secrets_provider
self.state_context = state_context
self.publish_producers = {}
self.publish_serializers = {}
self.message = None
self.current_start_time = None
self.user_config = json.loads(instance_config.function_details.userConfig) \
if instance_config.function_details.userConfig \
else []
self.secrets_map = json.loads(instance_config.function_details.secretsMap) \
if instance_config.function_details.secretsMap \
else {}
self.metrics_labels = metrics_labels
self.user_metrics_map = dict()
self.user_metrics_summary = Summary("pulsar_function_user_metric",
'Pulsar Function user defined metric',
ContextImpl.user_metrics_label_names)
self.stats = stats
# Called on a per message basis to set the context for the current message
def set_current_message_context(self, message, topic):
self.message = message
self.current_start_time = time.time()
def get_message_id(self):
return self.message.message_id()
def get_message_key(self):
return self.message.partition_key()
def get_message_eventtime(self):
return self.message.event_timestamp()
def get_message_properties(self):
return self.message.properties()
def get_current_message_topic_name(self):
return self.message.topic_name()
def get_message_sequence_id(self):
if not self.get_message_id():
return None
ledger_id = self.get_message_id().ledger_id()
entry_id = self.get_message_id().entry_id()
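        # Pack the ledger id into the high bits and the entry id into the low 28 bits of a single sortable offset.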
offset = (ledger_id << 28) | entry_id
return offset
def get_message_partition_index(self):
if not self.get_message_id():
return None
return self.get_message_id().partition()
def get_partition_key(self):
return self.message.partition_key()
def get_function_name(self):
return self.instance_config.function_details.name
def get_function_tenant(self):
return self.instance_config.function_details.tenant
def get_function_namespace(self):
return self.instance_config.function_details.namespace
def get_function_id(self):
return self.instance_config.function_id
def get_instance_id(self):
return self.instance_config.instance_id
def get_function_version(self):
return self.instance_config.function_version
def get_logger(self):
return self.log
def get_user_config_value(self, key):
if key in self.user_config:
return self.user_config[key]
else:
return None
def get_user_config_map(self):
return self.user_config
def get_secret(self, secret_key):
if not secret_key in self.secrets_map:
return None
return self.secrets_provider.provide_secret(secret_key, self.secrets_map[secret_key])
def record_metric(self, metric_name, metric_value):
if metric_name not in self.user_metrics_map:
user_metrics_labels = self.metrics_labels + [metric_name]
self.user_metrics_map[metric_name] = self.user_metrics_summary.labels(*user_metrics_labels)
self.user_metrics_map[metric_name].observe(metric_value)
def get_input_topics(self):
return list(self.instance_config.function_details.source.inputSpecs.keys())
def get_output_topic(self):
return self.instance_config.function_details.sink.topic
def get_output_serde_class_name(self):
return self.instance_config.function_details.sink.serDeClassName
def callback_wrapper(self, callback, topic, message_id, result, msg):
if result != pulsar.Result.Ok:
error_msg = "Failed to publish to topic [%s] with error [%s] with src message id [%s]" % (topic, result, message_id)
Log.error(error_msg)
self.stats.incr_total_sys_exceptions(Exception(error_msg))
if callback:
callback(result, msg)
def publish(self, topic_name, message, serde_class_name="serde.IdentitySerDe", properties=None, compression_type=None, callback=None, message_conf=None):
# Just make sure that user supplied values are properly typed
topic_name = str(topic_name)
serde_class_name = str(serde_class_name)
pulsar_compression_type = pulsar._pulsar.CompressionType.NONE
if compression_type is not None:
pulsar_compression_type = compression_type
if topic_name not in self.publish_producers:
self.publish_producers[topic_name] = self.pulsar_client.create_producer(
topic_name,
block_if_queue_full=True,
batching_enabled=True,
batching_max_publish_delay_ms=10,
compression_type=pulsar_compression_type,
properties=util.get_properties(util.getFullyQualifiedFunctionName(
self.instance_config.function_details.tenant,
self.instance_config.function_details.namespace,
self.instance_config.function_details.name),
self.instance_config.instance_id)
)
if serde_class_name not in self.publish_serializers:
serde_klass = util.import_class(self.user_code_dir, serde_class_name)
self.publish_serializers[serde_class_name] = serde_klass()
output_bytes = bytes(self.publish_serializers[serde_class_name].serialize(message))
if properties:
            # The deprecated properties arg was passed; merge it into message_conf.
if not message_conf:
message_conf = {}
message_conf['properties'] = properties
if message_conf:
self.publish_producers[topic_name].send_async(
output_bytes, partial(self.callback_wrapper, callback, topic_name, self.get_message_id()), **message_conf)
else:
self.publish_producers[topic_name].send_async(
output_bytes, partial(self.callback_wrapper, callback, topic_name, self.get_message_id()))
def ack(self, msgid, topic):
topic_consumer = None
if topic in self.consumers:
topic_consumer = self.consumers[topic]
else:
# if this topic is a partitioned topic
            m = re.search(r'(.+)-partition-(\d+)', topic)
if not m:
raise ValueError('Invalid topicname %s' % topic)
elif m.group(1) in self.consumers:
topic_consumer = self.consumers[m.group(1)]
else:
raise ValueError('Invalid topicname %s' % topic)
topic_consumer.acknowledge(msgid)
def get_and_reset_metrics(self):
metrics = self.get_metrics()
# TODO(sanjeev):- Make this thread safe
self.reset_metrics()
return metrics
def reset_metrics(self):
# TODO: Make it thread safe
for user_metric in self.user_metrics_map.values():
user_metric._sum.set(0.0)
user_metric._count.set(0.0)
def get_metrics(self):
metrics_map = {}
for metric_name, user_metric in self.user_metrics_map.items():
metrics_map["%s%s_sum" % (Stats.USER_METRIC_PREFIX, metric_name)] = user_metric._sum.get()
metrics_map["%s%s_count" % (Stats.USER_METRIC_PREFIX, metric_name)] = user_metric._count.get()
return metrics_map
def incr_counter(self, key, amount):
return self.state_context.incr(key, amount)
def get_counter(self, key):
return self.state_context.get_amount(key)
def del_counter(self, key):
return self.state_context.delete_key(key)
def put_state(self, key, value):
return self.state_context.put(key, value)
def get_state(self, key):
return self.state_context.get_value(key)
def get_pulsar_client(self):
return self.pulsar_client
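For orientation, a minimal sketch of how user code typically reaches this context from a Python Pulsar Function's process hook; the output topic and metric name are placeholders:
```python
# Hypothetical user function; the output topic and metric name are placeholders.
from pulsar import Function

class EchoFunction(Function):
    def process(self, input, context):
        context.get_logger().info("processing %s", context.get_message_id())
        context.record_metric("echoed", 1)
        context.publish("persistent://public/default/echo-out", input)
        return input
```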
blob_id: 3dd7dc983882689917e1fd892a4a025f762a6a06 | directory_id: 157d84f8aafc76ba9ea0dbbf08ede744966b4250 | content_id: a15671255bc83461e5a0303415f544faf55df722
path: /tests/integration/cattletest/core/test_account.py
detected_licenses: [Apache-2.0, LicenseRef-scancode-warranty-disclaimer] | license_type: permissive
repo_name: rancher/cattle | branch_name: refs/heads/v1.6
snapshot_id: 81d165a0339a41950561fe534c7529ec74203c56 | revision_id: 82d154a53f4089fecfb9f320caad826bb4f6055f
visit_date: 2023-08-27T20:19:31.989806 | revision_date: 2020-05-01T18:15:55 | committer_date: 2020-05-01T20:11:28
github_id: 18,023,059 | star_events_count: 487 | fork_events_count: 233
gha_license_id: Apache-2.0 | gha_event_created_at: 2022-01-03T18:07:33 | gha_created_at: 2014-03-23T00:19:52 | gha_language: Java
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,177 | extension: py | filename: test_account.py
from common_fixtures import * # NOQA
@pytest.mark.parametrize('kind', ['user', 'admin'])
def test_account_create(kind, admin_user_client, random_str):
account = admin_user_client.create_account(kind=kind,
name=random_str)
assert account.state == "registering"
assert account.transitioning == "yes"
account = admin_user_client.wait_success(account)
assert account.transitioning == "no"
assert account.state == "active"
count = len(admin_user_client.list_account(name=random_str))
assert count == 1
creds = account.credentials()
assert len(creds) == 0
def test_account_uuid(admin_user_client):
a = admin_user_client.create_account(uuid=None)
assert a.uuid is not None
uuid = random_str()
a = admin_user_client.create_account(uuid=uuid)
assert a.uuid == uuid
def test_account_external(admin_user_client):
account = admin_user_client.create_account(externalId='extid',
externalIdType='extType')
account = admin_user_client.wait_success(account)
assert account.state == 'active'
assert account.externalId == 'extid'
assert account.externalIdType == 'extType'
def test_account_no_key(super_client):
account = super_client.create_account(kind='admin')
account = super_client.wait_success(account)
creds = account.credentials()
assert len(creds) == 0
account = super_client.create_account(kind='unknown')
account = super_client.wait_success(account)
creds = account.credentials()
assert len(creds) == 0
def test_account_new_data(admin_user_client, super_client):
user = admin_user_client.create_account(kind='user')
user = admin_user_client.wait_success(user)
assert user.state == 'active'
assert super_client.reload(user).defaultNetworkId is None
assert len(user.networks()) == 0
account = admin_user_client.create_account(kind='project')
account = admin_user_client.wait_success(account)
assert account.state == 'active'
assert super_client.reload(account).defaultNetworkId is None
networks = super_client.list_network(accountId=account.id)
by_kind = {}
for i in range(len(networks)):
network = super_client.wait_success(networks[i])
by_kind[networks[i].kind] = network
assert network.state == 'active'
assert len(networks) == 4
assert len(by_kind) == 4
assert 'dockerHost' in by_kind
assert 'dockerNone' in by_kind
assert 'dockerBridge' in by_kind
assert 'dockerContainer' in by_kind
def test_account_context_create(new_context):
assert new_context.client is not None
assert new_context.user_client is not None
assert new_context.project is not None
assert new_context.account is not None
assert len(new_context.user_client.list_project()) == 1
def test_account_purge(admin_user_client, super_client, new_context):
account_id = new_context.project.id
account = new_context.project
client = new_context.client
image_uuid = 'sim:{}'.format(random_num())
host = new_context.host
assert host.state == 'active'
# Create another host
host2 = register_simulated_host(new_context)
assert host2.state == 'active'
# create containers
c1 = client.create_container(imageUuid=image_uuid,
requestedHostId=host.id)
c1 = client.wait_success(c1)
assert c1.state == 'running'
c2 = client.create_container(imageUuid=image_uuid,
requestedHostId=host.id)
c2 = client.wait_success(c2)
assert c2.state == 'running'
# create stack and services
env = client.create_stack(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": image_uuid}
service1 = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service2 = client.create_service(accountId=account_id,
name=random_str(),
stackId=env.id,
launchConfig=launch_config)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
env.activateservices()
service1 = client.wait_success(service1, 120)
service2 = client.wait_success(service2, 120)
assert service1.state == "active"
assert service2.state == "active"
account = super_client.reload(account)
account = super_client.wait_success(account.deactivate())
account = super_client.wait_success(account.remove())
assert account.removed is not None
account = super_client.wait_success(account.purge())
assert account.state == 'purged'
host = super_client.wait_success(host)
assert host.removed is not None
host2 = super_client.wait_success(host2)
assert host2.removed is not None
c1 = super_client.wait_success(c1)
assert c1.removed is not None
assert c1.state == 'removed'
c2 = super_client.wait_success(c2)
assert c2.removed is not None
assert c2.state == 'removed'
c1 = super_client.wait_success(c1.purge())
assert c1.state == 'purged'
volumes = c1.volumes()
assert len(volumes) == 0
wait_state(super_client, service1, 'removed')
wait_state(super_client, service2, 'removed')
wait_state(super_client, env, 'removed')
def test_user_account_cant_create_account(admin_user_client, super_client):
account = admin_user_client.create_account(name=random_str(),
kind='user')
account = admin_user_client.wait_success(account)
api_key = admin_user_client.create_api_key(accountId=account.id)
admin_user_client.wait_success(api_key)
client = api_client(api_key.publicValue, api_key.secretValue)
with pytest.raises(AttributeError) as e:
client.create_account()
assert 'create_account' in e.value.message
blob_id: ddb4e4025032bd45a5fd1a2cb82832f1ca236eb1 | directory_id: 150a7b11cb531f8bc2a045aefcf2ebe1d151efa3 | content_id: 5003c9b9b220dadfadd452d2bddf6351b88a85e7
path: /ocs_ci/pause/pause.py
detected_licenses: [MIT] | license_type: permissive
repo_name: red-hat-storage/ocs-ci | branch_name: refs/heads/master
snapshot_id: c7ac414e1b86552da0439223dfa9bca39977f31a | revision_id: 5e9e504957403148e413326f65c3769bf9d8eb39
visit_date: 2023-08-17T16:19:51.154403 | revision_date: 2023-08-17T13:27:12 | committer_date: 2023-08-17T13:27:12
github_id: 179,558,938 | star_events_count: 146 | fork_events_count: 210
gha_license_id: MIT | gha_event_created_at: 2023-09-14T16:38:44 | gha_created_at: 2019-04-04T19:00:02 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,301 | extension: py | filename: pause.py
import argparse
import logging
import pickle
import os
from ocs_ci.framework import config
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.ocs.exceptions import CommandFailed
from ocs_ci.ocs import platform_nodes
from ocs_ci.ocs.node import get_node_objs
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.constants import NODE_OBJ_FILE, NODE_FILE, INSTANCE_FILE
FORMAT = "%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
log = logging.getLogger(__name__)
def cycle_nodes(cluster_path, action):
"""
Start/Stop AWS nodes to save costs when not in use.
Args:
        cluster_path (str): location of the cluster path that has the auth files
        action (str): action to perform, either start or stop
"""
node_obj_file = os.path.join(cluster_path, NODE_OBJ_FILE)
nodes_file = os.path.join(cluster_path, NODE_FILE)
instance_file = os.path.join(cluster_path, INSTANCE_FILE)
if action == "stop":
ceph = CephCluster()
ceph.set_noout()
node_objs = get_node_objs()
kls = platform_nodes.PlatformNodesFactory()
nodes = kls.get_nodes_platform()
with open(instance_file, "wb") as instance_file:
log.info("Storing ocs instances objects")
pickle.dump(nodes.get_ec2_instances(nodes=node_objs), instance_file)
with open(nodes_file, "wb") as node_file:
log.info("Storing ocp nodes objects")
pickle.dump(nodes, node_file)
with open(node_obj_file, "wb") as node_obj_file:
log.info("Stopping all nodes")
pickle.dump(node_objs, node_obj_file)
nodes.stop_nodes(nodes=node_objs)
elif action == "start":
with open(instance_file, "rb") as instance_file:
log.info("Reading instance objects")
instances = pickle.load(instance_file)
with open(nodes_file, "rb") as node_file:
log.info("Reading ocp nodes object")
nodes = pickle.load(node_file)
with open(node_obj_file, "rb") as node_obj_file:
log.info("Starting ocs nodes")
node_objs = pickle.load(node_obj_file)
nodes.start_nodes(instances=instances, nodes=node_objs)
unset_noout()
@retry((CommandFailed), tries=10, delay=10, backoff=1)
def unset_noout():
"""
unset_noout with 10 retries and delay of 10 seconds.
"""
ceph = CephCluster()
ceph.unset_noout()
def cluster_pause():
"""
Entry point to start/stop cluster nodes - AWS only
"""
parser = argparse.ArgumentParser(description="Start/Stop Cluster Nodes - AWS Only")
parser.add_argument(
"--cluster-path",
action="store",
required=True,
help="Location of cluster path that was used during installation ",
)
parser.add_argument(
"--action", nargs="?", required=True, choices=("start", "stop"), help=""
)
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
cluster_path = os.path.expanduser(args.cluster_path)
config.ENV_DATA["cluster_path"] = cluster_path
os.environ["KUBECONFIG"] = os.path.join(
cluster_path, config.RUN["kubeconfig_location"]
)
cycle_nodes(cluster_path, args.action)
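cluster_pause is meant to be exposed as a console entry point; a sketch of an equivalent direct invocation (the cluster path is a placeholder):
```python
# Hypothetical direct invocation; normally this runs as a console script.
import sys

sys.argv = ["cluster-pause", "--cluster-path", "~/clusters/mycluster", "--action", "stop"]
cluster_pause()
```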
blob_id: 5707c43d77269fd2e65725428a5e1cc21d98e852 | directory_id: 29156ee762cae629184cde55918a92c9cad75ef5 | content_id: 656f1fd473365c6feccbc90a5c80219de2640751
path: /seam_erasure/seam_gradient.py
detected_licenses: [MIT] | license_type: permissive
repo_name: zfergus/seam-erasure | branch_name: refs/heads/main
snapshot_id: a215660c5c627a1b861ad156b34125ee1f779931 | revision_id: 5a99be0c02e61ee34f3b071bfe9b22c8d1a27e3f
visit_date: 2022-08-02T15:22:03.041012 | revision_date: 2022-07-18T00:15:56 | committer_date: 2022-07-18T00:15:56
github_id: 93,452,890 | star_events_count: 112 | fork_events_count: 19
gha_license_id: MIT | gha_event_created_at: 2022-07-18T00:10:03 | gha_created_at: 2017-06-05T22:32:07 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,010 | extension: py | filename: seam_gradient.py
"""
Seam gradient energy to get a better gradient energy across the seam.
Written by Zachary Ferguson
"""
import itertools
import logging
import numpy
import scipy.sparse
from tqdm import tqdm
from .accumulate_coo import AccumulateCOO
from .seam_intervals import compute_edgePair_intervals
from .util import (is_counterclockwise, lerp_UV, surrounding_pixels,
globalEdge_to_local, pairwise, QuadEnergy)
import warnings
warnings.simplefilter('ignore', scipy.sparse.SparseEfficiencyWarning)
def A_Mat(st_edge, gamma_perp, p00, p10, p01, p11, nPixels):
"""Create a cooefficent matrix A for the equation ApT + Bp."""
c = gamma_perp[1] * (st_edge[1][0] - st_edge[0][0]) + \
gamma_perp[0] * (st_edge[1][1] - st_edge[0][1])
coeffs = numpy.zeros((1, nPixels))
coeffs[0, p00] = c
coeffs[0, p10] = -c
coeffs[0, p01] = -c
coeffs[0, p11] = c
return coeffs
def B_Mat(st_edge, gamma_perp, p00, p10, p01, p11, nPixels):
"""Create a cooefficent matrix B for the equation ApT + Bp."""
c1 = gamma_perp[1] * st_edge[0][0] + gamma_perp[0] * st_edge[0][1]
c2 = gamma_perp[0]
c3 = gamma_perp[1]
coeffs = numpy.zeros((1, nPixels))
coeffs[0, p00] = c1 - c2 - c3
coeffs[0, p10] = -c1 + c2
coeffs[0, p01] = -c1 + c3
coeffs[0, p11] = c1
return coeffs
def inside_perpendicular_vector(mesh, edge):
"""
Returns the normalized vector in the perpendicular inside directions.
Inputs:
mesh - the model in OBJ format
edge - the edge in (fi, (fv0, fv1)) format
Output:
Returns the appropriate perpendicular vector pointing inside the UV
face.
"""
p0, p1 = [numpy.array(mesh.vt[mesh.f[edge[0]][i].vt]) for i in edge[1]]
vec = p1 - p0
if is_counterclockwise(*[mesh.vt[fv.vt] for fv in mesh.f[edge[0]]]):
perp = numpy.array([-vec[1], vec[0]])
else:
perp = numpy.array([vec[1], -vec[0]])
length = float(numpy.linalg.norm(perp))
return (perp / length) if (abs(length) > 1e-8) else perp
def E_ab(a, b, mesh, edgePair, width, height):
"""
    Calculate the energy in the interval a to b.
    Inputs:
        a, b - interval endpoints (edge parameter values in [0, 1])
mesh - the model in OBJ format
edgePair - the edgePair of the model in (fi, (fv0, fv1)) format
width, height - texture's dimensions
Output:
Returns the energy coefficient matrix for the interval.
"""
    # Get the UV coordinates of the edge pair, swapping endpoints of one edge
((uv0, uv1), (uv1p, uv0p)) = [
[mesh.vt[mesh.f[edge[0]][i].vt] for i in edge[1]] for edge in edgePair]
# Determine the midpoint of the interval in UV-space
mid_uv = lerp_UV((a + b) / 2., uv0, uv1)
mid_uv_p = lerp_UV((a + b) / 2., uv0p, uv1p)
# Determine surrounding pixel indices
(p00, p10, p01, p11) = surrounding_pixels(
mid_uv, width, height, as_index=True)
(p00p, p10p, p01p, p11p) = surrounding_pixels(
mid_uv_p, width, height, as_index=True)
nPixels = width * height
st_edge = globalEdge_to_local(uv0, uv1, p00, width, height)
st_edge_p = globalEdge_to_local(uv0p, uv1p, p00p, width, height)
perp_edge = inside_perpendicular_vector(mesh, edgePair[0])
A = A_Mat(st_edge, perp_edge, 0, 1, 2, 3, 8)
B = B_Mat(st_edge, perp_edge, 0, 1, 2, 3, 8)
perp_edge_p = inside_perpendicular_vector(mesh, edgePair[1])
Ap = A_Mat(st_edge_p, perp_edge_p, 4, 5, 6, 7, 8)
Bp = B_Mat(st_edge_p, perp_edge_p, 4, 5, 6, 7, 8)
# Each of the A, Ap, B, Bp are 1xN matrices.
# E is Nx1 * 1xN = NxN
def term(M, n):
"""
Compute the integral term with constant matrix (M) and power n after
integration.
"""
M *= (1. / n * (b**n - a**n)) # Prevent unnecessary copying
return M
# Sum of matrices (1x8)
Asum = A + Ap
Bsum = B + Bp
# Product of sums (8x8)
AA = Asum.T.dot(Asum)
BB = Bsum.T.dot(Bsum)
AB = Asum.T.dot(Bsum)
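    # A note on the closed form below (our reconstruction from term(), not
    # from upstream docs): the interval energy is
    #   E_ab = \int_a^b (A t + B)^T (A t + B) dt
    #        = (b^3 - a^3)/3 * A^T A
    #          + (b^2 - a^2)/2 * (A^T B + B^T A)
    #          + (b - a) * B^T B
    # with A = Asum and B = Bsum stacked over both sides of the seam.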
values = term(AA, 3.) + term(AB + AB.T, 2.) + term(BB, 1.)
ijs = numpy.array(list(itertools.product(
(p00, p10, p01, p11, p00p, p10p, p01p, p11p), repeat=2)))
E = scipy.sparse.coo_matrix(
(values.ravel(), ijs.reshape(-1, 2).T), shape=(nPixels, nPixels))
return E
def E_edgePair(mesh, edgePair, width, height, edge_len):
"""
Compute the energy coefficient matrix over a single edge pair.
Inputs:
mesh - the model in OBJ format
edgePair - the edgePair of the model in (fi, (fv0, fv1)) format
width, height - texture's dimensions
edge_len - the length of the edge in 3D space
Output:
Returns the energy coefficient matrix over a single edge pair.
"""
uv_edgePair = [[mesh.vt[mesh.f[edge[0]][i].vt] for i in edge[1]]
for edge in edgePair]
intervals = compute_edgePair_intervals(uv_edgePair, width, height)
N = width * height
# Space for the matrix.
# E_edge = scipy.sparse.coo_matrix((N, N))
E_edge = AccumulateCOO()
# Solve for the energy coeff matrix over the edge pair
for a, b in pairwise(intervals):
# Add intervals energy to total Energy
# UPDATE: For some reason scipy is converting back and forth to CSR
# to do the +.
# E_edge = E_edge + E_ab(a, b, mesh, edgePair, width, height)
# Grab the guts of each coo matrix.
E_edge.add(E_ab(a, b, mesh, edgePair, width, height))
# Finally accumulate the total.
E_edge = E_edge.total((N, N))
# Multiply by the length of the edge in 3D
return E_edge * edge_len
def E_total(mesh, seam, width, height, depth, edge_lens):
"""
Calculate the energy coeff matrix for a width x height texture.
Inputs:
mesh - the model in OBJ format
seam - the seam of the model in (fi, (fv0, fv1)) format
width, height - texture's dimensions
edge_lens - a list containing the lengths of each edge in 3D space.
Output:
        Returns the quadratic term matrix for the seam gradient.
"""
# Sum up the energy coefficient matrices for all the edge pairs
N = width * height
# E = scipy.sparse.coo_matrix((N, N))
E = AccumulateCOO()
sum_edge_lens = 0.0
desc = "Building Seam Gradient Matrix"
disable_pbar = logging.getLogger().getEffectiveLevel() > logging.INFO
for i, (edgePair, edge_len) in enumerate(zip(tqdm(seam, unit="edge pairs",
desc=desc,
disable=disable_pbar),
edge_lens)):
sum_edge_lens += edge_len
E.add(E_edgePair(mesh, edgePair, width, height, edge_len))
E = E.total((N, N))
# Divide by the total edge length in 3D
return QuadEnergy((E / sum_edge_lens).tocsc(),
scipy.sparse.csc_matrix((N, depth)),
scipy.sparse.csc_matrix((depth, depth)))
blob_id: cb8390914ff36e52102b0c1cd19328c5aab06271 | directory_id: 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | content_id: 0eac8d40c67008d4110cf1de0cb9e47771921bb6
path: /python/dgl/distributed/server_state.py
detected_licenses: [Apache-2.0] | license_type: permissive
repo_name: dmlc/dgl | branch_name: refs/heads/master
snapshot_id: 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | revision_id: bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
visit_date: 2023-08-31T16:33:21.139163 | revision_date: 2023-08-31T07:49:22 | committer_date: 2023-08-31T07:49:22
github_id: 130,375,797 | star_events_count: 12,631 | fork_events_count: 3,482
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T15:48:24 | gha_created_at: 2018-04-20T14:49:09 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,243 | extension: py | filename: server_state.py
"""Server data"""
from .._ffi.function import _init_api
# Remove C++ bindings for now, since not used
class ServerState:
"""Data stored in one DGL server.
In a distributed setting, DGL partitions all data associated with the graph
(e.g., node and edge features, graph structure, etc.) to multiple partitions,
each handled by one DGL server. Hence, the ServerState class includes all
the data associated with a graph partition.
    In some setups, users may want to deploy servers in a heterogeneous way
-- servers are further divided into special groups for fetching/updating
node/edge data and for sampling/querying on graph structure respectively.
In this case, the ServerState can be configured to include only node/edge
data or graph structure.
Each machine can have multiple server and client processes, but only one
server is the *master* server while all the others are backup servers. All
clients and backup servers share the state of the master server via shared
memory, which means the ServerState class must be serializable and large
bulk data (e.g., node/edge features) must be stored in NDArray to leverage
shared memory.
Attributes
----------
kv_store : KVServer
reference for KVServer
graph : DGLGraph
Graph structure of one partition
total_num_nodes : int
Total number of nodes
total_num_edges : int
Total number of edges
partition_book : GraphPartitionBook
Graph Partition book
"""
def __init__(self, kv_store, local_g, partition_book):
self._kv_store = kv_store
self._graph = local_g
self.partition_book = partition_book
self._roles = {}
@property
def roles(self):
"""Roles of the client processes"""
return self._roles
@property
def kv_store(self):
"""Get data store."""
return self._kv_store
@kv_store.setter
def kv_store(self, kv_store):
self._kv_store = kv_store
@property
def graph(self):
"""Get graph data."""
return self._graph
@graph.setter
def graph(self, graph):
self._graph = graph
_init_api("dgl.distributed.server_state")
blob_id: 4a62b25b746199bbd84dac21555930c75503489b | directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2 | content_id: 2d2dd0736e46518d9ab3252dad7d2cdf313c13e8
path: /DPGAnalysis/SiStripTools/test/TIDTECInnerRingInvestigator_cfg.py
detected_licenses: [Apache-2.0] | license_type: permissive
repo_name: cms-sw/cmssw | branch_name: refs/heads/master
snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58 | revision_id: 19c178740257eb48367778593da55dcad08b7a4f
visit_date: 2023-08-23T21:57:42.491143 | revision_date: 2023-08-22T20:22:40 | committer_date: 2023-08-22T20:22:40
github_id: 10,969,551 | star_events_count: 1,006 | fork_events_count: 3,696
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T19:14:28 | gha_created_at: 2013-06-26T14:09:07 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,439 | extension: py | filename: TIDTECInnerRingInvestigator_cfg.py
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("TIDTECInnerRingInvestigator")
#prepare options
options = VarParsing.VarParsing("analysis")
options.register ('globalTag',
"DONOTEXIST",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"GlobalTag")
options.parseArguments()
#
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
fileMode = cms.untracked.string("FULLMERGE")
)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cout.enable = cms.untracked.bool(True)
process.MessageLogger.cout.threshold = cms.untracked.string("INFO")
process.MessageLogger.cout.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cout.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000)
)
process.MessageLogger.cerr.enable = cms.untracked.bool(True)
process.MessageLogger.cerr.threshold = cms.untracked.string("WARNING")
process.MessageLogger.cerr.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(100000)
)
#------------------------------------------------------------------
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxEvents) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
# skipBadFiles = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
)
#--------------------------------------
process.froml1abcHEs = cms.EDProducer("EventWithHistoryProducerFromL1ABC",
l1ABCCollection=cms.InputTag("scalersRawToDigi")
)
process.load("DPGAnalysis.SiStripTools.apvcyclephaseproducerfroml1tsDB_cfi")
process.load("DPGAnalysis.SiStripTools.eventtimedistribution_cfi")
process.seqEventHistoryReco = cms.Sequence(process.froml1abcHEs + process.APVPhases)
process.seqEventHistory = cms.Sequence(process.eventtimedistribution)
process.eventtimedistribution.historyProduct = cms.InputTag("froml1abcHEs")
process.load("DPGAnalysis.SiStripTools.sistripclustermultiplicityprod_cfi")
#process.ssclustermultprod.withClusterSize=cms.untracked.bool(True)
process.ssclustermultprod.wantedSubDets = cms.VPSet(
cms.PSet(detSelection = cms.uint32(101),detLabel = cms.string("TIDring1"),selection=cms.untracked.vstring("0x1e000600-0x18000200")),
cms.PSet(detSelection = cms.uint32(102),detLabel = cms.string("TIDring2"),selection=cms.untracked.vstring("0x1e000600-0x18000400")),
cms.PSet(detSelection = cms.uint32(201),detLabel = cms.string("TECring1"),selection=cms.untracked.vstring("0x1e0000e0-0x1c000020")),
cms.PSet(detSelection = cms.uint32(202),detLabel = cms.string("TECring2"),selection=cms.untracked.vstring("0x1e0000e0-0x1c000040"))
)
process.seqMultProd = cms.Sequence(process.ssclustermultprod)
process.load("RecoLocalTracker.SiStripClusterizer.SiStripClusterToDigiProducer_cfi")
process.seqProducers = cms.Sequence(process.seqEventHistoryReco + process.seqMultProd + process.siStripClustersToDigis)
#from HLTrigger.HLTfilters.triggerResultsFilter_cfi import *
#process.hltSelection = triggerResultsFilter.clone(
# triggerConditions = cms.vstring("HLT_ZeroBias_*"),
# hltResults = cms.InputTag( "TriggerResults", "", "HLT" ),
# l1tResults = cms.InputTag( "" ),
# throw = cms.bool(False)
# )
process.load("DPGAnalysis.SiStripTools.ssclusmultinvestigator_cfi")
process.ssclusmultinvestigator.runHisto = cms.untracked.bool(True)
process.ssclusmultinvestigator.scaleFactor=cms.untracked.int32(1)
process.ssclusmultinvestigator.wantedSubDets = cms.untracked.VPSet(
cms.PSet(detSelection = cms.uint32(101),detLabel = cms.string("TIDring1"), binMax = cms.int32(1000)),
cms.PSet(detSelection = cms.uint32(102),detLabel = cms.string("TIDring2"), binMax = cms.int32(1000)),
cms.PSet(detSelection = cms.uint32(201),detLabel = cms.string("TECring1"), binMax = cms.int32(1000)),
cms.PSet(detSelection = cms.uint32(202),detLabel = cms.string("TECring2"), binMax = cms.int32(1000))
)
process.load("DPGAnalysis.SiStripTools.clusterbigeventsdebugger_cfi")
process.clusterbigeventsdebugger.selections = cms.VPSet(
cms.PSet(detSelection = cms.uint32(101),label = cms.string("TIDring1"),selection=cms.untracked.vstring("0x1e000600-0x18000200")),
cms.PSet(detSelection = cms.uint32(102),label = cms.string("TIDring2"),selection=cms.untracked.vstring("0x1e000600-0x18000400")),
cms.PSet(detSelection = cms.uint32(201),label = cms.string("TECring1"),selection=cms.untracked.vstring("0x1e0000e0-0x1c000020")),
cms.PSet(detSelection = cms.uint32(202),label = cms.string("TECring2"),selection=cms.untracked.vstring("0x1e0000e0-0x1c000040"))
)
process.load("DPGAnalysis.SiStripTools.digibigeventsdebugger_cfi")
process.digibigeventsdebugger.selections = process.clusterbigeventsdebugger.selections
process.digibigeventsdebugger.collection = cms.InputTag("siStripClustersToDigis","ZeroSuppressed")
#process.digibigeventsdebugger.foldedStrips = cms.untracked.bool(True)
process.seqClusMultInvest = cms.Sequence(process.ssclusmultinvestigator + process.clusterbigeventsdebugger + process.digibigeventsdebugger )
process.p0 = cms.Path(
# process.hltSelection +
process.seqProducers +
process.seqEventHistory +
process.seqClusMultInvest)
#----GlobalTag ------------------------
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
process.TFileService = cms.Service('TFileService',
fileName = cms.string('TIDTECInnerRingInvestigator_'+options.tag+'.root')
)
#print process.dumpPython()
blob_id: a51d093a06182ddcf30b3624824370c787ca361b | directory_id: afc3558e47ea4c82cb70190743472274eae7aeb1 | content_id: 424a35254ebdd3050e8e13b506b7ee5d97a565fb
path: /projects/ABCNet/config/abcnet/abcnet_resnet50_fpn_500e_icdar2015.py
detected_licenses: [Apache-2.0, LicenseRef-scancode-unknown-license-reference, GPL-1.0-or-later] | license_type: permissive
repo_name: open-mmlab/mmocr | branch_name: refs/heads/main
snapshot_id: 86a77fb77ca80cede9c41a9a22080eeeaf364002 | revision_id: 9551af6e5a2482e72a2af1e3b8597fd54b999d69
visit_date: 2023-08-03T14:06:11.075037 | revision_date: 2023-07-26T02:32:14 | committer_date: 2023-07-26T02:32:14
github_id: 355,559,187 | star_events_count: 3,734 | fork_events_count: 801
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-12T03:17:12 | gha_created_at: 2021-04-07T13:40:21 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,180 | extension: py | filename: abcnet_resnet50_fpn_500e_icdar2015.py
_base_ = [
'_base_abcnet_resnet50_fpn.py',
'../_base_/datasets/icdar2015.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_sgd_500e.py',
]
# dataset settings
icdar2015_textspotting_train = _base_.icdar2015_textspotting_train
icdar2015_textspotting_train.pipeline = _base_.train_pipeline
icdar2015_textspotting_test = _base_.icdar2015_textspotting_test
icdar2015_textspotting_test.pipeline = _base_.test_pipeline
train_dataloader = dict(
batch_size=2,
num_workers=8,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=icdar2015_textspotting_train)
val_dataloader = dict(
batch_size=1,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=icdar2015_textspotting_test)
test_dataloader = val_dataloader
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
custom_imports = dict(imports=['abcnet'], allow_failed_imports=False)
load_from = 'https://download.openmmlab.com/mmocr/textspotting/abcnet/abcnet_resnet50_fpn_500e_icdar2015/abcnet_resnet50_fpn_pretrain-d060636c.pth' # noqa
find_unused_parameters = True
blob_id: c3205406a826a77da44e00ba7e9fb1485b0235b4 | directory_id: e5827d8ba612f83dbb9529ba27c6a9d43b10e91f | content_id: f6931bbede8cb5a148bf2753cd4a35780d69b029
path: /WechatOfConsole/Wechat.py
detected_licenses: [MIT] | license_type: permissive
repo_name: TheThreeDog/TouchFish | branch_name: refs/heads/master
snapshot_id: 56e16edbaf43bcf2f1d354d13c740bc9e7036a72 | revision_id: 72ef18ccbc9fc5b32d1c31687702c7b461399864
visit_date: 2023-01-21T07:10:35.067864 | revision_date: 2023-01-04T03:48:34 | committer_date: 2023-01-04T03:48:34
github_id: 184,549,367 | star_events_count: 134 | fork_events_count: 18
gha_license_id: MIT | gha_event_created_at: 2023-01-04T03:48:35 | gha_created_at: 2019-05-02T08:50:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 929 | extension: py | filename: Wechat.py
# Author  : ThreeDog
# Date    : 2019-04-29
# Thanks  : built on top of itchat: https://github.com/littlecodersh/itchat
# Function: use WeChat from the console; messages are received and sent through API calls. The receiving end needs a thread running itchat.run().
# Remark  : only text messages are supported; keeps the WeChat user experience as much as possible.
# Requires:
#   - pip install itchat
# Note    : only single-line input is supported; a line break causes a display bug.
import platform
if platform.system().lower() != 'linux':
print("不支持的平台:{}".format(platform.system().lower()))
exit(0)
import User
from translator import translator
# 主程序
if __name__ == '__main__':
translator.load("lang/en_US.ts") # translator; Chinese is loaded by default
users = User.Users.instance() # initialize the friends list (singleton)
users.exec() # enter the users event loop
|
2ffc37182ec54d0d1e1f240290291a9d9aaac69c
|
3e4c84031a9d7843ef0dab64c78fbeadd674b35e
|
/glslc/test/option_dash_cap_O.py
|
fa474f9a41a554863cd520f98f6bd8acefdf7143
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
google/shaderc
|
c58f733ade920094824c83c10f1911759536fb9a
|
e166325b24d79d64bfa47065328890ce116ea642
|
refs/heads/main
| 2023-09-04T03:19:00.235676
| 2023-08-10T19:25:29
| 2023-08-10T19:25:29
| 40,277,133
| 1,712
| 426
|
NOASSERTION
| 2023-09-07T15:48:59
| 2015-08-06T01:17:42
|
C++
|
UTF-8
|
Python
| false
| false
| 5,864
|
py
|
option_dash_cap_O.py
|
# Copyright 2016 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
from environment import File, Directory
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader
MINIMAL_SHADER = '#version 310 es\nvoid main() {}'
EMPTY_SHADER_IN_CWD = Directory('.', [File('shader.vert', MINIMAL_SHADER)])
ASSEMBLY_WITH_DEBUG_SOURCE = [
'; SPIR-V\n',
'; Version: 1.0\n',
'; Generator: Google Shaderc over Glslang; 11\n',
'; Bound: 7\n',
'; Schema: 0\n',
' OpCapability Shader\n',
' %2 = OpExtInstImport "GLSL.std.450"\n',
' OpMemoryModel Logical GLSL450\n',
' OpEntryPoint Vertex %main "main"\n',
' %1 = OpString "shader.vert"\n',
' OpSource ESSL 310 %1 "// OpModuleProcessed entry-point main\n',
'// OpModuleProcessed client vulkan100\n',
'// OpModuleProcessed target-env vulkan1.0\n',
'// OpModuleProcessed entry-point main\n',
'#line 1\n',
'#version 310 es\n',
'void main() {}"\n',
' OpSourceExtension "GL_GOOGLE_cpp_style_line_directive"\n',
' OpSourceExtension "GL_GOOGLE_include_directive"\n',
' OpName %main "main"\n',
' %void = OpTypeVoid\n',
' %4 = OpTypeFunction %void\n',
' OpLine %1 2 11\n',
' %main = OpFunction %void None %4\n',
' %6 = OpLabel\n',
' OpReturn\n',
' OpFunctionEnd\n']
ASSEMBLY_WITH_DEBUG = [
'; SPIR-V\n',
'; Version: 1.0\n',
'; Generator: Google Shaderc over Glslang; 11\n',
'; Bound: 6\n',
'; Schema: 0\n',
' OpCapability Shader\n',
' %1 = OpExtInstImport "GLSL.std.450"\n',
' OpMemoryModel Logical GLSL450\n',
' OpEntryPoint Vertex %main "main"\n',
' OpSource ESSL 310\n',
' OpSourceExtension "GL_GOOGLE_cpp_style_line_directive"\n',
' OpSourceExtension "GL_GOOGLE_include_directive"\n',
' OpName %main "main"\n',
' %void = OpTypeVoid\n',
' %3 = OpTypeFunction %void\n',
' %main = OpFunction %void None %3\n',
' %5 = OpLabel\n',
' OpReturn\n',
' OpFunctionEnd\n']
ASSEMBLY_WITHOUT_DEBUG = [
'; SPIR-V\n',
'; Version: 1.0\n',
'; Generator: Google Shaderc over Glslang; 11\n',
'; Bound: 6\n',
'; Schema: 0\n',
' OpCapability Shader\n',
' %1 = OpExtInstImport "GLSL.std.450"\n',
' OpMemoryModel Logical GLSL450\n',
' OpEntryPoint Vertex %4 "main"\n',
' %void = OpTypeVoid\n',
' %3 = OpTypeFunction %void\n',
' %4 = OpFunction %void None %3\n', # %4 vs. %main
' %5 = OpLabel\n',
' OpReturn\n',
' OpFunctionEnd\n']
@inside_glslc_testsuite('OptionDashCapO')
class TestDashCapO0(expect.ValidFileContents):
"""Tests that -O0 works."""
environment = EMPTY_SHADER_IN_CWD
glslc_args = ['-S', '-O0', 'shader.vert']
target_filename = 'shader.vert.spvasm'
expected_file_contents = ASSEMBLY_WITH_DEBUG
@inside_glslc_testsuite('OptionDashCapO')
class TestDashCapOPerformance(expect.ValidFileContents):
"""Tests -O works."""
environment = EMPTY_SHADER_IN_CWD
glslc_args = ['-S', '-O', 'shader.vert']
target_filename = 'shader.vert.spvasm'
expected_file_contents = ASSEMBLY_WITHOUT_DEBUG
@inside_glslc_testsuite('OptionDashCapO')
class TestDashCapOs(expect.ValidFileContents):
"""Tests that -Os works."""
environment = EMPTY_SHADER_IN_CWD
glslc_args = ['-S', '-Os', 'shader.vert']
target_filename = 'shader.vert.spvasm'
expected_file_contents = ASSEMBLY_WITHOUT_DEBUG
@inside_glslc_testsuite('OptionDashCapO')
class TestDashCapOOverriding(expect.ValidFileContents):
"""Tests that if there are multiple -O's, only the last one takes effect."""
environment = EMPTY_SHADER_IN_CWD
glslc_args = ['-S', '-Os', '-O0', '-Os', '-O0', 'shader.vert']
target_filename = 'shader.vert.spvasm'
expected_file_contents = ASSEMBLY_WITH_DEBUG
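# Equivalent command line (illustrative): the rightmost flag wins, so
#   glslc -S -Os -O0 -Os -O0 shader.vert
# behaves the same as passing a single '-O0'.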
@inside_glslc_testsuite('OptionDashCapO')
class TestDashCapOWithDashG(expect.ValidFileContents):
"""Tests that -g restrains -O from turning on strip debug info."""
environment = EMPTY_SHADER_IN_CWD
glslc_args = ['-S', '-Os', '-g', 'shader.vert']
target_filename = 'shader.vert.spvasm'
expected_file_contents = ASSEMBLY_WITH_DEBUG_SOURCE
@inside_glslc_testsuite('OptionDashCapO')
class TestDashGWithDashCapO(expect.ValidFileContents):
"""Tests that -g restrains -O from turning on strip debug info."""
environment = EMPTY_SHADER_IN_CWD
glslc_args = ['-S', '-g', '-Os', 'shader.vert']
target_filename = 'shader.vert.spvasm'
expected_file_contents = ASSEMBLY_WITH_DEBUG_SOURCE
@inside_glslc_testsuite('OptionDashCapO')
class TestWrongOptLevel(expect.NoGeneratedFiles, expect.ErrorMessage):
"""Tests erroring out with wrong optimization level."""
shader = FileShader(MINIMAL_SHADER, '.vert')
glslc_args = ['-c', '-O2', shader]
expected_error = "glslc: error: invalid value '2' in '-O2'\n"
|
4c496bd212c64f820b292285b2ca5b112828a341
|
8d1c7fba7cd15f8a1e33fd27d11eefd1c67d579f
|
/src/test/py/bazel/runfiles_sandboxed_test.py
|
9ae6457023864ccdb2bc572739c930a890e7e7a5
|
[
"Apache-2.0"
] |
permissive
|
bazelbuild/bazel
|
5896162455f032efc899b8de60aa39b9d2cad4a6
|
171aae3f9c57b41089e25ec61fc84c35baa3079d
|
refs/heads/master
| 2023-08-22T22:52:48.714735
| 2023-08-22T18:01:53
| 2023-08-22T18:01:53
| 20,773,773
| 20,294
| 4,383
|
Apache-2.0
| 2023-09-14T18:38:44
| 2014-06-12T16:00:38
|
Java
|
UTF-8
|
Python
| false
| false
| 4,916
|
py
|
runfiles_sandboxed_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from src.test.py.bazel import test_base
class RunfilesSandboxedTest(test_base.TestBase):
def _FailWithContents(self, msg, contents):
self.fail("%s\ncontents =\n | %s\n---" % (msg, "\n | ".join(contents)))
def testRunfilesLibrariesFindRunfilesWithoutEnvvars(self):
for s, t, exe in [
("WORKSPACE.mock", "WORKSPACE", False),
("bar/BUILD.mock", "bar/BUILD", False),
("bar/bar.py", "bar/bar.py", True),
("bar/bar-py-data.txt", "bar/bar-py-data.txt", False),
("bar/Bar.java", "bar/Bar.java", False),
("bar/bar-java-data.txt", "bar/bar-java-data.txt", False),
("bar/bar.sh", "bar/bar.sh", True),
("bar/bar-sh-data.txt", "bar/bar-sh-data.txt", False),
("bar/bar.cc", "bar/bar.cc", False),
("bar/bar-cc-data.txt", "bar/bar-cc-data.txt", False),
]:
self.CopyFile(
self.Rlocation("io_bazel/src/test/py/bazel/testdata/runfiles_test/" +
s), t, exe)
self.ScratchFile(
"foo/BUILD",
[
"genrule(",
" name = 'gen',",
" outs = ['stdout.txt', 'data_files.txt'],",
" cmd = 'cat $$(' + ",
# The genrule runs all bar-<language> tools, saves the complete
# stdout into stdout.txt, and prints the contents of rlocations
# reported by the tools (i.e. the contents of the
# bar-<language>-data.txt files) into data_files.txt.
" ' ( $(location //bar:bar-py) && ' +",
" ' $(location //bar:bar-java) && ' +",
" ' $(location //bar:bar-sh) && ' +",
" ' $(location //bar:bar-cc) ; ' +",
" ' ) | ' + ",
" ' tee $(location stdout.txt) | ' + ",
" ' grep \"^rloc=\" | ' + ",
" ' sed \"s,^rloc=,,\"' + ",
" ') > $(location data_files.txt)',",
" tools = [",
" '//bar:bar-cc',",
" '//bar:bar-java',",
" '//bar:bar-py',",
" '//bar:bar-sh',",
" ],",
")"
])
_, stdout, _ = self.RunBazel(["info", "bazel-genfiles"])
bazel_genfiles = stdout[0]
self.RunBazel([
"build",
"--verbose_failures",
"//foo:gen",
"--genrule_strategy=sandboxed",
])
stdout_txt = os.path.join(bazel_genfiles, "foo/stdout.txt")
self.assertTrue(os.path.isfile(stdout_txt))
data_files_txt = os.path.join(bazel_genfiles, "foo/data_files.txt")
self.assertTrue(os.path.isfile(data_files_txt))
# Output of the bar-<language> binaries that they printed to stdout.
stdout_lines = []
with open(stdout_txt, "rt") as f:
stdout_lines = [line.strip() for line in f.readlines()]
# Contents of the bar-<language>-data.txt files.
data_files = []
with open(data_files_txt, "rt") as f:
data_files = [line.strip() for line in f.readlines()]
if len(stdout_lines) != 8:
self._FailWithContents("wrong number of output lines", stdout_lines)
i = 0
for lang in [("py", "Python", "bar.py"), ("java", "Java", "Bar.java"),
("sh", "Bash", "bar.sh"), ("cc", "C++", "bar.cc")]:
# Check that the bar-<language> binary printed the expected output.
if stdout_lines[i * 2] != "Hello %s Bar!" % lang[1]:
self._FailWithContents("wrong line for " + lang[1], stdout_lines)
if not stdout_lines[i * 2 + 1].startswith("rloc="):
self._FailWithContents("wrong line for " + lang[1], stdout_lines)
if not stdout_lines[i * 2 + 1].endswith(
"foo_ws/bar/bar-%s-data.txt" % lang[0]):
self._FailWithContents("wrong line for " + lang[1], stdout_lines)
# Assert the contents of bar-<language>-data.txt. This indicates that
# the runfiles library in the bar-<language> binary found the correct
# runfile and returned a valid path.
if data_files[i] != "data for " + lang[2]:
self._FailWithContents("runfile does not exist for " + lang[1],
stdout_lines)
i += 1
if __name__ == "__main__":
unittest.main()
|
74149d886a7116d04856dad779ef3a692352e8fd
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/nuitka/tools/testing/Common.py
|
5a46c9921c651efcc499c0a7f588d3f650b2744f
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328
| 2023-08-27T09:16:45
| 2023-08-27T09:16:45
| 9,626,741
| 8,573
| 599
|
Apache-2.0
| 2023-09-13T02:49:41
| 2013-04-23T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 55,464
|
py
|
Common.py
|
# Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Common test infrastructure functions. To be used by test runners. """
import ast
import atexit
import gc
import hashlib
import os
import re
import shutil
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from optparse import OptionParser
from nuitka.__past__ import subprocess
from nuitka.PythonVersions import getTestExecutionPythonVersions, isDebugPython
from nuitka.Tracing import OurLogger, my_print
from nuitka.tree.SourceHandling import readSourceCodeFromFilename
from nuitka.utils.AppDirs import getCacheDir
from nuitka.utils.Execution import (
check_output,
createProcess,
getNullInput,
getNullOutput,
)
from nuitka.utils.FileOperations import (
areSamePaths,
getExternalUsePath,
getFileContentByLine,
getFileContents,
getFileList,
isFilenameSameAsOrBelowPath,
makePath,
openTextFile,
removeDirectory,
)
from nuitka.utils.InstalledPythons import findInstalledPython
from nuitka.utils.Jinja2 import getTemplate
from nuitka.utils.Utils import getOS, isMacOS, isWin32Windows
from .SearchModes import (
SearchModeByPattern,
SearchModeCoverage,
SearchModeImmediate,
SearchModeOnly,
SearchModeResume,
)
# spellchecker: ignore popenargs,pathsep,killpg
test_logger = OurLogger("", base_style="blue")
def check_result(*popenargs, **kwargs):
if "stdout" in kwargs:
raise ValueError("stdout argument not allowed, it will be overridden.")
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
_unused_output, _unused_err = process.communicate()
ret_value = process.poll()
if ret_value:
return False
else:
return True
_start_dir = None
def goMainDir():
global _start_dir # singleton, pylint: disable=global-statement
_start_dir = os.getcwd()
# Go its own directory, to have it easy with path knowledge.
os.chdir(os.path.dirname(os.path.abspath(sys.modules["__main__"].__file__)))
def getStartDir():
return _start_dir
_python_version_str = None
_python_version = None
_python_arch = None
_python_executable = None
_python_vendor = None
def _parsePythonVersionOutput(python_binary):
version_output = check_output(
(
python_binary,
"-c",
"""\
import sys, os;\
print(".".join(str(s) for s in list(sys.version_info)[:3]));\
print(("x86_64" if "AMD64" in sys.version else "x86") if os.name == "nt" else os.uname()[4]);\
print(sys.executable);\
print("Anaconda" if os.path.exists(os.path.join(sys.prefix, 'conda-meta')) else "Unknown")\
""",
),
stderr=subprocess.STDOUT,
)
python_version_str = version_output.split(b"\n")[0].strip()
python_arch = version_output.split(b"\n")[1].strip()
python_executable = version_output.split(b"\n")[2].strip()
python_vendor = version_output.split(b"\n")[3].strip()
if str is not bytes:
python_version_str = python_version_str.decode("utf8")
python_arch = python_arch.decode("utf8")
python_executable = python_executable.decode("utf8")
python_vendor = python_vendor.decode("utf8")
assert type(python_version_str) is str, repr(python_version_str)
assert type(python_arch) is str, repr(python_arch)
assert type(python_executable) is str, repr(_python_executable)
python_version = tuple(int(d) for d in python_version_str.split("."))
return (
python_version,
python_version_str,
python_arch,
python_executable,
python_vendor,
)
def setup(suite="", needs_io_encoding=False, silent=False, go_main=True):
if go_main:
goMainDir()
if "PYTHON" not in os.environ:
os.environ["PYTHON"] = sys.executable
# Allow test code to use this to make caching specific.
os.environ["NUITKA_TEST_SUITE"] = suite
# Allow providing 33, 27, and expand that to python2.7
if (
len(os.environ["PYTHON"]) == 2
and os.environ["PYTHON"].isdigit()
and not isWin32Windows()
):
os.environ["PYTHON"] = "python%s.%s" % (
os.environ["PYTHON"][0],
os.environ["PYTHON"][1],
)
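# For example (illustrative): PYTHON=27 expands to python2.7, PYTHON=39 to python3.9.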
if needs_io_encoding and "PYTHONIOENCODING" not in os.environ:
os.environ["PYTHONIOENCODING"] = "utf-8"
global _python_version_str, _python_version, _python_arch, _python_executable, _python_vendor # singleton, pylint: disable=global-statement
(
_python_version,
_python_version_str,
_python_arch,
_python_executable,
_python_vendor,
) = _parsePythonVersionOutput(python_binary=os.environ["PYTHON"])
if not silent:
my_print("Using concrete python", _python_version_str, "on", _python_arch)
if "COVERAGE_FILE" not in os.environ:
os.environ["COVERAGE_FILE"] = os.path.join(
os.path.dirname(__file__), "..", "..", "..", ".coverage"
)
return _python_version
def getPythonArch():
return _python_arch
def getPythonVendor():
return _python_vendor
def getPythonVersionString():
return _python_version_str
tmp_dir = None
def getTempDir():
# Create a temporary directory to work in, automatically remove it in case
# it is empty in the end.
global tmp_dir # singleton, pylint: disable=global-statement
if tmp_dir is None:
tmp_dir = tempfile.mkdtemp(
prefix=os.path.basename(
os.path.dirname(os.path.abspath(sys.modules["__main__"].__file__))
)
+ "-",
dir=tempfile.gettempdir() if not os.path.exists("/var/tmp") else "/var/tmp",
)
def removeTempDir():
removeDirectory(path=tmp_dir, ignore_errors=True)
atexit.register(removeTempDir)
return tmp_dir
def convertUsing2to3(path, force=False):
command = [os.environ["PYTHON"], "-m", "py_compile", path]
if not force:
if "xrange" not in getFileContents(path):
if check_result(command, stderr=getNullOutput()):
return path, False
filename = os.path.basename(path)
new_path = os.path.join(getTempDir(), filename)
# This may already be a temp file, e.g. because of construct creation.
try:
shutil.copy(path, new_path)
except shutil.Error:
pass
# For Python2.6 and 3.2 the -m lib2to3 was not yet supported.
use_binary = sys.version_info[:2] in ((2, 6), (3, 2))
if use_binary:
# On Windows, we cannot rely on 2to3 to be in the path.
if isWin32Windows():
command = [
sys.executable,
os.path.join(os.path.dirname(sys.executable), "Tools/Scripts/2to3.py"),
]
else:
command = ["2to3"]
else:
command = [sys.executable, "-m", "lib2to3"]
command += ("-w", "-n", "--no-diffs", new_path)
try:
check_output(command, stderr=getNullOutput())
except subprocess.CalledProcessError:
if isWin32Windows():
raise
command[0:3] = ["2to3"]
check_output(command, stderr=getNullOutput())
data = getFileContents(new_path)
with openTextFile(new_path, "w") as result_file:
result_file.write("__file__ = %r\n" % os.path.abspath(path))
result_file.write(data)
return new_path, True
def decideFilenameVersionSkip(filename):
"""Make decision whether to skip based on filename and Python version.
This codifies certain rules that files can have as suffixes or prefixes
to make them be part of the set of tests executed for a version or not.
Generally, an ending of "<major><minor>.py" indicates that it must be that
Python version or higher. There is no need for ending in "26.py" as this
is the minimum version anyway.
The "_2.py" indicates a maximum version of 2.7, i.e. not Python 3.x, for
language syntax no more supported.
"""
# This will make many decisions with immediate returns.
# pylint: disable=too-many-branches,too-many-return-statements
assert type(filename) is str, repr(filename)
# Skip runner scripts by default.
if filename.startswith("run_"):
return False
if filename.endswith(".j2"):
filename = filename[:-3]
# Skip tests that require Python 2.7 at least.
if filename.endswith("27.py") and _python_version < (2, 7):
return False
# Skip tests that require Python 2 at maximum.
if filename.endswith("_2.py") and _python_version >= (3,):
return False
# Skip tests that require Python 3.7 at maximum.
if filename.endswith("_37.py") and _python_version >= (3, 8):
return False
# Skip tests that require Python 3.2 at least.
if filename.endswith("32.py") and _python_version < (3, 2):
return False
# Skip tests that require Python 3.3 at least.
if filename.endswith("33.py") and _python_version < (3, 3):
return False
# Skip tests that require Python 3.4 at least.
if filename.endswith("34.py") and _python_version < (3, 4):
return False
# Skip tests that require Python 3.5 at least.
if filename.endswith("35.py") and _python_version < (3, 5):
return False
# Skip tests that require Python 3.6 at least.
if filename.endswith("36.py") and _python_version < (3, 6):
return False
# Skip tests that require Python 3.7 at least.
if filename.endswith("37.py") and _python_version < (3, 7):
return False
# Skip tests that require Python 3.8 at least.
if filename.endswith("38.py") and _python_version < (3, 8):
return False
# Skip tests that require Python 3.9 at least.
if filename.endswith("39.py") and _python_version < (3, 9):
return False
# Skip tests that require Python 3.10 at least.
if filename.endswith("310.py") and _python_version < (3, 10):
return False
# Skip tests that require Python 3.11 at least.
if filename.endswith("311.py") and _python_version < (3, 11):
return False
return True
def decideNeeds2to3(filename):
return _python_version >= (3,) and not re.match(r".*3\d+\.py", filename)
def _removeCPythonTestSuiteDir():
# Cleanup, some tests apparently forget that.
try:
if os.path.isdir("@test"):
removeDirectory("@test", ignore_errors=False)
elif os.path.isfile("@test"):
os.unlink("@test")
except OSError:
# TODO: Move this into removeDirectory maybe. Doing an external
# call as last resort could be a good idea.
# This seems to work for broken "lnk" files.
if isWin32Windows():
os.system("rmdir /S /Q @test")
if os.path.exists("@test"):
raise
def compareWithCPython(
dirname, filename, extra_flags, search_mode, needs_2to3, on_error=None
):
"""Call the comparison tool. For a given directory filename.
The search mode decides if the test case aborts on error or gets extra
flags that are exceptions.
"""
# Many cases to consider here, pylint: disable=too-many-branches
if dirname is None:
path = filename
else:
path = os.path.join(dirname, filename)
# Apply 2to3 conversion if necessary.
if needs_2to3:
path, converted = convertUsing2to3(path)
else:
converted = False
if os.getenv("NUITKA_TEST_INSTALLED", "") == "1":
command = [
sys.executable,
"-m",
"nuitka.tools.testing.compare_with_cpython",
path,
"silent",
]
else:
compare_with_cpython = os.path.join("..", "..", "bin", "compare_with_cpython")
if os.path.exists(compare_with_cpython):
command = [sys.executable, compare_with_cpython, path, "silent"]
else:
test_logger.sysexit("Error, cannot locate Nuitka comparison runner.")
if extra_flags is not None:
command += extra_flags
command += search_mode.getExtraFlags(dirname, filename)
# Cleanup before and after test stage directory.
_removeCPythonTestSuiteDir()
try:
result = subprocess.call(command)
except KeyboardInterrupt:
result = 2
# Cleanup before and after test stage directory.
_removeCPythonTestSuiteDir()
if result != 0 and result != 2 and search_mode.abortOnFinding(dirname, filename):
if on_error is not None:
on_error(dirname, filename)
search_mode.onErrorDetected("Error exit! %s" % result)
if converted:
os.unlink(path)
if result == 2:
test_logger.sysexit("Interrupted, with CTRL-C\n", exit_code=2)
def checkCompilesNotWithCPython(dirname, filename, search_mode):
if dirname is None:
path = filename
else:
path = os.path.join(dirname, filename)
command = [_python_executable, "-mcompileall", path]
try:
result = subprocess.call(command)
except KeyboardInterrupt:
result = 2
if result != 1 and result != 2 and search_mode.abortOnFinding(dirname, filename):
search_mode.onErrorDetected("Error exit! %s" % result)
def checkSucceedsWithCPython(filename):
command = [_python_executable, filename]
result = subprocess.call(command, stdout=getNullOutput(), stderr=subprocess.STDOUT)
return result == 0
def getDebugPython():
# For all Python, if it's the one also executing the runner, which is
# very probably the case, we check that. We don't check the provided
# binary here, this could be done as well.
if sys.executable == os.environ["PYTHON"] and isDebugPython():
return sys.executable
# On Debian systems, these work.
debug_python = os.path.join("/usr/bin/", os.environ["PYTHON"] + "-dbg")
if os.path.exists(debug_python):
return debug_python
# On Fedora systems, these work, but only for Python3
debug_python = os.path.join("/usr/bin/", os.environ["PYTHON"] + "-debug")
if os.path.exists(debug_python) and _parsePythonVersionOutput(debug_python)[0] >= (
3,
):
return debug_python
# On Windows systems, these work. TODO: Python asserts in Nuitka with
# these, not sure why, pylint: disable=using-constant-test
if False:
debug_python = os.environ["PYTHON"]
if debug_python.lower().endswith(".exe"):
debug_python = debug_python[:-4]
debug_python = debug_python + "_d.exe"
if os.path.exists(debug_python):
return debug_python
# Otherwise no.
return None
def displayRuntimeTraces(logger, path):
if not os.path.exists(path):
# TODO: Have a logger package passed.
logger.sysexit("Error, cannot find %r (%r)." % (path, os.path.abspath(path)))
path = os.path.abspath(path)
# TODO: Merge code for building command with below function, this is otherwise
# horribly bad.
if os.name == "posix":
# Run with traces to help debugging, specifically in CI environment.
if getOS() in ("Darwin", "FreeBSD"):
test_logger.info("dtruss:")
os.system("sudo dtruss %s" % path)
else:
test_logger.info("strace:")
os.system("strace -s4096 -e file %s" % path)
def hasModule(module_name):
result = subprocess.call(
(os.environ["PYTHON"], "-c", "import %s" % module_name),
stdout=getNullOutput(),
stderr=subprocess.STDOUT,
)
return result == 0
m1 = {}
m2 = {}
def cleanObjRefCntMaps():
m1.clear()
m2.clear()
# Warm up repr/str conversions for all objects.
for x in gc.get_objects():
try:
str(x)
except Exception: # Catch all the things, pylint: disable=broad-except
pass
def snapObjRefCntMap(before):
# Inherently complex, pylint: disable=too-many-branches
if before:
m = m1
else:
m = m2
m.clear()
gc.collect()
for x in gc.get_objects():
# The dictionary is cyclic, and contains itself, avoid that.
if x is m1 or x is m2:
continue
if type(x) is str and (x in m1 or x in m2):
continue
if type(x) is not str and isinstance(x, str):
k = "str_overload_" + x.__class__.__name__ + str(x)
elif type(x) is dict:
if "__builtins__" in x:
k = "<module dict %s>" % x["__name__"]
elif "__spec__" in x and "__name__" in x:
k = "<module dict %s>" % x["__name__"]
else:
k = str(x)
elif hasattr(x, "__class__") and x.__class__.__name__ == "compiled_frame":
k = "<compiled_frame at xxx, line %d code %s" % (x.f_lineno, x.f_code)
else:
k = str(x)
c = sys.getrefcount(x)
if k in m:
m[k] += c
else:
m[k] = c
orig_print = None
def disablePrinting():
# Singleton, pylint: disable=global-statement
global orig_print
if orig_print is None:
orig_print = __builtins__["print"]
__builtins__["print"] = lambda *args, **kwargs: None
def reenablePrinting():
# Singleton, pylint: disable=global-statement
global orig_print
if orig_print is not None:
__builtins__["print"] = orig_print
orig_print = None
_debug_python = isDebugPython()
def getTotalReferenceCount():
if _debug_python:
gc.collect()
return sys.gettotalrefcount()
else:
gc.collect()
all_objects = gc.get_objects()
# Sum the reference counts in two passes: first over non-float objects while
# the accumulator is itself a float, then over the floats after switching the
# accumulator to int, so the accumulator's type never disturbs the counts.
result = 0.0
for obj in all_objects:
if type(obj) is float:
continue
result += sys.getrefcount(obj)
result = int(result)
for obj in all_objects:
if type(obj) is not float:
continue
result += sys.getrefcount(obj)
return result
def checkReferenceCount(checked_function, max_rounds=20, explain=False):
# This is obviously going to be complex, pylint: disable=too-many-branches
# Clean start conditions.
assert sys.exc_info() == (None, None, None), sys.exc_info()
my_print(checked_function.__name__ + ": ", end="")
sys.stdout.flush()
disablePrinting()
# Make sure reference for these are already taken at the start.
ref_count1 = 17
ref_count2 = 17
if explain:
cleanObjRefCntMaps()
assert max_rounds > 0
result = False
for count in range(max_rounds):
if explain and count == max_rounds - 1:
snapObjRefCntMap(before=True)
ref_count1 = getTotalReferenceCount()
checked_function()
ref_count2 = getTotalReferenceCount()
# Not allowed, but happens when bugs occur.
assert sys.exc_info() == (None, None, None), sys.exc_info()
if ref_count1 == ref_count2:
result = True
break
if explain and count == max_rounds - 1:
snapObjRefCntMap(before=False)
reenablePrinting()
if result:
my_print("PASSED")
else:
my_print(
"FAILED %d %d leaked %d" % (ref_count1, ref_count2, ref_count2 - ref_count1)
)
if explain:
print("REPORT of differences:")
assert m1
assert m2
# Using items() would add unwanted usages, pylint: disable=consider-using-dict-items
for key in m1:
if key not in m2:
my_print("*" * 80)
my_print("extra:", m1[key], key)
elif m1[key] != m2[key]:
my_print("*" * 80)
my_print(m1[key], "->", m2[key], key)
else:
pass
for key in m2:
if key not in m1:
my_print("*" * 80)
my_print("missing:", m2[key], key)
# print m1[key]
assert sys.exc_info() == (None, None, None), sys.exc_info()
gc.collect()
sys.stdout.flush()
return result
def createSearchMode():
# Dealing with many options, pylint: disable=too-many-branches
parser = OptionParser()
select_group = parser.add_option_group("Select Tests")
select_group.add_option(
"--pattern",
action="store",
dest="pattern",
default="",
help="""\
Execute only tests matching the pattern. Defaults to all tests.""",
)
select_group.add_option(
"--all",
action="store_true",
dest="all",
default=False,
help="""\
Execute all tests, continue execution even after failure of one.""",
)
del select_group
debug_group = parser.add_option_group("Test features")
debug_group.add_option(
"--debug",
action="store_true",
dest="debug",
default=False,
help="""\
Executing all self checks possible to find errors in Nuitka, good for test coverage.
Defaults to off.""",
)
debug_group.add_option(
"--commands",
action="store_true",
dest="show_commands",
default=False,
help="""Output commands being done in output comparison.
Defaults to off.""",
)
del debug_group
options, positional_args = parser.parse_args()
if options.debug:
addExtendedExtraOptions("--debug")
if options.show_commands:
os.environ["NUITKA_TRACE_COMMANDS"] = "1"
# Default to searching.
mode = positional_args[0] if positional_args else "search"
# Avoid having to use options style.
if mode in ("search", "only", "coverage"):
if len(positional_args) >= 2 and not options.pattern:
options.pattern = positional_args[1]
if mode == "search":
if options.all:
return SearchModeByPattern(start_at=None)
elif options.pattern:
return SearchModeByPattern(
start_at=options.pattern.replace("/", os.path.sep)
)
else:
return SearchModeImmediate()
elif mode == "resume":
return SearchModeResume(sys.modules["__main__"].__file__)
elif mode == "only":
if options.pattern:
pattern = options.pattern.replace("/", os.path.sep)
return SearchModeOnly(pattern)
else:
assert False
elif mode == "coverage":
return SearchModeCoverage(
start_at=options.pattern.replace("/", os.path.sep)
if options.pattern
else None
)
else:
test_logger.sysexit("Error, using unknown search mode %r" % mode)
def reportSkip(reason, dirname, filename):
case = os.path.join(dirname, filename)
case = os.path.normpath(case)
test_logger.info("Skipped, %s (%s)." % (case, reason))
def executeReferenceChecked(
prefix, names, tests_skipped=(), tests_stderr=(), explain=False
):
gc.disable()
extract_number = lambda name: int(name.replace(prefix, ""))
# Find the function names.
matching_names = tuple(
name for name in names if name.startswith(prefix) and name[-1].isdigit()
)
old_stderr = sys.stderr
# Everything passed
result = True
for name in sorted(matching_names, key=extract_number):
number = extract_number(name)
# print(tests_skipped)
if number in tests_skipped:
my_print(name + ": SKIPPED (%s)" % tests_skipped[number])
continue
# Avoid non-raisable output.
try:
if number in tests_stderr:
sys.stderr = getNullOutput()
except OSError: # Windows
if not checkReferenceCount(names[name], explain=explain):
result = False
else:
if not checkReferenceCount(names[name], explain=explain):
result = False
if number in tests_stderr:
new_stderr = sys.stderr
sys.stderr = old_stderr
new_stderr.close()
gc.enable()
return result
def addToPythonPath(python_path, in_front=False):
if type(python_path) in (tuple, list):
python_path = os.pathsep.join(python_path)
if python_path:
if "PYTHONPATH" in os.environ:
if in_front:
os.environ["PYTHONPATH"] = (
python_path + os.pathsep + os.environ["PYTHONPATH"]
)
else:
os.environ["PYTHONPATH"] += os.pathsep + python_path
else:
os.environ["PYTHONPATH"] = python_path
@contextmanager
def withPythonPathChange(python_path):
if python_path:
if type(python_path) not in (tuple, list):
python_path = python_path.split(os.pathsep)
python_path = [
os.path.normpath(os.path.abspath(element)) for element in python_path
]
python_path = os.pathsep.join(python_path)
if "PYTHONPATH" in os.environ:
old_path = os.environ["PYTHONPATH"]
os.environ["PYTHONPATH"] += os.pathsep + python_path
else:
old_path = None
os.environ["PYTHONPATH"] = python_path
yield
if python_path:
if old_path is None:
del os.environ["PYTHONPATH"]
else:
os.environ["PYTHONPATH"] = old_path
def addExtendedExtraOptions(*args):
old_value = os.environ.get("NUITKA_EXTRA_OPTIONS")
value = old_value
for arg in args:
if value is None:
value = arg
else:
value += " " + arg
os.environ["NUITKA_EXTRA_OPTIONS"] = value
return old_value
@contextmanager
def withExtendedExtraOptions(*args):
assert args
old_value = addExtendedExtraOptions(*args)
yield
if old_value is None:
del os.environ["NUITKA_EXTRA_OPTIONS"]
else:
os.environ["NUITKA_EXTRA_OPTIONS"] = old_value
def indentedCode(codes, count):
"""Indent code, used for generating test codes."""
return "\n".join(" " * count + line if line else "" for line in codes)
def convertToPython(doctests, line_filter=None):
"""Convert give doctest string to static Python code."""
# This is convoluted, but it just needs to work, pylint: disable=too-many-branches
import doctest
code = doctest.script_from_examples(doctests)
if code.endswith("\n"):
code += "#\n"
else:
assert False
output = []
inside = False
def getPrintPrefixed(evaluated, line_number):
try:
node = ast.parse(evaluated.lstrip(), "eval")
except SyntaxError:
return evaluated
if node.body[0].__class__.__name__ == "Expr":
count = 0
while evaluated.startswith(" " * count):
count += 1
if sys.version_info < (3,):
modified = (count - 1) * " " + "print " + evaluated
return (
(count - 1) * " "
+ ("print 'Line %d'" % line_number)
+ "\n"
+ modified
)
else:
modified = (count - 1) * " " + "print(" + evaluated + "\n)\n"
return (
(count - 1) * " "
+ ("print('Line %d'" % line_number)
+ ")\n"
+ modified
)
else:
return evaluated
def getTried(evaluated, line_number):
if sys.version_info < (3,):
return """
try:
%(evaluated)s
except Exception as __e:
print "Occurred", type(__e), __e
""" % {
"evaluated": indentedCode(
getPrintPrefixed(evaluated, line_number).split("\n"), 4
)
}
else:
return """
try:
%(evaluated)s
except Exception as __e:
print("Occurred", type(__e), __e)
""" % {
"evaluated": indentedCode(
getPrintPrefixed(evaluated, line_number).split("\n"), 4
)
}
def isOpener(evaluated):
evaluated = evaluated.lstrip()
if evaluated == "":
return False
return evaluated.split()[0] in (
"def",
"with",
"class",
"for",
"while",
"try:",
"except",
"except:",
"finally:",
"else:",
)
chunk = None
for line_number, line in enumerate(code.split("\n")):
# print "->", inside, line
if line_filter is not None and line_filter(line):
continue
if inside and line and line[0].isalnum() and not isOpener(line):
output.append(getTried("\n".join(chunk), line_number))
chunk = []
inside = False
if inside and not (line.startswith("#") and line.find("SyntaxError:") != -1):
chunk.append(line)
elif line.startswith("#"):
if line.find("SyntaxError:") != -1:
# print "Syntax error detected"
if inside:
# print "Dropping chunk", chunk
chunk = []
inside = False
else:
del output[-1]
elif isOpener(line):
inside = True
chunk = [line]
elif line.strip() == "":
output.append(line)
else:
output.append(getTried(line, line_number))
return "\n".join(output).rstrip() + "\n"
def compileLibraryPath(search_mode, path, stage_dir, decide, action):
my_print("Checking standard library path:", path)
for root, dirnames, filenames in os.walk(path):
dirnames_to_remove = [dirname for dirname in dirnames if "-" in dirname]
for dirname in dirnames_to_remove:
dirnames.remove(dirname)
dirnames.sort()
filenames = [filename for filename in filenames if decide(root, filename)]
for filename in sorted(filenames):
if not search_mode.consider(root, filename):
continue
full_path = os.path.join(root, filename)
my_print(full_path, ":", end=" ")
sys.stdout.flush()
action(stage_dir, path, full_path)
def compileLibraryTest(search_mode, stage_dir, decide, action):
if not os.path.exists(stage_dir):
os.makedirs(stage_dir)
my_dirname = os.path.join(os.path.dirname(__file__), "../../..")
my_dirname = os.path.normpath(my_dirname)
paths = [path for path in sys.path if not path.startswith(my_dirname)]
my_print("Using standard library paths:")
for path in paths:
my_print(path)
for path in paths:
print("Checking path:", path)
compileLibraryPath(
search_mode=search_mode,
path=path,
stage_dir=stage_dir,
decide=decide,
action=action,
)
search_mode.finish()
def run_async(coro):
"""Execute a coroutine until it's done."""
values = []
result = None
while True:
try:
values.append(coro.send(None))
except StopIteration as ex:
result = ex.args[0] if ex.args else None
break
return values, result
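# Example (illustrative):
# async def make_value(): return 42
# values, result = run_async(make_value()) # -> ([], 42)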
def async_iterate(g):
"""Execute async generator until it's done."""
# Test code for Python3, catches all kinds of exceptions.
# pylint: disable=broad-except
# Also Python3 only, pylint: disable=I0021,undefined-variable
res = []
while True:
try:
g.__anext__().__next__()
except StopAsyncIteration:
res.append("STOP")
break
except StopIteration as ex:
if ex.args:
res.append("ex arg %s" % ex.args[0])
else:
res.append("EMPTY StopIteration")
break
except Exception as ex:
res.append(str(type(ex)))
return res
def getTestingCacheDir():
cache_dir = getCacheDir()
result = os.path.join(cache_dir, "tests_state")
makePath(result)
return result
def getTestingCPythonOutputsCacheDir():
cache_dir = getCacheDir()
result = os.path.join(
cache_dir, "cpython_outputs", os.environ.get("NUITKA_TEST_SUITE", "")
)
makePath(result)
return result
def scanDirectoryForTestCases(dirname, template_context=None):
filenames = os.listdir(dirname)
filenames = [
filename
for filename in filenames
if (filename.endswith(".py") and not filename + ".j2" in filenames)
or filename.endswith(".j2")
]
for filename in sorted(filenames):
if not decideFilenameVersionSkip(filename):
continue
if filename.endswith(".j2"):
# Needs to be a dictionary with template arguments.
assert template_context is not None
template = getTemplate(
package_name=None,
template_name=filename,
template_subdir=dirname,
extensions=("jinja2.ext.do",),
)
code = template.render(name=template.name, **template_context)
filename = filename[:-3]
with openTextFile(filename, "w") as output:
output.write(
"'''Automatically generated test, not part of releases or git.\n\n'''\n"
)
output.write(code)
yield filename
def scanDirectoryForTestCaseFolders(dirname):
filenames = os.listdir(dirname)
for filename in sorted(filenames):
if not decideFilenameVersionSkip(filename + ".py"):
continue
filename = os.path.join(dirname, filename)
filename = os.path.relpath(filename)
if (
not os.path.isdir(filename)
or filename.endswith((".dist", ".build"))
or os.path.basename(filename).startswith("venv_")
):
continue
filename_main = getMainProgramFilename(filename)
yield filename, filename_main
def setupCacheHashSalt(test_code_path):
assert os.path.exists(test_code_path)
if os.path.exists(os.path.join(test_code_path, ".git")):
git_cmd = ["git", "ls-tree", "-r", "HEAD", test_code_path]
process = subprocess.Popen(
args=git_cmd,
stdin=getNullInput(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_git, stderr_git = process.communicate()
assert process.returncode == 0, stderr_git
salt_value = hashlib.md5(stdout_git)
else:
salt_value = hashlib.md5()
for filename in getFileList(test_code_path):
if filename.endswith(".py"):
salt_value.update(getFileContents(filename, mode="rb"))
os.environ["NUITKA_HASH_SALT"] = salt_value.hexdigest()
def displayFolderContents(name, path):
test_logger.info("Listing of %s '%s':" % (name, path))
if os.path.exists(path):
if isWin32Windows():
command = "dir /b /s /a:-D %s" % path
else:
command = "ls -Rla %s" % path
os.system(command)
else:
test_logger.info("Does not exist.")
def displayFileContents(name, path):
test_logger.info("Contents of %s %r:" % (name, path))
if os.path.exists(path):
for line in getFileContentByLine(path):
my_print(line)
else:
test_logger.info("Does not exist.")
def someGenerator():
yield 1
yield 2
yield 3
def someGeneratorRaising():
yield 1
raise TypeError(2)
# Checks requirements needed to run each test module, according to special
# comments of the following formats:
# "# nuitka-skip-unless-expression: expression to be evaluated"
# OR
# "# nuitka-skip-unless-imports: module1,module2,..."
def checkTestRequirements(filename):
if os.path.isdir(filename):
candidate = os.path.join(filename, "__main__.py")
if os.path.isfile(candidate):
filename = candidate
for line in readSourceCodeFromFilename(None, filename).splitlines():
if line.startswith("# nuitka-skip-unless-"):
if line[21:33] == "expression: ":
expression = line[33:]
result = subprocess.call(
(
os.environ["PYTHON"],
"-c",
"import sys, os; sys.exit(not bool(%s))" % expression,
),
stdout=getNullOutput(),
stderr=subprocess.STDOUT,
)
if result != 0:
return (False, "Expression '%s' evaluated to false" % expression)
elif line[21:30] == "imports: ":
imports_needed = line[30:].rstrip().split(",")
for i in imports_needed:
if not hasModule(i):
return (
False,
i
+ " not installed for this Python version, but test needs it",
)
# default return value
return (True, "")
class DelayedExecutionThread(threading.Thread):
def __init__(self, timeout, func):
threading.Thread.__init__(self)
self.timeout = timeout
self.func = func
def run(self):
time.sleep(self.timeout)
self.func()
def executeAfterTimePassed(message, timeout, func):
test_logger.info(message % timeout)
alarm = DelayedExecutionThread(timeout=timeout, func=func)
alarm.start()
def killProcessGroup(process_name, pid):
"""Kill a process in a portable way.
Right now SIGINT is used, unclear what to do on Windows
with Python2 or non-related processes.
"""
if isWin32Windows():
test_logger.sysexit("Error, cannot send kill signal on Windows")
else:
test_logger.info("Killing test process group '%s'." % process_name)
os.killpg(pid, signal.SIGINT)
def checkLoadedFileAccesses(loaded_filenames, current_dir):
# Many details to consider, pylint: disable=too-many-branches,too-many-statements
current_dir = os.path.normpath(current_dir)
current_dir = os.path.normcase(current_dir)
current_dir_ext = os.path.normcase(getExternalUsePath(current_dir))
illegal_accesses = []
for loaded_filename in loaded_filenames:
orig_loaded_filename = loaded_filename
loaded_filename = os.path.normpath(loaded_filename)
loaded_filename = os.path.normcase(loaded_filename)
loaded_basename = os.path.basename(loaded_filename)
if isWin32Windows():
if areSamePaths(
os.path.dirname(loaded_filename),
os.path.normpath(os.path.join(os.environ["SYSTEMROOT"], "System32")),
):
continue
if areSamePaths(
os.path.dirname(loaded_filename),
os.path.normpath(os.path.join(os.environ["SYSTEMROOT"], "SysWOW64")),
):
continue
if r"windows\winsxs" in loaded_filename:
continue
# GitHub actions have these in PATH overriding SYSTEMROOT
if r"windows performance toolkit" in loaded_filename:
continue
if r"powershell" in loaded_filename:
continue
if r"azure dev spaces cli" in loaded_filename:
continue
if r"tortoisesvn" in loaded_filename:
continue
if loaded_filename.startswith(current_dir):
continue
if loaded_filename.startswith(os.path.abspath(current_dir)):
continue
if loaded_filename.startswith(current_dir_ext):
continue
ignore = True
for ignored_dir in (
# System configuration is OK
"/etc",
"/usr/etc",
"/usr/local/etc",
# Runtime user state and kernel information is OK.
"/proc",
"/dev",
"/run",
"/sys",
"/tmp",
"/var",
# Locals may of course be loaded.
"/usr/lib/locale",
"/usr/share/locale",
"/usr/share/X11/locale",
# Themes may of course be loaded.
"/usr/share/themes",
# Terminal info files are OK too.
"/lib/terminfo",
):
if isFilenameSameAsOrBelowPath(ignored_dir, loaded_filename):
ignore = False
break
if not ignore:
continue
# Themes may of course be loaded.
if loaded_filename.startswith("/usr/share/themes"):
continue
if "gtk" in loaded_filename and "/engines/" in loaded_filename:
continue
if loaded_filename in (
"/usr",
"/usr/local",
"/usr/local/lib",
"/usr/share",
"/usr/local/share",
"/usr/lib64",
):
continue
# TCL/tk for tkinter for non-Windows is OK.
if loaded_filename.startswith(
(
"/usr/lib/tcltk/",
"/usr/share/tcltk/",
"/usr/lib/tcl/",
"/usr/lib64/tcl/",
)
):
continue
if loaded_filename in (
"/usr/lib/tcltk",
"/usr/share/tcltk",
"/usr/lib/tcl",
"/usr/lib64/tcl",
):
continue
if loaded_filename in (
"/lib",
"/lib64",
"/lib/sse2",
"/lib/tls",
"/lib64/tls",
"/usr/lib/sse2",
"/usr/lib/tls",
"/usr/lib64/tls",
):
continue
if loaded_filename in ("/usr/share/tcl8.6", "/usr/share/tcl8.5"):
continue
if loaded_filename in (
"/usr/share/tcl8.6/init.tcl",
"/usr/share/tcl8.5/init.tcl",
):
continue
if loaded_filename in (
"/usr/share/tcl8.6/encoding",
"/usr/share/tcl8.5/encoding",
):
continue
# System SSL config on Linux. TODO: Should this not rather be included in
# and read from the dist folder?
if loaded_basename == "openssl.cnf":
continue
# Taking these from system is harmless and desirable
if loaded_basename.startswith(("libz.so", "libgcc_s.so")):
continue
# System C libraries are to be expected.
if loaded_basename.startswith(
(
"ld-linux-x86-64.so",
"libc.so.",
"libpthread.so.",
"libm.so.",
"libdl.so.",
"libBrokenLocale.so.",
"libSegFault.so",
"libanl.so.",
"libcidn.so.",
"libcrypt.so.",
"libmemusage.so",
"libmvec.so.",
"libnsl.so.",
"libnss_compat.so.",
"libnss_db.so.",
"libnss_dns.so.",
"libnss_files.so.",
"libnss_hesiod.so.",
"libnss_nis.so.",
"libnss_nisplus.so.",
"libpcprofile.so",
"libresolv.so.",
"librt.so.",
"libthread_db-1.0.so",
"libthread_db.so.",
"libutil.so.",
)
):
continue
# System C++ standard library is also OK.
if loaded_basename.startswith("libstdc++."):
continue
# Curses library is OK from system too.
if loaded_basename.startswith("libtinfo.so."):
continue
# Loaded by C library potentially for DNS lookups.
if loaded_basename.startswith(
(
"libnss_",
"libnsl",
# Some systems load a lot more, this is CentOS 7 on OBS
"libattr.so.",
"libbz2.so.",
"libcap.so.",
"libdw.so.",
"libelf.so.",
"liblzma.so.",
# Some systems load a lot more, this is Fedora 26 on OBS
"libselinux.so.",
"libpcre.so.",
# And this is Fedora 29 on OBS
"libblkid.so.",
"libmount.so.",
"libpcre2-8.so.",
# CentOS 8 on OBS
"libuuid.so.",
)
):
continue
# Loaded by dtruss on macOS X.
if loaded_filename.startswith("/usr/lib/dtrace/"):
continue
# Loaded by cowbuilder and pbuilder on Debian
if loaded_basename == ".ilist":
continue
if "cowdancer" in loaded_filename:
continue
if "eatmydata" in loaded_filename:
continue
# Loading from home directories is OK too.
if any(
isFilenameSameAsOrBelowPath(path, loaded_filename)
for path in ("/home", "/data", "/root", "/Users", "/Library/Preferences")
):
continue
# For Debian builders, /build is OK too.
if loaded_filename.startswith("/build/") or loaded_filename == "/build":
continue
# TODO: Unclear, loading gconv from filesystem of installed system
# may be OK or not. I think it should be.
if loaded_basename == "gconv-modules.cache":
continue
if "/gconv/" in loaded_filename:
continue
if loaded_basename.startswith("libicu"):
continue
if loaded_filename.startswith("/usr/share/icu/"):
continue
# Loading from caches is OK.
if loaded_filename.startswith("/var/cache/"):
continue
# At least Python3.7 considers the default Python3 path and checks it.
if loaded_filename == "/usr/bin/python3":
continue
# Accessing the versioned Python3.x binary is also happening.
if loaded_filename in (
"/usr/bin/python." + version for version in getTestExecutionPythonVersions()
):
continue
binary_path = _python_executable
found = False
while binary_path:
if loaded_filename == binary_path:
found = True
break
if binary_path == os.path.dirname(binary_path):
break
binary_path = os.path.dirname(binary_path)
if loaded_filename == os.path.join(
binary_path,
"python" + ("%d%d" % (_python_version[0], _python_version[1])),
):
found = True
break
if found:
continue
lib_prefix_dir = "/usr/lib/python%d.%s" % (
_python_version[0],
_python_version[1],
)
# TODO: These must all go away, we should not compile from Debian packages
# at all, it is warned against, and it really doesn't matter what wrong
# files get accessed or not.
# PySide accesses its directory.
if loaded_filename == os.path.join(lib_prefix_dir, "dist-packages/PySide"):
continue
# GTK accesses package directories only.
if loaded_filename == os.path.join(lib_prefix_dir, "dist-packages/gtk-2.0/gtk"):
continue
if loaded_filename == os.path.join(lib_prefix_dir, "dist-packages/glib"):
continue
if loaded_filename == os.path.join(lib_prefix_dir, "dist-packages/gtk-2.0/gio"):
continue
if loaded_filename == os.path.join(lib_prefix_dir, "dist-packages/gobject"):
continue
# PyQt5 and PySide6 seem to do this, but won't use the contents then.
if loaded_filename in (
"/usr/lib/qt6/plugins",
"/usr/lib/qt6",
"/usr/lib64/qt6/plugins",
"/usr/lib64/qt6",
"/usr/lib/qt5/plugins",
"/usr/lib/qt5",
"/usr/lib64/qt5/plugins",
"/usr/lib64/qt5",
"/usr/lib/x86_64-linux-gnu/qt5/plugins",
"/usr/lib/x86_64-linux-gnu/qt5",
"/usr/lib/x86_64-linux-gnu",
"/usr/lib",
):
continue
# Can look at the interpreters of the system.
if loaded_basename in "python3":
continue
if loaded_basename in (
"python" + supported_version
for supported_version in getTestExecutionPythonVersions()
):
continue
# Current Python executable can actually be a symlink and
# the real executable which it points to will be on the
# loaded_filenames list. This is all fine, let's ignore it.
# Also, because the loaded_filename can be yet another symlink
# (this is weird, but it's true), let's better resolve its real
# path too.
if os.path.realpath(loaded_filename) == os.path.realpath(sys.executable):
continue
# Accessing SE-Linux is OK.
if loaded_filename in ("/sys/fs/selinux", "/selinux"):
continue
# Looking at device is OK.
if loaded_filename.startswith("/sys/devices/"):
continue
# Allow reading time zone info of local system.
if loaded_filename.startswith("/usr/share/zoneinfo/"):
continue
# The access to .pth files has no effect.
if loaded_filename.endswith(".pth"):
continue
# Looking at the site-packages dir alone is fine.
if loaded_filename.endswith(
("site-packages", "dist-packages", "vendor-packages")
):
continue
# QtNetwork insists on doing this, it seems.
if loaded_basename.startswith(("libcrypto.so", "libssl.so")):
continue
# macOS uses these:
if loaded_basename in (
"libcrypto.1.0.0.dylib",
"libssl.1.0.0.dylib",
"libcrypto.1.1.dylib",
"libffi.dylib",
):
continue
# Linux onefile uses this
if loaded_basename.startswith("libfuse.so."):
continue
# MSVC run time DLLs, due to SxS come from system.
if loaded_basename.upper() in ("MSVCRT.DLL", "MSVCR90.DLL"):
continue
if isMacOS():
ignore = True
for ignored_dir in (
"/System/Library/PrivateFrameworks",
"/System/Library/CoreServices",
"/System/Library/Frameworks/",
"/System/Library/dyld",
"/AppleInternal",
"/System/Volumes/Preboot",
"/usr/lib/system/",
):
if isFilenameSameAsOrBelowPath(ignored_dir, loaded_filename):
ignore = False
break
if not ignore:
continue
if loaded_filename == "/usr/libexec/rosetta/runtime":
continue
if loaded_filename in (
"/usr/lib/libSystem.B.dylib",
"/usr/lib/libc++.1.dylib",
"/usr/lib/libc++abi.dylib",
"/usr/lib/libfakelink.dylib",
"/usr/lib/liboah.dylib",
"/usr/lib/libobjc.A.dylib",
):
continue
illegal_accesses.append(orig_loaded_filename)
return illegal_accesses
def getMainProgramFilename(filename):
for filename_main in os.listdir(filename):
if filename_main.endswith(("Main.py", "Main")):
return filename_main
if filename_main in (
"setup.py",
"setup.cfg",
"pyproject.cpython.toml",
"pyproject.nuitka.toml",
):
return filename_main
test_logger.sysexit(
"""\
Error, no file ends with 'Main.py' or 'Main' in '%s', incomplete test case."""
% (filename)
)
def getInstalledPythonVersion(python_version, must_exist):
result = findInstalledPython(
python_versions=(python_version,), module_name=None, module_version=None
)
if result is None and must_exist:
test_logger.sysexit(
"Error, cannot find required Python version %s installation."
% python_version
)
return result
_sys_path_path = None
def getPythonSysPath():
global _sys_path_path # singleton, pylint: disable=global-statement
if _sys_path_path is None:
_sys_path_path = check_output(
[
os.environ["PYTHON"],
"-c",
"import sys, os; print(os.pathsep.join(sys.path))",
]
)
if str is not bytes:
_sys_path_path = _sys_path_path.decode("utf8")
_sys_path_path = _sys_path_path.strip()
return _sys_path_path
_web_server_process = None
_web_server_port = 27272
_web_server_hostname = "localhost"
def getLocalWebServerUrl():
return "http://%s:%d" % (_web_server_hostname, _web_server_port)
def getLocalWebServerDir(base_dir):
global _web_server_process # singleton, pylint: disable=global-statement
web_dir = os.path.join(getTempDir(), "local-web-server", base_dir)
if _web_server_process is None:
web_server_directory_supporting_pythons = ("3.11", "3.10", "3.9", "3.8", "3.7")
web_server_python = findInstalledPython(
python_versions=web_server_directory_supporting_pythons,
module_name=None,
module_version=None,
)
if web_server_python is None:
return None
os.makedirs(web_dir)
command = [
web_server_python.getPythonExe(),
"-m",
"http.server",
"--bind",
_web_server_hostname,
"--dir",
web_dir,
str(_web_server_port),
]
test_logger.my_print(" ".join(command))
_web_server_process = createProcess(
command,
stdout=sys.stderr,
stderr=sys.stderr,
)
def killWebServerProcess():
_web_server_process.kill()
atexit.register(killWebServerProcess)
return web_dir
def traceExecutedCommand(description, command):
my_print(description, ":", *command, style="pink")
|
a255bc90161f1023f253aa1692ec7e34b1e985d4
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Pyto/Samples/OpenCV/aruco_markers.py
|
6d0e3ce7246391ae2551449192329e6ada753330
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832
| 2022-07-20T14:38:45
| 2022-07-20T14:38:45
| 148,944,721
| 884
| 157
|
MIT
| 2023-02-26T21:34:04
| 2018-09-15T22:29:07
|
C
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
aruco_markers.py
|
"""
An example of detecting ArUco markers with OpenCV.
"""
import cv2
import sys
import cv2.aruco as aruco
device = 0 # Back camera
try:
device = int(sys.argv[1]) # 1 for front camera
except IndexError:
pass
cap = cv2.VideoCapture(device)
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read()
# Check if frame is not empty
if not ret:
continue
# Auto rotate camera
frame = cv2.autorotate(frame, device)
# Convert from BGR to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
corners, ids, _ = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)
frame = aruco.drawDetectedMarkers(frame, corners, ids)
# Display the resulting frame
cv2.imshow('frame', frame)
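# Note: cv2.autorotate is Pyto-specific; on a desktop OpenCV build this loop
# would typically also need cv2.waitKey(1) after cv2.imshow to refresh the window.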
|
74eb8b6394cb2e433cebfb264661e01a4f46104e
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/gluoncv/model_zoo/__init__.py
|
3ca71daed39d7f3cab42609c0ad11fef9d32d020
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294
| 2023-01-19T00:37:33
| 2023-01-19T00:37:33
| 122,896,249
| 6,064
| 1,458
|
Apache-2.0
| 2023-01-19T00:37:35
| 2018-02-26T01:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
__init__.py
|
"""GluonCV Model Zoo"""
# pylint: disable=wildcard-import
from .model_zoo import get_model, get_model_list
from .model_store import pretrained_model_list
from .rcnn.faster_rcnn import *
from .rcnn.mask_rcnn import *
from .ssd import *
from .yolo import *
from .cifarresnet import *
from .cifarwideresnet import *
from .fcn import *
from .pspnet import *
from .deeplabv3 import *
from .deeplabv3_plus import *
from .deeplabv3b_plus import *
from . import segbase
from .resnetv1b import *
from .se_resnet import *
from .nasnet import *
from .simple_pose.simple_pose_resnet import *
from .simple_pose.mobile_pose import *
from .action_recognition import *
from .wideresnet import *
from .resnest import *
from .resnext import *
from .alexnet import *
from .densenet import *
from .googlenet import *
from .inception import *
from .xception import *
from .resnet import *
from .squeezenet import *
from .vgg import *
from .mobilenet import *
from .residual_attentionnet import *
from .center_net import *
from .hrnet import *
from .siamrpn import *
from .fastscnn import *
from .monodepthv2 import *
from .smot import *
---- record ----
blob_id: c90036b5da675c1a8dfd451ca4474e92fc56ba25 | directory_id: d8aabbc108b074817cb05eba4acff68d4f5c2d6c | content_id: 8f1082b7a9eca4ac49daefb9c3522e49a149587c
path: /test/zmq/zmq_watcher_client.py | filename: zmq_watcher_client.py | extension: py | length_bytes: 295
repo_name: microsoft/tensorwatch | branch_name: refs/heads/master
snapshot_id: e5e868795bd1536f9f2e3cb56b34a97a82e6704e | revision_id: f59730dc7a8735232ef417685800652372c3b5dd
detected_licenses: ["MIT", "BSD-2-Clause", "LGPL-2.1-or-later", "BSD-3-Clause", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-06-29T21:52:27.900779 | revision_date: 2023-06-12T18:21:59 | committer_date: 2023-06-12T18:21:59
github_id: 186,783,422 | star_events_count: 3,626 | fork_events_count: 394
gha_event_created_at: 2023-08-30T06:59:14 | gha_created_at: 2019-05-15T08:29:34 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from tensorwatch.watcher_client import WatcherClient
import time
from tensorwatch import utils
utils.set_debug_verbosity(10)
def main():
watcher = WatcherClient()
stream = watcher.create_stream(expr='lambda vars:vars.x**2')
stream.console_debug = True
input('pause')
if __name__ == '__main__':
    main()
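# A minimal publisher-side counterpart sketch (it would run in a separate
# process); the Watcher API is assumed from the tensorwatch README, and it
# feeds the `vars.x` consumed by the stream expression above.
def publisher_sketch():
    import tensorwatch as tw
    watcher = tw.Watcher()       # ZMQ publisher with default settings
    for x in range(100):
        watcher.observe(x=x)     # surfaces as `vars.x` on the client side
        time.sleep(1)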
---- record ----
blob_id: 848e07486894b255c50f110fbe42ce771a4a1b6d | directory_id: 84724b34b3f1e84dc53cbca5f3660590dbc34a9f | content_id: 25cd75a125296376986bebb179f92f83df38635d
path: /nova/tests/unit/policies/test_migrations.py | filename: test_migrations.py | extension: py | length_bytes: 3,163
repo_name: openstack/nova | branch_name: refs/heads/master
snapshot_id: 2c24b64e3677595611715bae6dda14edd3f90a24 | revision_id: 065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-28T15:10:05.126314 | revision_date: 2023-08-25T20:31:27 | committer_date: 2023-08-25T20:31:27
github_id: 790,031 | star_events_count: 2,287 | fork_events_count: 2,320
gha_event_created_at: 2023-07-08T02:10:29 | gha_created_at: 2010-07-22T02:04:27 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from nova.api.openstack.compute import migrations
from nova.policies import migrations as migrations_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class MigrationsPolicyTest(base.BasePolicyTest):
"""Test Migrations APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(MigrationsPolicyTest, self).setUp()
self.controller = migrations.MigrationsController()
self.req = fakes.HTTPRequest.blank('')
# With legacy rule, any admin is able to list migrations.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.API.get_migrations')
def test_list_migrations_policy(self, mock_migration):
rule_name = migrations_policies.POLICY_ROOT % 'index'
self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
class MigrationsNoLegacyNoScopeTest(MigrationsPolicyTest):
"""Test Migrations API policies with deprecated rules
disabled, but scope checking still disabled.
"""
without_deprecated_rules = True
class MigrationsScopeTypePolicyTest(MigrationsPolicyTest):
"""Test Migrations APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(MigrationsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is not allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
class MigrationsScopeTypeNoLegacyPolicyTest(
MigrationsScopeTypePolicyTest):
"""Test Migrations APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
---- record ----
blob_id: 11937e326013a3eebdcae7f3201085771bf90b3e | directory_id: b38247a5d84d8b52ce8363f8dd81629cfbe17f65 | content_id: a006daafec5e69e3340a256f9274ee15769bc48b
path: /reagent/test/net_builder/test_discrete_dqn_net_builder.py | filename: test_discrete_dqn_net_builder.py | extension: py | length_bytes: 6,230
repo_name: facebookresearch/ReAgent | branch_name: refs/heads/main
snapshot_id: 7f2b82eaaf7a19e58cc50aacc307d7b001231440 | revision_id: c5f1a8371a677b4f8fb0882b600bf331eba5259d
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-09-05T15:56:49.175072 | revision_date: 2023-08-29T21:48:40 | committer_date: 2023-08-29T21:48:40
github_id: 98,565,575 | star_events_count: 1,480 | fork_events_count: 290
gha_event_created_at: 2023-09-12T23:09:30 | gha_created_at: 2017-07-27T17:53:21 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
from reagent.core import types as rlt
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData, NormalizationParameters
from reagent.net_builder import discrete_dqn
from reagent.net_builder.unions import DiscreteDQNNetBuilder__Union
from reagent.preprocessing.identify_types import CONTINUOUS
from torchrec import PoolingType
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import DiscreteDqnPredictorWrapper
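# The conditional import above selects the FB-internal predictor wrapper when
# running inside Facebook's infrastructure and the open-source wrapper
# otherwise; the tests below rely only on the shared wrapper interface.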
class TestDiscreteDQNNetBuilder(unittest.TestCase):
def _test_discrete_dqn_net_builder(
self,
chooser: DiscreteDQNNetBuilder__Union,
state_feature_config: rlt.ModelFeatureConfig,
serving_module_class=DiscreteDqnPredictorWrapper,
) -> None:
builder = chooser.value
state_normalization_data = NormalizationData(
dense_normalization_parameters={
fi.feature_id: NormalizationParameters(
feature_type=CONTINUOUS, mean=0.0, stddev=1.0
)
for fi in state_feature_config.float_feature_infos
}
)
action_names = ["L", "R"]
q_network = builder.build_q_network(
state_feature_config, state_normalization_data, len(action_names)
)
x = q_network.input_prototype()
y = q_network(x)
self.assertEqual(y.shape, (1, 2))
serving_module = builder.build_serving_module(
q_network, state_normalization_data, action_names, state_feature_config
)
self.assertIsInstance(serving_module, serving_module_class)
def test_fully_connected(self) -> None:
# Intentionally used this long path to make sure we included it in __init__.py
# pyre-fixme[28]: Unexpected keyword argument `FullyConnected`.
chooser = DiscreteDQNNetBuilder__Union(
FullyConnected=discrete_dqn.fully_connected.FullyConnected()
)
state_feature_config = rlt.ModelFeatureConfig(
float_feature_infos=[
rlt.FloatFeatureInfo(name=f"f{i}", feature_id=i) for i in range(3)
]
)
self._test_discrete_dqn_net_builder(chooser, state_feature_config)
def test_dueling(self) -> None:
# Intentionally used this long path to make sure we included it in __init__.py
# pyre-fixme[28]: Unexpected keyword argument `Dueling`.
chooser = DiscreteDQNNetBuilder__Union(Dueling=discrete_dqn.dueling.Dueling())
state_feature_config = rlt.ModelFeatureConfig(
float_feature_infos=[
rlt.FloatFeatureInfo(name=f"f{i}", feature_id=i) for i in range(3)
]
)
self._test_discrete_dqn_net_builder(chooser, state_feature_config)
def test_fully_connected_with_embedding(self) -> None:
# Intentionally used this long path to make sure we included it in __init__.py
# pyre-fixme[28]: Unexpected keyword argument `FullyConnectedWithEmbedding`.
chooser = DiscreteDQNNetBuilder__Union(
FullyConnectedWithEmbedding=discrete_dqn.fully_connected_with_embedding.FullyConnectedWithEmbedding()
)
EMBEDDING_TABLE_SIZE = 10
EMBEDDING_DIM = 32
# only id_list
state_feature_config = rlt.ModelFeatureConfig(
float_feature_infos=[
rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5)
],
id_list_feature_configs=[
rlt.IdListFeatureConfig(
name="A", feature_id=10, id_mapping_name="A_mapping"
)
],
id_mapping_config={
"A_mapping": rlt.IdMappingConfig(
embedding_table_size=EMBEDDING_TABLE_SIZE,
embedding_dim=EMBEDDING_DIM,
hashing=False,
pooling_type=PoolingType.SUM,
)
},
)
self._test_discrete_dqn_net_builder(
chooser, state_feature_config=state_feature_config
)
# only id_score_list
state_feature_config = rlt.ModelFeatureConfig(
float_feature_infos=[
rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5)
],
id_score_list_feature_configs=[
rlt.IdScoreListFeatureConfig(
name="A", feature_id=10, id_mapping_name="A_mapping"
)
],
id_mapping_config={
"A_mapping": rlt.IdMappingConfig(
embedding_table_size=EMBEDDING_TABLE_SIZE,
embedding_dim=EMBEDDING_DIM,
hashing=False,
pooling_type=PoolingType.SUM,
)
},
)
self._test_discrete_dqn_net_builder(
chooser, state_feature_config=state_feature_config
)
# id_list + id_score_list
state_feature_config = rlt.ModelFeatureConfig(
float_feature_infos=[
rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5)
],
id_list_feature_configs=[
rlt.IdListFeatureConfig(
name="A", feature_id=10, id_mapping_name="A_mapping"
)
],
id_score_list_feature_configs=[
rlt.IdScoreListFeatureConfig(
name="B", feature_id=100, id_mapping_name="A_mapping"
)
],
id_mapping_config={
"A_mapping": rlt.IdMappingConfig(
embedding_table_size=EMBEDDING_TABLE_SIZE,
embedding_dim=EMBEDDING_DIM,
hashing=False,
pooling_type=PoolingType.SUM,
)
},
)
self._test_discrete_dqn_net_builder(
chooser, state_feature_config=state_feature_config
)
---- record ----
blob_id: d16db12c348b4a987849f2c0acdc285d6fd4d21a | directory_id: 1be404eb8dcae6f6f5acaaf54d2389765990b23e | content_id: 3b9ca8ace653487212f613a17900e6675d32acec
path: /local/GIPManager.py | filename: GIPManager.py | extension: py | length_bytes: 41,243
repo_name: SeaHOH/GotoX | branch_name: refs/heads/master
snapshot_id: 7036fecfcd8955783a69182b1f9a32599e52ded8 | revision_id: bab19173fe5c73fff706376183e657dc4d88d366
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-08-31T09:55:35.198791 | revision_date: 2023-08-28T15:19:05 | committer_date: 2023-08-28T15:19:05
github_id: 71,294,077 | star_events_count: 794 | fork_events_count: 262
gha_event_created_at: 2022-09-13T01:14:43 | gha_created_at: 2016-10-18T21:45:02 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# coding:utf-8
'''Auto check and update google IPs'''
import os
import logging
import random
import socket
import collections
from shutil import copyfile
from copy import deepcopy
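# Note: `mtime` below is not part of the stdlib time module; GotoX presumably
# injects it via its compat layer (a monotonic clock used for interval timing).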
from time import time, mtime, sleep, localtime, strftime
from threading import _start_new_thread as start_new_thread
from .common.internet_active import internet_v4, internet_v6
from .common.net import NetWorkIOError, random_hostname, isip, isipv4, isipv6
from .common.decorator import make_lock_decorator
from .common.path import data_dir
from .common.util import LimiterFull
from .compat.openssl import zero_errno, zero_EOF_error, CertificateError
from .HTTPUtil import http_gws
from .ProxyServer import network_test
from .GlobalConfig import GC
# Connection timeout settings, unit: seconds
g_timeout = 5
g_conntimeout = 1.5
g_handshaketimeout = 3
def get_index_1(o):
return o[1]
def clear_zero_file(file):
if os.path.isfile(file) and os.path.getsize(file) == 0:
os.remove(file)
def exists(file):
clear_zero_file(file)
return os.path.exists(file)
def getmtime(file):
st = os.stat(file)
return max(st.st_mtime, st.st_ctime)
def backup_file(file, bak_file=None, no_copy=None):
if exists(file):
if bak_file is None:
bak_file = file + '.bak'
if no_copy:
if exists(bak_file):
os.remove(bak_file)
os.rename(file, bak_file)
else:
copyfile(file, bak_file)
def restore_file(file, bak_file=None):
if not exists(file):
if bak_file is None:
bak_file = file + '.bak'
if exists(bak_file):
copyfile(bak_file, file)
def get_littery_list(iterable):
    l = list(iterable)
    random.shuffle(l)
    return l
if GC.LINK_PROFILE == 'ipv4':
is_ip_use = isipv4
elif GC.LINK_PROFILE == 'ipv6':
is_ip_use = isipv6
elif GC.LINK_PROFILE == 'ipv46':
is_ip_use = isip
_lock_file_source = make_lock_decorator()
_lock_file_stat = make_lock_decorator()
_lock_log_stat = make_lock_decorator(rlock=True)
_lock_get_ip = make_lock_decorator()
_lock_save_use = make_lock_decorator()
_lock_remove_slow = make_lock_decorator()
_lock_pick_worker = make_lock_decorator()
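# A minimal sketch of what make_lock_decorator (imported from
# .common.decorator above) plausibly provides: each call allocates its own
# lock and returns a decorator that serializes the wrapped callable, e.g.:
#
#   def make_lock_decorator(rlock=False):
#       lock = threading.RLock() if rlock else threading.Lock()
#       def decorator(func):
#           def wrapper(*args, **kwargs):
#               with lock:
#                   return func(*args, **kwargs)
#           return wrapper
#       return decorator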
class IPSource:
ip_file = os.path.join(data_dir, 'ip.txt')
ip_file_ex = os.path.join(data_dir, 'ip_ex.txt')
ip_file_bad = os.path.join(data_dir, 'ip_bad')
ip_file_del = os.path.join(data_dir, 'ip_del.txt')
ip_stat_split = '|'
ex_del_min = 60 * 60 * 2
ex_del_max = 60 * 60 * 12
time_to_reload = 60 * 60 * 8
save_stat_interval = 60 * 15
save_per_log_stat = 200
save_stat_bad_interval = 60 * 15
save_per_log_stat_bad = 20
def __init__(self):
now = time()
self.logger = logging.getLogger('[ip source]')
self.log_stat_times = 0
self.log_stat_bad_times = 0
self.ip_mtime = 0
self.ip_mtime_ex = 0
self.ip_mtime_ex_start_time = 0
self.ip_stat_block = {}
self.ip_stat_files = []
self.ip_set_bad = set()
self.ip_set_assoeted = set()
self.save_stat_time = now
self.save_stat_bad_time = now
self.update_time = now
self.load_config()
self.load_stat_bad()
self.load_source()
def load_config(self):
self.block_prefixs = GC.PICKER_BLOCK
self.stat_days = GC.PICKER_STATDAYS
self.block_time = GC.PICKER_BLOCKTIME * 60 * 60
self.fail_times_to_block = GC.PICKER_TIMESBLOCK
self.block_times_to_del = GC.PICKER_TIMESDEL
self.del_assoeted_ip = GC.PICKER_DELASSOETED
if GC.PICKER_SORTSTAT:
self.sort_ip_stat = lambda s: self.sort_ip_stat_good((s[-1][:-1], s[:-1]))
self.sort_ip_stat_bad = get_index_1
else:
self.sort_ip_stat = None
self.sort_ip_stat_bad = None
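    # Ranking heuristic: higher scores mean healthier IPs (recheck successes
    # count for; check failures and recorded blocks count heavily against).
    # Lists sorted ascending by this key are consumed with pop(), i.e. from
    # the high-score end first.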
def sort_ip_stat_good(self, p):
ip, s = p
return s[2] * 2 / max(s[0] * self.ip_stat_bad.get(ip, [1])[0], 1) - s[3] - s[1] * 10
@_lock_file_source
def _load_source(self, file):
ip_cnt_source = 0
ip_set_block = set()
ip_set = set()
        # the backup is not read automatically
if exists(file):
with open(file, 'r') as f:
for line in f:
                    # the IP is not validated here
ip = line.strip()
if ip:
ip_cnt_source += 1
if line.startswith(self.block_prefixs):
ip_set_block.add(ip)
else:
ip_set.add(ip)
ip_cnt = len(ip_set)
ip_cnt_block = len(ip_set_block)
ip_cnt_dup = ip_cnt_source - ip_cnt - ip_cnt_block
        self.logger.debug('%r: loaded %d IPs, did not load %d, found %d duplicates',
                          file, ip_cnt, ip_cnt_dup, ip_cnt_block)
return ip_set, ip_set_block, bool(ip_cnt_dup)
def load_source(self):
ip_set, self.ip_set_block, need_save_ip = self._load_source(self.ip_file)
ip_set_ex, self.ip_set_ex_block, need_save_ip_ex = self._load_source(self.ip_file_ex)
if hasattr(self, 'ip_set_del'):
ip_set_del = self.ip_set_del
else:
ip_set_del, self.ip_set_del_block, _ = self._load_source(self.ip_file_del)
self.load_time = time()
self.ip_set = ip_set - ip_set_ex
self.ip_set_ex = ip_set_ex
self.ip_set_del = ip_set_del - ip_set - ip_set_ex
if not self.ip_stat_files:
self.load_stat()
ip_set_add = ip_set_ex - ip_set
if ip_set_add:
            self.logger.test('Detected newly added IPs, count: %d.', len(ip_set_add))
if need_save_ip or self.ip_set != ip_set or ip_set_add:
self.save_source(self.ip_file)
if need_save_ip_ex:
self.save_source(self.ip_file_ex)
ip_set_undel = ip_set_del - self.ip_set_del
if ip_set_undel:
            self.logger.test('Detected IPs whose permanent block was revoked, count: %d.', len(ip_set_undel))
_ip = ip_set_undel.pop()
for ip in ip_set_undel:
self.reset_ip_stat(ip, save=False)
self.reset_ip_stat(_ip)
@_lock_file_source
def _save_source(self, ip_set, file):
backup_file(file, no_copy=True)
with open(file, 'w', newline='\n') as f:
for ip in ip_set:
f.write(ip)
f.write('\n')
        self.logger.debug('%r: saved %d IPs', file, len(ip_set))
def save_source(self, file):
mtime = getmtime(file) if exists(file) else time()
if file is self.ip_file:
self._save_source(self.ip_set | self.ip_set_block |
self.ip_set_ex | self.ip_set_ex_block, file)
elif file is self.ip_file_ex:
self._save_source(self.ip_set_ex, file)
elif file is self.ip_file_del:
self._save_source(self.ip_set_del | self.ip_set_del_block, file)
os.utime(file, times=(mtime, mtime))
def get_stat_filenames(self, clear_outdated=True):
now = time()
filenames = []
for i in range(self.stat_days):
n = strftime('%y%j', localtime(now - 3600 * 24 * i))
filename = os.path.join(data_dir, 'ip_stat_' + n)
filenames.append(filename)
filenames.append(os.path.join(data_dir, 'ip_stat_bak'))
if clear_outdated:
for name in os.listdir(data_dir):
if name.startswith('ip_stat_'):
isdel = True
for filename in filenames:
if filename.endswith(name):
isdel = False
break
if isdel:
os.remove(os.path.join(data_dir, name))
return filenames
@_lock_file_stat
def load_stat(self):
ip_unstat = set()
ip_stat = {}
ip_stat_today = None
ip_stat_files = self.get_stat_filenames()
restore_file(ip_stat_files[1], ip_stat_files[-1])
for file in ip_stat_files[:-1]:
if not exists(file):
continue
with open(file, 'r') as f:
for line in f:
try:
(check_ok_times, check_fail_times,
recheck_ok_times, recheck_fail_times,
unstat, ip) = _ip_stat = \
[int(x) if x.isdigit() else x
for x in line.split(self.ip_stat_split)]
except:
self.logger.debug('load_stat: %r', line)
else:
ip = ip.strip()
if _ip_stat[4]:
ip_unstat.add(ip)
if ip in ip_unstat or \
ip in self.ip_set_del or \
ip.startswith(self.block_prefixs):
continue
if ip in ip_stat:
ip_stat[ip] = [x + y
for x, y in zip(ip_stat[ip], _ip_stat[:-1])]
else:
ip_stat[ip] = _ip_stat[:-1]
if file is ip_stat_files[0]:
ip_stat_today = deepcopy(ip_stat)
self.ip_stat_files = ip_stat_files
self.ip_stat = ip_stat
self.ip_stat_today = ip_stat_today or {}
@_lock_file_stat
def _save_stat(self, ip_stat, ip_stat_file, sort_ip_stat):
ip_stat = (_ip_stat + [ip + '\n'] for ip, _ip_stat in ip_stat.items())
if sort_ip_stat:
ip_stat = sorted(ip_stat, key=sort_ip_stat)
with open(ip_stat_file, 'w', newline='\n') as f:
for _ip_stat in ip_stat:
_ip_stat = (str(x) for x in _ip_stat)
f.write(self.ip_stat_split.join(_ip_stat))
def save_stat(self):
backup_file(self.ip_stat_files[0], self.ip_stat_files[-1], no_copy=True)
self._save_stat(self.ip_stat_today, self.ip_stat_files[0], self.sort_ip_stat)
if not self.ip_stat_files[0].endswith(strftime('%y%j')):
self.load_stat()
self.save_stat_bad()
@_lock_file_stat
def load_stat_bad(self):
ip_stat_bad = {}
restore_file(self.ip_file_bad)
if exists(self.ip_file_bad):
with open(self.ip_file_bad, 'r') as f:
for line in f:
try:
(block_times, del_times, log_time,
ip) = _ip_stat_bad = \
[int(x) if x.isdigit() else x
for x in line.split(self.ip_stat_split)]
except:
self.logger.debug('load_stat_bad: %r', line)
else:
ip = ip.strip()
ip_stat_bad[ip] = _ip_stat_bad[:-1]
self.ip_stat_bad = ip_stat_bad
def save_stat_bad(self):
backup_file(self.ip_file_bad, no_copy=True)
self._save_stat(self.ip_stat_bad, self.ip_file_bad, self.sort_ip_stat_bad)
@_lock_log_stat
def _log_stat(self, ip, index, save=False):
# 0: check_ok_times
# 1: check_fail_times
# 2: recheck_ok_times
# 3: recheck_fail_times
# 4: unstat
skip_log = True
for ip_stat in (self.ip_stat, self.ip_stat_today):
if ip in ip_stat:
_ip_stat = ip_stat[ip]
skip_log = False
            elif index == 4 and skip_log:
continue
else:
ip_stat[ip] = _ip_stat = [0] * 5
if index in (1, 3):
_ip_stat[4] = 0
_ip_stat[index] += 1
if index in (1, 3):
_, cf, _, _, _ = self.ip_stat[ip]
_, _, _, rf, _ = self.ip_stat_today[ip]
if ip in self.ip_stat_block:
_ip_stat_block = self.ip_stat_block[ip]
else:
self.ip_stat_block[ip] = _ip_stat_block = [0] * 2
_ip_stat_block[0] = cf
if ip in self.ip_stat_bad:
_ip_stat_block[1] = self.ip_stat_bad[ip][0]
if cf + rf - _ip_stat_block[0] > self.fail_times_to_block:
_ip_stat_block[0] = cf
self.block_ip(ip)
if not save:
if skip_log:
return
self.log_stat_times += 1
save = self.log_stat_times >= self.save_per_log_stat or \
time() - self.save_stat_time > self.save_stat_interval
if save:
self.log_stat_times = 0
self.save_stat()
self.save_stat_time = time()
def report_check_ok(self, ip):
self._log_stat(ip, 0)
def report_check_fail(self, ip):
self._log_stat(ip, 1)
def report_recheck_ok(self, ip):
self._log_stat(ip, 2)
def report_recheck_fail(self, ip):
self._log_stat(ip, 3)
@_lock_log_stat
def _log_stat_bad(self, ip, index, save=False):
# 0: block_times
# 1: del_times
# 2: log_time
if ip in self.ip_stat_bad:
_ip_stat_bad = self.ip_stat_bad[ip]
else:
self.ip_stat_bad[ip] = _ip_stat_bad = [0] * 3
_ip_stat_bad[index] += 1
_ip_stat_bad[2] = int(time())
        if index == 0:
bt, dt, _ = self.ip_stat_bad[ip]
if ip in self.ip_stat_block:
_ip_stat_block = self.ip_stat_block[ip]
else:
self.ip_stat_block[ip] = _ip_stat_block = [0] * 2
if ip in self.ip_stat_today:
_ip_stat_block[0] = self.ip_stat[ip][1]
_ip_stat_block[1] = bt
if bt - _ip_stat_block[1] > self.block_times_to_del:
_ip_stat_block[1] = bt
self.del_ip(ip)
if not save:
self.log_stat_bad_times += 1
save = self.log_stat_bad_times >= self.save_per_log_stat_bad or \
time() - self.save_stat_bad_time > self.save_stat_bad_interval
if save:
self.log_stat_bad_times = 0
self.save_stat_bad()
self.save_stat_bad_time = time()
def block_ip(self, ip):
self._log_stat_bad(ip, 0)
self.ip_set_bad.add(ip)
def unblock_ip(self, ip):
self.ip_set_bad.discard(ip)
def del_ip(self, ip):
if not self.del_assoeted_ip and ip in self.ip_set_assoeted:
return
self._log_stat_bad(ip, 1, save=True)
self.ip_set_del.add(ip)
self.ip_set.discard(ip)
if ip in self.ip_set_ex:
self.ip_set_ex.remove(ip)
self.save_source(self.ip_file_ex)
self.save_source(self.ip_file)
self.save_source(self.ip_file_del)
def undel_ip(self, ip):
self.ip_set_del.discard(ip)
self.ip_set.add(ip)
self.save_source(self.ip_file)
self.save_source(self.ip_file_del)
def reset_ip_stat(self, ip, save=True):
self._log_stat(ip, 4, save=save)
self.ip_stat_bad.pop(ip, None)
self.ip_set_bad.discard(ip)
self.ip_set_del.discard(ip)
if save:
self.save_stat_bad()
self.save_source(self.ip_file_del)
def make_good_list(self):
ip_list = sorted(self.ip_stat.items(), key=self.sort_ip_stat_good)
return [ip for ip, _ in ip_list if ip in self.ip_set_good]
def update_list(self, update_source=False):
now = time()
if update_source:
self.load_source()
elif now - self.update_time < 60:
return
self.update_time = now
self.ip_set_bad = set(ip for ip, (_, _, t) in self.ip_stat_bad.items() if now - t < self.block_time) \
- self.ip_set_del
self.ip_set_good = set(ip for ip, (co, _, ro, _, unstat) in self.ip_stat.items() if co and not unstat) \
& self.ip_set \
- self.ip_set_ex \
- self.ip_set_bad \
- self.ip_set_del \
- self.ip_set_used
self.ip_set_weak = set(self.ip_stat_bad.keys()) \
& self.ip_set \
- self.ip_set_ex \
- self.ip_set_good \
- self.ip_set_bad \
- self.ip_set_del \
- self.ip_set_used
self.ip_list_ex = get_littery_list(self.ip_set_ex
- self.ip_set_bad
- self.ip_set_used)
self.ip_list = get_littery_list(self.ip_set
- self.ip_set_ex
- self.ip_set_assoeted
- self.ip_set_good
- self.ip_set_weak
- self.ip_set_bad
- self.ip_set_del
- self.ip_set_used)
self.ip_list_weak = get_littery_list(self.ip_set_weak)
def check_update(self, force=False):
now = time()
update_source = False
ip_mtime = ip_mtime_ex = 0
if exists(self.ip_file):
ip_mtime = getmtime(self.ip_file)
if ip_mtime > self.ip_mtime:
backup_file(self.ip_file)
else:
            self.logger.error('IP list file "%s" was not found, please create it!', self.ip_file)
if exists(self.ip_file_ex):
ip_mtime_ex = getmtime(self.ip_file_ex)
if ip_mtime_ex > self.ip_mtime_ex:
backup_file(self.ip_file_ex)
self.ip_mtime_ex_start_time = now
elif self.ip_mtime_ex_start_time:
self.ip_mtime_ex_start_time = 0
update_source = True
if ip_mtime > self.ip_mtime or ip_mtime_ex > self.ip_mtime_ex:
self.ip_mtime = ip_mtime
self.ip_mtime_ex = ip_mtime_ex
update_source = True
elif len(self.ip_list_weak) < len(self.ip_set_weak) // 2 or \
now - self.load_time > self.time_to_reload:
update_source = True
if force or update_source:
self.update_list(update_source=update_source)
if ip_mtime_ex:
pass_time = now - self.ip_mtime_ex_start_time
idle_time = self.ip_mtime_ex_start_time - ip_mtime_ex
if idle_time > self.ex_del_max:
ex_del_max = self.ex_del_max ** 2 // idle_time
else:
ex_del_max = self.ex_del_max
if pass_time > ex_del_max or \
len(self.ip_list_ex) == 0 and pass_time > self.ex_del_min:
os.remove(self.ip_file_ex)
self.ip_mtime_ex = 0
self.ip_mtime_ex_start_time = 0
                self.logger.test('Removed the preferred-IP list file: %s', self.ip_file_ex)
return update_source
class IPPoolSource:
check_per_ip = 50
get_per_ip_good = 20
get_per_ip_other = 10
save_interval = 60 * 5
save_per_save_cmd = 10
def __new__(cls, ip_source, type):
m = object.__new__(cls)
setattr(cls, type, m)
return m
def __init__(self, ip_source, type):
now = time()
self.update_time = now
self.last_save_time = now
self.save_cmd_times = 0
self.check_cnt = 0
self._ip_source = ip_source
self.type = type
self.ip_file = os.path.join(data_dir, 'ip_' + type)
self.ip_set, self.ip_set_block, _ = self._load_source(self.ip_file)
ip_source.ip_set_assoeted |= self.ip_set
self.ip_list_ed = collections.deque()
def __getattr__(self, name):
return getattr(self._ip_source, name)
def update_list_good(self, force=False):
now = time()
if not force and now - self.update_time < 60:
return
self.update_time = now
self.get_cnt = 0
self.get_cnt_good = 0
self.get_cnt_other = 0
ip_list_good = self.make_good_list()
ip_set = self.ip_set \
- self.ip_set_ex \
- self.ip_set_weak \
- self.ip_set_bad \
- self.ip_set_del \
- self.ip_set_used \
- set(self.ip_list_ed)
self.ip_list_good = [ip for ip in ip_list_good if ip in ip_set]
self.ip_list_other = get_littery_list(ip_set - set(self.ip_list_good))
self.cnt_to_update_good = max((len(self.ip_list_good) + 1) // 2, 50)
def check_update(self, force=False):
if self._ip_source.check_update(force=force):
for m in (self.gae, self.gws):
m.update_list_good(force=force)
elif not self.ip_list_good or self.get_cnt_good > self.cnt_to_update_good:
self.update_list_good(force=force)
def _get_ip(self):
self.get_cnt += 1
if self.ip_list_good and \
self.get_cnt_good * self.get_per_ip_good < self.get_cnt:
self.get_cnt_good += 1
return self.ip_list_good.pop()
if self.ip_list_other and \
self.get_cnt_other * self.get_per_ip_other < self.get_cnt:
self.get_cnt_other += 1
return self.ip_list_other.pop()
ip_list = self.ip_list_ex or \
self.ip_list or \
self.ip_list_good or \
self.ip_list_other or \
self.ip_list_weak
if ip_list:
return ip_list.pop()
@_lock_get_ip
def get_ip(self):
if self.ip_list_ed:
return self.ip_list_ed.pop(), self.type
if self.check_cnt > self.check_per_ip:
network_test()
self.check_update()
self.check_cnt = 0
ip = self._get_ip()
while ip and not is_ip_use(ip) and (
not internet_v4.last_stat and isipv4(ip) or
not internet_v6.last_stat and isipv6(ip)):
ip = self._get_ip()
if ip:
self.check_cnt += 1
if ip in self.gae.ip_set:
type = 'gae'
elif ip in self.gws.ip_set:
type = 'gws'
else:
type = None
return ip, type
else:
self.check_update(force=True)
return None, None
def push_ip(self, ip, type=None):
m = getattr(self, type or self.type, self)
m.ip_list_ed.appendleft(ip)
def save_source(self, force=False):
now = time()
self.save_cmd_times += 1
if force or self.save_cmd_times >= self.save_per_save_cmd or \
now - self.last_save_time > self.save_interval:
self.save_cmd_times = 0
self._save_source(self.ip_set | self.ip_set_block, self.ip_file)
self.last_save_time = now
def add_ip(self, ip, type=None):
m = getattr(self, type or self.type, self)
m.ip_set.add(ip)
ip_source.ip_set_assoeted.add(ip)
m.save_source()
def remove_ip(self, ip, type=None):
m = getattr(self, type or self.type, self)
m.ip_set.discard(ip)
ip_source.ip_set_assoeted.discard(ip)
m.save_source()
class IPManager:
pick_http_req = (
b'HEAD / HTTP/1.1\r\n'
b'Host: www.appspot.com\r\n'
b'Connection: Close\r\n\r\n'
)
pick_gae_req = (
b'HEAD / HTTP/1.1\r\n'
b'Host: gweb-cloudblog-publish.appspot.com\r\n'
b'Connection: Close\r\n\r\n'
)
pick_gws_req = (
b'HEAD / HTTP/1.1\r\n'
b'Host: www.google.com\r\n'
b'Connection: Close\r\n\r\n'
)
pick_gae_code = b'404', b'405', b'502'
pick_gae_verify_code = b'500', b'302'
pick_gws_res = (
b' Found\r\n'
b'Location: https://console.cloud.google.com/appengine'
)
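    # Heuristic used by check_type_status below: after a HEAD request to an
    # IP, a GAE frontend answers Host www.appspot.com with 404/405/502, while
    # a GWS frontend replies "302 Found" redirecting to the App Engine
    # console, which pick_gws_res matches.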
ip_set = set()
def __new__(cls, ip_source):
m = object.__new__(cls)
setattr(cls, ip_source.type, m)
return m
def __init__(self, ip_source):
self.running = False
self.pick_worker_cnt = 0
self.kill_pick_worker_cnt = 0
type = ip_source.type
if type == 'gae':
self.check_callback = self.check_gae_callback
elif type == 'gws':
self.check_callback = self.check_gws_callback
self.type = type
self.logger = logging.getLogger('[picker %s]' % type)
self.logger.setLevel(GC.LOG_LEVEL)
self.list_name = 'google_' + type
self.cache_key = self.list_name + '|:443'
self.ip_list = collections.deque(GC.IPLIST_MAP[self.list_name])
GC.IPLIST_MAP[self.list_name] = self.ip_list
self.ip_set |= set(self.ip_list)
ip_source._ip_source.ip_set_used = self.ip_set
self.ip_source = ip_source
self.load_config()
now = mtime()
self.last_update = now
self.last_check = now - self.min_recheck_time
def load_config(self):
if self.type == 'gae':
enable = GC.PICKER_GAE_ENABLE
min_recheck_time = GC.PICKER_GAE_MINRECHECKTIME
min_cnt = GC.PICKER_GAE_MINCNT
max_timeout = GC.PICKER_GAE_MAXTIMEOUT
max_threads = GC.PICKER_GAE_MAXTHREADS
elif self.type == 'gws':
enable = GC.PICKER_GWS_ENABLE
min_recheck_time = GC.PICKER_GWS_MINRECHECKTIME
min_cnt = GC.PICKER_GWS_MINCNT
max_timeout = GC.PICKER_GWS_MAXTIMEOUT
max_threads = GC.PICKER_GWS_MAXTHREADS
self.enable = enable
self.strict = GC.PICKER_STRICT
self.min_recheck_time = min_recheck_time
self.min_cnt = min_cnt
self.max_cnt = int(min_cnt * 1.4)
self.max_timeout = max_timeout
self.max_threads = max_threads
self.server_name = GC.PICKER_SERVERNAME
        self.com_domain = GC.PICKER_COMDOMAIN
self.recheck_loop_time = max(90, GC.GAE_KEEPTIME) + min(10, min_cnt) * 20
def get_timeout(self, type=None):
m = getattr(self, type or self.type, self)
return m.max_timeout
@_lock_save_use
def save_ip(self):
        headers = ('#coding: utf-8\n'
                   '#This file is maintained automatically by GotoX; do not edit.\n'
                   '[iplist]\n')
with open(GC.CONFIG_IPDB, 'w', encoding='utf_8', newline='\n') as f:
f.write(headers)
for m in (self.gae, self.gws):
f.write(m.list_name)
f.write(' = ')
f.write('|'.join(m.ip_list))
f.write('\n')
self.last_update = mtime()
def add_ip(self, ip, type=None):
m = getattr(self, type or self.type, self)
if ip not in m.ip_set:
m.ip_set.add(ip)
m.ip_list.append(ip)
            m.logger.test('Added %s to %s', ip, m.list_name)
if len(m.ip_list) > m.max_cnt:
m.remove_slow_ip()
self.save_ip()
def remove_ip(self, ip, type=None):
m = getattr(self, type or self.type, self)
if ip in m.ip_set:
m.ip_set.remove(ip)
m.ip_list.remove(ip)
            m.logger.test('remove_ip: removed from %s: %s', m.list_name, ip)
self.save_ip()
@_lock_remove_slow
def remove_slow_ip(self, type=None):
m = getattr(self, type or self.type, self)
ip_list = ((ip, http_gws.get_ssl_connection_time((ip, 443))) for ip in m.ip_list)
ip_list = sorted(ip_list, key=get_index_1)
while len(ip_list) > m.max_cnt:
ip = ip_list.pop()[0]
m.ip_set.remove(ip)
m.ip_list.remove(ip)
            m.logger.test('remove_slow_ip: removed from %s: %s', m.list_name, ip)
def check_ip(self, ip, type=None):
m = getattr(self, type or self.type, self)
return http_gws._create_ssl_connection((ip, 443), m.cache_key, None, None,
callback=m.check_callback)
def check_gae_callback(self, result):
self.check_gws_callback(result)
if isinstance(result, Exception):
return
try:
http_gws.match_hostname(result, hostname='www.appspot.com')
except CertificateError:
ip = result.xip[0]
if ip in self.ip_set:
self.remove_ip(ip, 'gae')
self.add_ip(ip, 'gws')
self.ip_source.remove_ip(ip, 'gae')
self.ip_source.add_ip(ip, 'gws')
                self.logger.warning('Wrong IP type; moved %s to the GWS list', ip)
return self.gws.cache_key
def check_gws_callback(self, result):
timeout = self.get_timeout()
ip = result.xip[0]
is_recheck = ip in self.ip_set
if isinstance(result, Exception):
if is_recheck:
if isinstance(result, LimiterFull):
self.ip_list.append(self.ip_list.popleft())
elif len(self.ip_list) < self.max_cnt and (
self.max_threads == 0 or (
isinstance(result, socket.timeout) and
result.args[0][-3:] == ' ms' and
self.pick_worker_cnt >= self.max_threads)) or \
len(self.ip_list) <= self.min_cnt:
http_gws.ssl_connection_time[result.xip] = http_gws.timeout + 1
self.ip_list.append(self.ip_list.popleft())
                    self.logger.warning('%s test failed (timeout: %d ms) %s, %s',
                                        self.pick_worker_cnt, timeout, ip, result)
                    # without removal, failures keep being recorded; offset that here
self.ip_source.report_recheck_ok(ip)
else:
self.remove_ip(ip)
                    self.logger.warning('%s test failed (timeout: %d ms) %s, %s, '
                                        'bad IP removed',
                                        self.pick_worker_cnt, timeout, ip, result)
return
ssl_time = int(result.ssl_time * 1000)
if ssl_time > timeout and len(self.ip_list) > self.min_cnt:
raise socket.timeout('%d ms' % ssl_time)
        self.logger.test('%d tested a connection (timeout: %d ms) %s, %d ms',
                         self.pick_worker_cnt, timeout, ip, ssl_time)
if is_recheck:
self.ip_list.append(self.ip_list.popleft())
else:
self.add_ip(ip)
def get_ip_info(self, ip, server_name=None, callback=None,
conntimeout=g_conntimeout,
handshaketimeout=g_handshaketimeout,
timeout=g_timeout):
retry = None
server_name = server_name or self.server_name
callback = callback or self.check_type_status
while True:
start_time = mtime()
ssl_time = 1e5
type = None
domain = None
sock = None
ssl_sock = None
try:
sock = http_gws.get_tcp_socket(ip)
http_gws.set_tcp_socket(sock, set_buffer=False)
ssl_sock = http_gws.get_ssl_socket(sock, server_name)
ssl_sock.settimeout(conntimeout)
ssl_sock.connect((ip, 443))
ssl_sock.settimeout(handshaketimeout)
ssl_sock.do_handshake()
ssl_sock.settimeout(timeout)
handshaked_time = mtime() - start_time
ssl_time = int(handshaked_time * 1000)
if handshaked_time > handshaketimeout:
                    raise socket.error('handshake timed out: %d ms' % ssl_time)
cert = http_gws.google_verify(ssl_sock)
domain = cert.get_subject().CN
if not domain:
                    raise CertificateError(-1, '%s could not get commonName: %s' % (ip, cert))
type = callback(ssl_sock, ip)
except NetWorkIOError as e:
                self.logger.debug('get_ip_info hit an error: %s', e)
if not retry and (e.args == zero_EOF_error or e.args[0] in zero_errno):
retry = True
continue
finally:
if ssl_sock:
ssl_sock.close()
elif sock:
sock.close()
if server_name is self.server_name and domain == self.com_domain:
domain = '*.google.com'
            if type == 'gae' and not self.test_ip_gae(ip) or \
                    type == 'gws' and not self.test_ip_gws(ip):
type = None
return domain, ssl_time, type
def check_type_status(self, conn, ip):
try:
conn.send(self.pick_http_req)
conn.read(9)
if conn.read(3) in self.pick_gae_code:
return 'gae'
elif conn.read(60) == self.pick_gws_res:
return 'gws'
except NetWorkIOError as e:
            self.logger.debug('Error while fetching server info from %s: %r', ip, e)
def check_gae_status(self, conn, ip):
try:
http_gws.match_hostname(conn, hostname='www.appspot.com')
conn.send(self.pick_gae_req)
conn.read(9)
return conn.read(3) in self.pick_gae_verify_code
except CertificateError:
return False
except:
pass
def test_ip_gae(self, ip):
server_name = random_hostname('*com')
_, _, type = self.get_ip_info(ip,
server_name=server_name,
callback=self.check_gae_status)
if type:
return True
gae = self.gae
try:
if gae.enable:
gae.remove_ip(ip)
else:
gae.ip_list.remove(ip)
except (KeyError, ValueError):
pass
        if type is False:
            # unusable IP
            gae.ip_source.remove_ip(ip)
            gae.ip_source.del_ip(ip)
            self.logger.debug('Removed %s from the gae category', ip)
        else:
            # cannot tell for sure yet, so keep it as a candidate for now
            gae.ip_source.add_ip(ip)
def check_gws_status(self, conn, ip):
try:
conn.send(self.pick_gws_req)
conn.read(9)
return conn.read(3) == b'200' # HEAD -> 200, GET -> 302
except CertificateError:
return False
except:
pass
def test_ip_gws(self, ip):
_, _, type = self.get_ip_info(ip, callback=self.check_gws_status)
if type:
return True
gws = self.gws
try:
if gws.enable:
gws.remove_ip(ip)
else:
gws.ip_list.remove(ip)
except (KeyError, ValueError):
pass
        if type is False:
            # unusable IP
            gws.ip_source.remove_ip(ip)
            gws.ip_source.del_ip(ip)
            self.logger.debug('Removed %s from the gws category', ip)
        else:
            # cannot tell for sure yet, so keep it as a candidate for now
            gws.ip_source.add_ip(ip)
def pick_ip_worker(self):
while True:
try:
if self.kill_pick_ip_worker():
break
ip, type = self.ip_source.get_ip()
if ip is None:
sleep(10)
continue
checked = False
if type is None:
domain, ssl_time, type = self.get_ip_info(ip)
if type:
self.ip_source.add_ip(ip, type)
checked = domain and ssl_time <= self.get_timeout(type)
elif not self.strict:
checked = True
                elif type == 'gae':
                    checked = self.test_ip_gae(ip)
                elif type == 'gws':
                    checked = self.test_ip_gws(ip)
                if type != self.type and checked:
self.ip_source.push_ip(ip, type)
continue
if checked:
                    # run check_ip again so the connection time is recorded
checked = self.check_ip(ip, type)
if checked:
self.ip_source.report_check_ok(ip)
else:
self.ip_source.report_check_fail(ip)
except Exception as e:
                self.logger.exception('pick_ip_worker hit an error: %s', e)
@_lock_pick_worker
def kill_pick_ip_worker(self):
if self.kill_pick_worker_cnt > 0:
self.pick_worker_cnt -= 1
self.kill_pick_worker_cnt -= 1
return True
@_lock_pick_worker
def check_pick_ip_worker(self):
new_worker_cnt = min((self.max_cnt - len(self.ip_list)) * 2,
self.max_threads or 1) - self.pick_worker_cnt
if new_worker_cnt > 0:
self.pick_worker_cnt += new_worker_cnt
for _ in range(new_worker_cnt):
start_new_thread(self.pick_ip_worker, ())
sleep(0.5)
elif new_worker_cnt < 0:
self.kill_pick_worker_cnt = - new_worker_cnt
def recheck_ip_worker(self):
while self.running:
try:
sleep(1)
if not internet_v4.last_stat and not internet_v6.last_stat:
self.kill_pick_worker_cnt = self.pick_worker_cnt
continue
self.check_pick_ip_worker()
pass_time = mtime() - self.last_check
if not self.ip_list:
if pass_time > self.min_recheck_time:
                        self.logger.warning('The current %s IP count is 0', self.type)
self.last_check = mtime()
continue
if pass_time < self.min_recheck_time or \
pass_time < self.recheck_loop_time / len(self.ip_list):
continue
ip = self.ip_list[0]
if not is_ip_use(ip):
                    self.logger.warning('Found an IP unusable under the current config: %s', ip)
self.remove_ip(ip)
continue
self.last_check = mtime()
if self.check_ip(ip):
self.ip_source.report_recheck_ok(ip)
else:
self.ip_source.report_recheck_fail(ip)
except Exception as e:
                self.logger.exception('recheck_ip_worker hit an error: %s', e)
else:
self.kill_pick_worker_cnt = self.pick_worker_cnt
@_lock_pick_worker
def start(self):
if self.running:
return
if self.enable:
self.running = True
if not hasattr(self.ip_source, 'get_cnt'):
self.ip_source.check_update(force=True)
start_new_thread(self.recheck_ip_worker, ())
def stop(self):
self.running = False
ip_source = IPSource()
ip_source_gae = IPPoolSource(ip_source, 'gae')
ip_source_gws = IPPoolSource(ip_source, 'gws')
ip_manager_gae = IPManager(ip_source_gae)
ip_manager_gws = IPManager(ip_source_gws)
test_ip_gae = ip_manager_gae.test_ip_gae
test_ip_gws = ip_manager_gae.test_ip_gws
def test_ip_type(ip):
_, _, type = ip_manager_gae.get_ip_info(ip)
    if type == 'gae' and not test_ip_gae(ip) or \
            type == 'gws' and not test_ip_gws(ip):
type = None
return type
def start_ip_check():
ip_manager_gae.start()
ip_manager_gws.start()
def stop_ip_check():
ip_manager_gae.stop()
ip_manager_gws.stop()
def fixed_iplist():
list_gae = []
list_gws = []
list_unknown = []
cnt_gae = 0
cnt_gws = 0
while True:
for ip in ip_source.ip_set_used:
if ip in ip_source_gae.ip_set:
type = 'gae'
elif ip in ip_source_gws.ip_set:
type = 'gws'
else:
type = test_ip_type(ip)
            if type == 'gae':
                ip_source_gae.ip_set.add(ip)
                list_gae.append(ip)
            elif type == 'gws':
ip_source_gws.ip_set.add(ip)
list_gws.append(ip)
else:
list_unknown.append(ip)
if len(list_gae) > cnt_gae:
ip_source_gae.save_source(True)
cnt_gae = len(list_gae)
if len(list_gws) > cnt_gws:
ip_source_gws.save_source(True)
cnt_gws = len(list_gws)
        ip_manager_gae.logger.test('Updated the fixed GIP list (%d IPs in total), '
                                   'including %d GAE and %d GWS.',
                                   len(ip_source.ip_set_used), cnt_gae, cnt_gws)
GC.IPLIST_MAP['google_gae'][:] = list_gae + list_unknown
GC.IPLIST_MAP['google_gws'][:] = list_gws + list_unknown
list_gae.clear()
list_gws.clear()
list_unknown.clear()
sleep(3600)
---- record ----
blob_id: 3c66e1daf7b24791c7d71d3b34090e257263794b | directory_id: 71acb7214efd91c0d327f6d8958e1798eadb4401 | content_id: 1348d28fe83e11d8399e47f7c862b069ddd224cd
path: /locations/spiders/lidl_fi.py | filename: lidl_fi.py | extension: py | length_bytes: 933
repo_name: alltheplaces/alltheplaces | branch_name: refs/heads/master
snapshot_id: 21b9f8b4ace1352e52ae7b8f8825a930d2cb033e | revision_id: 1bcbb55cfcf06f2c714465570711f6e83f205c22
detected_licenses: ["CC0-1.0", "MIT"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-30T19:45:35.098658 | revision_date: 2023-08-30T17:51:54 | committer_date: 2023-08-30T17:51:54
github_id: 61,166,935 | star_events_count: 453 | fork_events_count: 176
gha_event_created_at: 2023-09-14T17:16:40 | gha_created_at: 2016-06-15T01:09:18 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import re
from locations.hours import DAYS_FI, OpeningHours, sanitise_day
from locations.spiders.lidl_gb import LidlGBSpider
from locations.storefinders.virtualearth import VirtualEarthSpider
class LidlFISpider(VirtualEarthSpider):
name = "lidl_fi"
item_attributes = LidlGBSpider.item_attributes
dataset_id = "d5239b243d6b4672810cbd11f82750f5"
dataset_name = "Filialdaten-FI/Filialdaten-FI"
key = "AhRg1sJKLrhfytyanzu32Io1e7le8W-AZs5Xo88SgdwF33tPSxjVn9h72EpJ7gqD"
def parse_item(self, item, feature, **kwargs):
item["name"] = feature["ShownStoreName"]
item["opening_hours"] = OpeningHours()
for day, start_time, end_time in re.findall(
r"(\w+) (\d{2}:\d{2})-(\d{2}:\d{2})",
feature["OpeningTimes"],
):
if day := sanitise_day(day, DAYS_FI):
item["opening_hours"].add_range(day, start_time, end_time)
yield item
---- record ----
blob_id: 5886b28cb93da1f1051d090078bf6e36c7ec9c7d | directory_id: dac12c9178b13d60f401c4febff5569af8aa2719 | content_id: 0de56682a37e8f4e1d986556933da825c05d7e60
path: /cvat/apps/log_viewer/urls.py | filename: urls.py | extension: py | length_bytes: 297
repo_name: opencv/cvat | branch_name: refs/heads/develop
snapshot_id: 39dc66ca20f972ba40b79c44d7ce43590dc0b0b5 | revision_id: 899c9fd75146744def061efd7ab1b1c6c9f6942f
detected_licenses: ["LGPL-2.0-or-later", "MIT", "GPL-1.0-or-later"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-19T04:27:56.974498 | revision_date: 2023-08-18T09:58:25 | committer_date: 2023-08-18T09:58:25
github_id: 139,156,354 | star_events_count: 6,558 | fork_events_count: 1,887
gha_event_created_at: 2023-09-14T12:44:39 | gha_created_at: 2018-06-29T14:02:45 | gha_language: TypeScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (C) 2018-2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from rest_framework import routers
from . import views
router = routers.DefaultRouter(trailing_slash=False)
router.register('analytics', views.LogViewerAccessViewSet, basename='analytics')
urlpatterns = router.urls
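# With trailing_slash=False, the router exposes the viewset at e.g.
# GET /analytics (no trailing slash), alongside DefaultRouter's API root view.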
---- record ----
blob_id: 1d1a53ac149b773e1d0d09be743bb719468aa4aa | directory_id: 20dda4f19ec777d1a69ae20b5e2a48b9b28bb4a4 | content_id: beef515d43c3f4a07e6f67eab3af764bf3da3d3e
path: /flexbe_core/test/test_core.py | filename: test_core.py | extension: py | length_bytes: 20,879
repo_name: team-vigir/flexbe_behavior_engine | branch_name: refs/heads/main
snapshot_id: fd94ac2b75bfef6ca318d700d94b76f16cfd6552 | revision_id: 6028c8585d852be55f4512024dcca5caa53e57c2
detected_licenses: [] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-05-12T20:25:50.388882 | revision_date: 2022-03-09T22:19:43 | committer_date: 2022-03-09T22:19:43
github_id: 38,892,260 | star_events_count: 131 | fork_events_count: 72
gha_event_created_at: 2023-06-23T03:06:37 | gha_created_at: 2015-07-10T17:06:37 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python
import unittest
import rospy
from flexbe_core import EventState, OperatableStateMachine, ConcurrencyContainer
from flexbe_core.core import PreemptableState
from flexbe_core.proxy import ProxySubscriberCached
from flexbe_core.core.exceptions import StateMachineError
from std_msgs.msg import Bool, Empty, UInt8, String
from flexbe_msgs.msg import CommandFeedback, OutcomeRequest
class TestSubjectState(EventState):
def __init__(self):
super(TestSubjectState, self).__init__(outcomes=['done', 'error'])
self.result = None
self.last_events = []
self.count = 0
def execute(self, userdata):
self.count += 1
return self.result
def on_enter(self, userdata):
self.last_events.append('on_enter')
def on_exit(self, userdata):
self.last_events.append('on_exit')
def on_start(self):
self.last_events.append('on_start')
def on_stop(self):
self.last_events.append('on_stop')
def on_pause(self):
self.last_events.append('on_pause')
def on_resume(self, userdata):
self.last_events.append('on_resume')
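# TestSubjectState records every lifecycle callback in last_events, letting
# the tests below assert exactly which hooks fired during each execute().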
class TestCore(unittest.TestCase):
def _create(self):
state = TestSubjectState()
state._enable_ros_control()
with OperatableStateMachine(outcomes=['done', 'error']):
OperatableStateMachine.add('subject', state,
transitions={'done': 'done', 'error': 'error'},
autonomy={'done': 1, 'error': 2})
return state
def _execute(self, state):
state.last_events = []
return state.parent.execute(None)
def assertMessage(self, sub, topic, msg, timeout=1):
rate = rospy.Rate(100)
for i in range(int(timeout * 100)):
if sub.has_msg(topic):
received = sub.get_last_msg(topic)
sub.remove_last_msg(topic)
break
rate.sleep()
else:
raise AssertionError('Did not receive message on topic %s, expected:\n%s'
% (topic, str(msg)))
for slot in msg.__slots__:
expected = getattr(msg, slot)
actual = getattr(received, slot)
error = "Mismatch for %s, is %s but expected %s" % (slot, actual, expected)
if isinstance(expected, list):
self.assertListEqual(expected, actual, error)
else:
self.assertEqual(expected, actual, error)
def assertNoMessage(self, sub, topic, timeout=1):
rate = rospy.Rate(100)
for i in range(int(timeout * 100)):
if sub.has_msg(topic):
received = sub.get_last_msg(topic)
sub.remove_last_msg(topic)
raise AssertionError('Should not receive message on topic %s, but got:\n%s'
% (topic, str(received)))
rate.sleep()
# Test Cases
def test_event_state(self):
state = self._create()
fb_topic = 'flexbe/command_feedback'
sub = ProxySubscriberCached({fb_topic: CommandFeedback})
rospy.sleep(0.2) # wait for pub/sub
# enter during first execute
self._execute(state)
self.assertListEqual(['on_enter'], state.last_events)
self._execute(state)
self.assertListEqual([], state.last_events)
# pause and resume as commanded
state._sub._callback(Bool(True), 'flexbe/command/pause')
self._execute(state)
self.assertListEqual(['on_pause'], state.last_events)
self.assertMessage(sub, fb_topic, CommandFeedback(command="pause"))
state.result = 'error'
outcome = self._execute(state)
state.result = None
self.assertIsNone(outcome)
state._sub._callback(Bool(False), 'flexbe/command/pause')
self._execute(state)
self.assertListEqual(['on_resume'], state.last_events)
self.assertMessage(sub, fb_topic, CommandFeedback(command="resume"))
# repeat triggers exit and enter again
state._sub._callback(Empty(), 'flexbe/command/repeat')
self._execute(state)
self.assertListEqual(['on_exit'], state.last_events)
self.assertMessage(sub, fb_topic, CommandFeedback(command="repeat"))
self._execute(state)
self.assertListEqual(['on_enter'], state.last_events)
self._execute(state)
self.assertListEqual([], state.last_events)
# exit during last execute when returning an outcome
state.result = 'done'
outcome = self._execute(state)
self.assertListEqual(['on_exit'], state.last_events)
self.assertEqual('done', outcome)
def test_operatable_state(self):
state = self._create()
out_topic = 'flexbe/mirror/outcome'
req_topic = 'flexbe/outcome_request'
sub = ProxySubscriberCached({out_topic: UInt8, req_topic: OutcomeRequest})
rospy.sleep(0.2) # wait for pub/sub
# return outcome in full autonomy, no request
state.result = 'error'
self._execute(state)
self.assertNoMessage(sub, req_topic)
self.assertMessage(sub, out_topic, UInt8(1))
        # request outcome at the same autonomy level and clear the request on loopback
OperatableStateMachine.autonomy_level = 2
self._execute(state)
self.assertNoMessage(sub, out_topic)
self.assertMessage(sub, req_topic, OutcomeRequest(outcome=1, target='/subject'))
state.result = None
self._execute(state)
self.assertMessage(sub, req_topic, OutcomeRequest(outcome=255, target='/subject'))
# still return other outcomes
state.result = 'done'
self._execute(state)
self.assertNoMessage(sub, req_topic)
self.assertMessage(sub, out_topic, UInt8(0))
# request outcome on lower autonomy, return outcome after level increase
OperatableStateMachine.autonomy_level = 0
self._execute(state)
self.assertNoMessage(sub, out_topic)
self.assertMessage(sub, req_topic, OutcomeRequest(outcome=0, target='/subject'))
OperatableStateMachine.autonomy_level = 3
self._execute(state)
self.assertMessage(sub, out_topic, UInt8(0))
def test_preemptable_state(self):
state = self._create()
fb_topic = 'flexbe/command_feedback'
sub = ProxySubscriberCached({fb_topic: CommandFeedback})
rospy.sleep(0.2) # wait for pub/sub
# preempt when trigger variable is set
PreemptableState.preempt = True
outcome = self._execute(state)
self.assertEqual(outcome, PreemptableState._preempted_name)
self.assertRaises(StateMachineError, lambda: state.parent.current_state)
PreemptableState.preempt = False
outcome = self._execute(state)
self.assertIsNone(outcome)
# preempt when command is received
state._sub._callback(Empty(), 'flexbe/command/preempt')
outcome = self._execute(state)
self.assertEqual(outcome, PreemptableState._preempted_name)
self.assertRaises(StateMachineError, lambda: state.parent.current_state)
self.assertMessage(sub, fb_topic, CommandFeedback(command='preempt'))
PreemptableState.preempt = False
def test_lockable_state(self):
state = self._create()
fb_topic = 'flexbe/command_feedback'
sub = ProxySubscriberCached({fb_topic: CommandFeedback})
rospy.sleep(0.2) # wait for pub/sub
# lock and unlock as commanded, return outcome after unlock
state._sub._callback(String('/subject'), 'flexbe/command/lock')
state.result = 'done'
outcome = self._execute(state)
self.assertIsNone(outcome)
self.assertTrue(state._locked)
self.assertMessage(sub, fb_topic, CommandFeedback(command='lock', args=['/subject', '/subject']))
state.result = None
state._sub._callback(String('/subject'), 'flexbe/command/unlock')
outcome = self._execute(state)
self.assertEqual(outcome, 'done')
self.assertMessage(sub, fb_topic, CommandFeedback(command='unlock', args=['/subject', '/subject']))
# lock and unlock without target
state._sub._callback(String(''), 'flexbe/command/lock')
state.result = 'done'
outcome = self._execute(state)
self.assertIsNone(outcome)
self.assertMessage(sub, fb_topic, CommandFeedback(command='lock', args=['/subject', '/subject']))
state._sub._callback(String(''), 'flexbe/command/unlock')
outcome = self._execute(state)
self.assertEqual(outcome, 'done')
self.assertMessage(sub, fb_topic, CommandFeedback(command='unlock', args=['/subject', '/subject']))
# reject invalid lock command
state._sub._callback(String('/invalid'), 'flexbe/command/lock')
outcome = self._execute(state)
self.assertEqual(outcome, 'done')
self.assertMessage(sub, fb_topic, CommandFeedback(command='lock', args=['/invalid', '']))
# reject generic unlock command when not locked
state._sub._callback(String(''), 'flexbe/command/unlock')
self._execute(state)
self.assertMessage(sub, fb_topic, CommandFeedback(command='unlock', args=['', '']))
# do not transition out of locked container
state.parent._locked = True
outcome = self._execute(state)
self.assertIsNone(outcome)
state.parent._locked = False
state.result = None
outcome = self._execute(state)
self.assertEqual(outcome, 'done')
def test_manually_transitionable_state(self):
state = self._create()
fb_topic = 'flexbe/command_feedback'
sub = ProxySubscriberCached({fb_topic: CommandFeedback})
rospy.sleep(0.2) # wait for pub/sub
# return requested outcome
state._sub._callback(OutcomeRequest(target='subject', outcome=1), 'flexbe/command/transition')
outcome = self._execute(state)
self.assertEqual(outcome, 'error')
self.assertMessage(sub, fb_topic, CommandFeedback(command='transition', args=['subject', 'subject']))
# reject outcome request for different target
state._sub._callback(OutcomeRequest(target='invalid', outcome=1), 'flexbe/command/transition')
outcome = self._execute(state)
self.assertIsNone(outcome)
self.assertMessage(sub, fb_topic, CommandFeedback(command='transition', args=['invalid', 'subject']))
def test_ros_state(self):
state = self._create()
# default rate is 10Hz
start = rospy.get_time()
for i in range(10):
state.sleep()
duration = rospy.get_time() - start
self.assertAlmostEqual(duration, 1., places=2)
self.assertAlmostEqual(state.sleep_duration, .1, places=2)
# change of rate works as expected
state.set_rate(1)
start = rospy.get_time()
state.sleep()
duration = rospy.get_time() - start
self.assertAlmostEqual(duration, 1., places=2)
self.assertAlmostEqual(state.sleep_duration, 1., places=2)
def test_cross_combinations(self):
state = self._create()
# manual transition works on low autonomy
OperatableStateMachine.autonomy_level = 0
state.result = 'error'
outcome = self._execute(state)
self.assertIsNone(outcome)
state._sub._callback(OutcomeRequest(target='subject', outcome=0), 'flexbe/command/transition')
outcome = self._execute(state)
self.assertEqual(outcome, 'done')
OperatableStateMachine.autonomy_level = 3
state.result = None
# manual transition blocked by lock
state._sub._callback(String('/subject'), 'flexbe/command/lock')
outcome = self._execute(state)
self.assertIsNone(outcome)
state._sub._callback(OutcomeRequest(target='subject', outcome=1), 'flexbe/command/transition')
outcome = self._execute(state)
self.assertIsNone(outcome)
state._sub._callback(String('/subject'), 'flexbe/command/unlock')
outcome = self._execute(state)
self.assertEqual(outcome, 'error')
# preempt works on low autonomy
OperatableStateMachine.autonomy_level = 0
state.result = 'error'
outcome = self._execute(state)
self.assertIsNone(outcome)
state._sub._callback(Empty(), 'flexbe/command/preempt')
outcome = self._execute(state)
self.assertEqual(outcome, PreemptableState._preempted_name)
PreemptableState.preempt = False
OperatableStateMachine.autonomy_level = 3
state.result = None
# preempt also works when locked
state._sub._callback(String('/subject'), 'flexbe/command/lock')
outcome = self._execute(state)
self.assertIsNone(outcome)
state._sub._callback(Empty(), 'flexbe/command/preempt')
outcome = self._execute(state)
self.assertEqual(outcome, PreemptableState._preempted_name)
PreemptableState.preempt = False
state._sub._callback(String('/subject'), 'flexbe/command/unlock')
outcome = self._execute(state)
self.assertIsNone(outcome)
def test_concurrency_container(self):
cc = ConcurrencyContainer(outcomes=['done', 'error'],
conditions=[
('error', [('main', 'error')]),
('error', [('side', 'error')]),
('done', [('main', 'done'), ('side', 'done')])
])
with cc:
OperatableStateMachine.add('main', TestSubjectState(),
transitions={'done': 'done', 'error': 'error'},
autonomy={'done': 1, 'error': 2})
OperatableStateMachine.add('side', TestSubjectState(),
transitions={'done': 'done', 'error': 'error'},
autonomy={'done': 1, 'error': 2})
with OperatableStateMachine(outcomes=['done', 'error']):
OperatableStateMachine.add('cc', cc,
transitions={'done': 'done', 'error': 'error'},
autonomy={'done': 1, 'error': 2})
class FakeRate(object):
def remaining(self):
return rospy.Duration(0)
def sleep(self):
pass
# all states are called with their correct rate
cc.execute(None)
cc.sleep()
cc.execute(None)
self.assertAlmostEqual(cc.sleep_duration, .1, places=2)
cc.sleep()
cc['main'].set_rate(15)
cc['side'].set_rate(10)
cc['main'].count = 0
cc['side'].count = 0
start = rospy.get_time()
cc_count = 0
while rospy.get_time() - start <= 1.:
cc_count += 1
cc.execute(None)
self.assertLessEqual(cc.sleep_duration, .1)
cc.sleep()
self.assertIn(cc['main'].count, [14, 15, 16])
self.assertIn(cc['side'].count, [9, 10, 11])
self.assertLessEqual(cc_count, 27)
# verify ROS properties and disable sleep
cc._enable_ros_control()
self.assertTrue(cc['main']._is_controlled)
self.assertFalse(cc['side']._is_controlled)
cc['main']._rate = FakeRate()
cc['side']._rate = FakeRate()
# return outcome when all return done or any returns error
outcome = cc.execute(None)
self.assertIsNone(outcome)
cc['main'].result = 'error'
outcome = cc.execute(None)
self.assertEqual(outcome, 'error')
cc['main'].result = None
cc['side'].result = 'error'
outcome = cc.execute(None)
self.assertEqual(outcome, 'error')
cc['side'].result = 'done'
outcome = cc.execute(None)
self.assertIsNone(outcome)
cc['main'].result = 'done'
outcome = cc.execute(None)
self.assertEqual(outcome, 'done')
cc['main'].result = None
cc['side'].result = None
# always call on_exit exactly once when returning an outcome
outcome = cc.execute(None)
self.assertIsNone(outcome)
cc['main'].last_events = []
cc['side'].last_events = []
cc['main'].result = 'error'
outcome = cc.execute(None)
self.assertEqual(outcome, 'error')
self.assertListEqual(cc['main'].last_events, ['on_exit'])
self.assertListEqual(cc['side'].last_events, ['on_exit'])
def test_user_data(self):
class TestUserdata(EventState):
def __init__(self, out_content='test_data'):
super(TestUserdata, self).__init__(outcomes=['done'], input_keys=['data_in'], output_keys=['data_out'])
self.data = None
self._out_content = out_content
def execute(self, userdata):
rospy.logwarn('\033[0m%s\n%s' % (self.path, str(userdata))) # log for manual inspection
self.data = userdata.data_in
userdata.data_out = self._out_content
return 'done'
inner_sm = OperatableStateMachine(outcomes=['done'], input_keys=['sm_in'], output_keys=['sm_out'])
inner_sm.userdata.own = 'own_data'
with inner_sm:
OperatableStateMachine.add('own_state', TestUserdata('inner_data'), transitions={'done': 'outside_state'},
remapping={'data_in': 'own', 'data_out': 'sm_out'})
OperatableStateMachine.add('outside_state', TestUserdata(), transitions={'done': 'internal_state'},
remapping={'data_in': 'sm_in', 'data_out': 'data_in'})
OperatableStateMachine.add('internal_state', TestUserdata(), transitions={'done': 'done'},
remapping={})
sm = OperatableStateMachine(outcomes=['done'])
sm.userdata.outside = 'outside_data'
with sm:
OperatableStateMachine.add('before_state', TestUserdata(), transitions={'done': 'inner_sm'},
remapping={'data_in': 'outside'})
OperatableStateMachine.add('inner_sm', inner_sm, transitions={'done': 'after_state'},
remapping={'sm_in': 'outside'})
OperatableStateMachine.add('after_state', TestUserdata('last_data'), transitions={'done': 'modify_state'},
remapping={'data_in': 'sm_out'})
OperatableStateMachine.add('modify_state', TestUserdata(), transitions={'done': 'final_state'},
remapping={'data_out': 'outside', 'data_in': 'outside'})
OperatableStateMachine.add('final_state', TestUserdata(), transitions={'done': 'done'},
remapping={'data_in': 'data_out'})
# can pass userdata to state and get it from state
sm.execute(None)
self.assertEqual(sm['before_state'].data, 'outside_data')
self.assertEqual(sm._userdata.data_out, 'test_data')
# sub-state machine can set its own local userdata
sm.execute(None)
self.assertEqual(sm['inner_sm']['own_state'].data, 'own_data')
self.assertNotIn('own', sm._userdata) # transparent to outer sm
# sub-state machine can read data from parent state machine
sm.execute(None)
self.assertEqual(sm['inner_sm']['outside_state'].data, 'outside_data')
# sub-state machine can pass along its local userdata
self.assertIn('data_in', sm['inner_sm']._userdata)
sm.execute(None)
self.assertEqual(sm['inner_sm']['internal_state'].data, 'test_data')
self.assertNotIn('data_in', sm._userdata) # transparent to outer sm
        # sub-state machine userdata is written back to the parent
self.assertEqual(sm._userdata.sm_out, 'inner_data')
# outer state machine can read data set by inner state machine
sm.execute(None)
self.assertEqual(sm['after_state'].data, 'inner_data')
# can remap different keys to achieve read-write access
sm.execute(None)
self.assertEqual(sm['modify_state'].data, 'outside_data')
self.assertEqual(sm._userdata.outside, 'test_data')
# one state can read data set by another one
outcome = sm.execute(None)
self.assertEqual(sm['final_state'].data, 'last_data')
self.assertEqual(outcome, 'done')
if __name__ == '__main__':
rospy.init_node('test_flexbe_core')
import rostest
rostest.rosrun('flexbe_core', 'test_flexbe_core', TestCore)
|
cea4dcbd5ed877cc4b96a73150f0d9090a4d13f9
|
62eaa871e4e825a0a8c3a014b5d08fcf976aedef
|
/tsai/losses.py
|
b6368b4c6413eae7a289f13e19f1af084fd46155
|
[
"Apache-2.0"
] |
permissive
|
timeseriesAI/tsai
|
f1006b37062a328edabb2fae3e8361dcda0fc68b
|
06ab2a9c6870b311fa0efe4cb3fc4df0009d1965
|
refs/heads/main
| 2023-07-19T22:11:06.425058
| 2023-07-13T07:06:16
| 2023-07-13T07:06:16
| 211,822,289
| 3,526
| 458
|
Apache-2.0
| 2023-06-15T13:57:12
| 2019-09-30T09:18:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,287
|
py
|
losses.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/016_losses.ipynb.
# %% auto 0
__all__ = ['HuberLoss', 'LogCoshLoss', 'MaskedLossWrapper', 'CenterLoss', 'CenterPlusLoss', 'FocalLoss', 'TweedieLoss']
# %% ../nbs/016_losses.ipynb 3
from .imports import *
from fastai.losses import *
# %% ../nbs/016_losses.ipynb 4
## Available in PyTorch >= 1.9 as nn.HuberLoss
class HuberLoss(nn.Module):
"""Huber loss
Creates a criterion that uses a squared term if the absolute
element-wise error falls below delta and a delta-scaled L1 term otherwise.
This loss combines advantages of both :class:`L1Loss` and :class:`MSELoss`; the
delta-scaled L1 region makes the loss less sensitive to outliers than :class:`MSELoss`,
while the L2 region provides smoothness over :class:`L1Loss` near 0. See
`Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`_ for more information.
This loss is equivalent to nn.SmoothL1Loss when delta == 1.
"""
def __init__(self, reduction='mean', delta=1.0):
assert reduction in ['mean', 'sum', 'none'], "You must set reduction to 'mean', 'sum' or 'none'"
self.reduction, self.delta = reduction, delta
super().__init__()
    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        diff = input - target
        abs_diff = torch.abs(diff)
        # torch.where keeps element order and shape intact, which matters for
        # reduction='none'; concatenating the two masked pieces scrambles it
        loss = torch.where(abs_diff < self.delta,
                           .5 * diff ** 2,
                           self.delta * (abs_diff - .5 * self.delta))
        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:
            return loss
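# Example (illustrative sketch, not part of the library): with delta=1. this
# matches nn.SmoothL1Loss, e.g.
#   crit = HuberLoss(delta=1.)
#   crit(torch.tensor([0., 3.]), torch.tensor([.5, 0.]))
#   # per-element losses are [0.125, 2.5] -> mean 1.3125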
# %% ../nbs/016_losses.ipynb 5
class LogCoshLoss(nn.Module):
def __init__(self, reduction='mean', delta=1.0):
assert reduction in ['mean', 'sum', 'none'], "You must set reduction to 'mean', 'sum' or 'none'"
self.reduction, self.delta = reduction, delta
super().__init__()
    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        abs_diff = torch.abs(input - target)
        # numerically stable log(cosh(x)) = |x| + softplus(-2|x|) - log(2);
        # evaluating torch.cosh directly overflows for large residuals
        loss = abs_diff + F.softplus(-2. * abs_diff) - 0.6931471805599453  # log(2)
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else:
return loss
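# Example (illustrative sketch): log-cosh grows like .5*x**2 near zero and
# like |x| - log(2) for large residuals, keeping MSE-like gradients while
# staying robust to outliers:
#   crit = LogCoshLoss()
#   crit(torch.tensor([0., 10.]), torch.zeros(2))  # ~ (0 + 9.307) / 2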
# %% ../nbs/016_losses.ipynb 7
class MaskedLossWrapper(Module):
def __init__(self, crit):
self.loss = crit
def forward(self, inp, targ):
inp = inp.flatten(1)
targ = targ.flatten(1)
mask = torch.isnan(targ)
inp, targ = inp[~mask], targ[~mask]
return self.loss(inp, targ)
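# Example (illustrative sketch): wrap any criterion so that NaN-padded target
# steps are excluded from the loss:
#   crit = MaskedLossWrapper(nn.MSELoss())
#   targ = torch.tensor([[1., float('nan')]])  # second step has no label
#   crit(torch.tensor([[.8, .3]]), targ)       # only the first element counts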
# %% ../nbs/016_losses.ipynb 9
class CenterLoss(Module):
r"""
Code in Pytorch has been slightly modified from: https://github.com/KaiyangZhou/pytorch-center-loss/blob/master/center_loss.py
Based on paper: Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Args:
c_out (int): number of classes.
logits_dim (int): dim 1 of the logits. By default same as c_out (for one hot encoded logits)
"""
def __init__(self, c_out, logits_dim=None):
logits_dim = ifnone(logits_dim, c_out)
self.c_out, self.logits_dim = c_out, logits_dim
self.centers = nn.Parameter(torch.randn(c_out, logits_dim))
self.classes = nn.Parameter(torch.arange(c_out).long(), requires_grad=False)
def forward(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, logits_dim).
labels: ground truth labels with shape (batch_size).
"""
bs = x.shape[0]
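        # squared Euclidean distances via the expansion
        # ||x - c||^2 = ||x||^2 + ||c||^2 - 2 * x @ c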
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(bs, self.c_out) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.c_out, bs).T
distmat = torch.addmm(distmat, x, self.centers.T, beta=1, alpha=-2)
labels = labels.unsqueeze(1).expand(bs, self.c_out)
mask = labels.eq(self.classes.expand(bs, self.c_out))
dist = distmat * mask.float()
loss = dist.clamp(min=1e-12, max=1e+12).sum() / bs
return loss
class CenterPlusLoss(Module):
def __init__(self, loss, c_out, λ=1e-2, logits_dim=None):
self.loss, self.c_out, self.λ = loss, c_out, λ
self.centerloss = CenterLoss(c_out, logits_dim)
def forward(self, x, labels):
return self.loss(x, labels) + self.λ * self.centerloss(x, labels)
def __repr__(self): return f"CenterPlusLoss(loss={self.loss}, c_out={self.c_out}, λ={self.λ})"
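# Example (illustrative sketch): add the center-loss penalty to a standard
# classification loss; λ balances the two terms:
#   crit = CenterPlusLoss(nn.CrossEntropyLoss(), c_out=10, λ=1e-2)
#   crit(torch.randn(8, 10), torch.randint(0, 10, (8,)))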
# %% ../nbs/016_losses.ipynb 12
class FocalLoss(Module):
""" Weighted, multiclass focal loss"""
def __init__(self, alpha:Optional[Tensor]=None, gamma:float=2., reduction:str='mean'):
"""
Args:
alpha (Tensor, optional): Weights for each class. Defaults to None.
gamma (float, optional): A constant, as described in the paper. Defaults to 2.
reduction (str, optional): 'mean', 'sum' or 'none'. Defaults to 'mean'.
"""
self.alpha, self.gamma, self.reduction = alpha, gamma, reduction
self.nll_loss = nn.NLLLoss(weight=alpha, reduction='none')
def forward(self, x: Tensor, y: Tensor) -> Tensor:
log_p = F.log_softmax(x, dim=-1)
pt = log_p[torch.arange(len(x)), y].exp()
ce = self.nll_loss(log_p, y)
loss = (1 - pt) ** self.gamma * ce
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
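# Example (illustrative sketch): gamma down-weights easy examples and alpha
# re-balances classes:
#   crit = FocalLoss(alpha=torch.tensor([.25, .75]), gamma=2.)
#   crit(torch.randn(8, 2), torch.randint(0, 2, (8,)))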
# %% ../nbs/016_losses.ipynb 14
class TweedieLoss(Module):
def __init__(self, p=1.5, eps=1e-8):
"""
Tweedie loss as calculated in LightGBM
Args:
p: tweedie variance power (1 < p < 2)
eps: small number to avoid log(zero).
"""
assert 1 < p < 2, "make sure 1 < p < 2"
self.p, self.eps = p, eps
def forward(self, inp, targ):
"Poisson and compound Poisson distribution, targ >= 0, inp > 0"
inp = inp.flatten()
targ = targ.flatten()
torch.clamp_min_(inp, self.eps)
a = targ * torch.exp((1 - self.p) * torch.log(inp)) / (1 - self.p)
b = torch.exp((2 - self.p) * torch.log(inp)) / (2 - self.p)
loss = -a + b
return loss.mean()
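# Example (illustrative sketch): suited to non-negative, zero-inflated targets
# such as sales counts or rainfall:
#   crit = TweedieLoss(p=1.5)
#   crit(torch.rand(16) + .01, torch.rand(16).round())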
|
89d9f46eff7effbdeb5e7a04a5b5b29b5fc3700b
|
28cf7b16dd29a5802d09b44b0186f6ae2c5ff0ed
|
/kuryr_kubernetes/tests/unit/test_utils.py
|
5ac48f215ceafb66ab0d93a46c206f8643c5ac02
|
[
"Apache-2.0"
] |
permissive
|
openstack/kuryr-kubernetes
|
c292826abfb8aa0d3f8ef3b1007362162db16956
|
4993c7a4b2d7e4b053832bf39602f2573fad6266
|
refs/heads/master
| 2023-08-18T19:21:02.487908
| 2023-08-03T13:58:11
| 2023-08-03T13:58:11
| 58,626,548
| 169
| 78
|
Apache-2.0
| 2022-04-13T02:27:52
| 2016-05-12T09:14:29
|
Python
|
UTF-8
|
Python
| false
| false
| 21,734
|
py
|
test_utils.py
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import uuid
from openstack import exceptions as os_exc
from openstack.network.v2 import port as os_port
from openstack.network.v2 import subnet as os_subnet
from os_vif import objects
from oslo_config import cfg
from oslo_utils import timeutils
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.objects import vif
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests import fake
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
from kuryr_kubernetes import utils
CONF = cfg.CONF
class TestUtils(test_base.TestCase):
def setUp(self):
super().setUp()
cfg.CONF.set_override('resource_tags', [], group='neutron_defaults')
@mock.patch('socket.gethostname')
def test_get_node_name(self, m_gethostname):
m_gethostname.return_value = 'foo'
res = utils.get_node_name()
self.assertEqual('foo', res)
m_gethostname.assert_called_once_with()
@mock.patch('requests.get')
def test_get_leader_name(self, m_get):
m_get.return_value = mock.Mock(json=mock.Mock(
return_value={'name': 'foo'}))
res = utils.get_leader_name()
m_get.assert_called_once_with(
'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port)
self.assertEqual('foo', res)
@mock.patch('requests.get')
def test_get_leader_name_malformed(self, m_get):
m_get.return_value = mock.Mock(json=mock.Mock(
return_value={'name2': 'foo'}))
res = utils.get_leader_name()
m_get.assert_called_once_with(
'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port)
self.assertIsNone(res)
@mock.patch('requests.get')
def test_get_leader_name_exc(self, m_get):
m_get.side_effect = Exception
res = utils.get_leader_name()
m_get.assert_called_once_with(
'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port)
self.assertIsNone(res)
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_network')
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_subnet')
def test_get_subnet(self, m_osv_subnet, m_osv_network):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
subnet = mock.MagicMock()
network = mock.MagicMock()
subnet_id = mock.sentinel.subnet_id
network_id = mock.sentinel.network_id
neutron_subnet = os_subnet.Subnet(**{'network_id': network_id})
neutron_network = mock.sentinel.neutron_network
os_net.get_subnet.return_value = neutron_subnet
os_net.get_network.return_value = neutron_network
m_osv_subnet.return_value = subnet
m_osv_network.return_value = network
ret = utils.get_subnet(subnet_id)
self.assertEqual(network, ret)
os_net.get_subnet.assert_called_once_with(subnet_id)
os_net.get_network.assert_called_once_with(network_id)
m_osv_subnet.assert_called_once_with(neutron_subnet)
m_osv_network.assert_called_once_with(neutron_network)
network.subnets.objects.append.assert_called_once_with(subnet)
def test_extract_pod_annotation(self):
vif_obj = objects.vif.VIFBase()
ps = vif.PodState(default_vif=vif_obj)
d = ps.obj_to_primitive()
result = utils.extract_pod_annotation(d)
self.assertEqual(vif.PodState.obj_name(), result.obj_name())
self.assertEqual(vif_obj, result.default_vif)
def test_extract_pod_annotation_convert(self):
vif_obj = objects.vif.VIFBase()
d = vif_obj.obj_to_primitive()
result = utils.extract_pod_annotation(d)
self.assertEqual(vif.PodState.obj_name(), result.obj_name())
self.assertEqual(vif_obj, result.default_vif)
def test__has_kuryrnetwork_crd(self):
kuryrnet_crd = {
"apiVersion": "openstack.org/v1",
"items": [
],
"kind": "KuryrNetworkList",
"metadata": {
"continue": "",
"resourceVersion": "33018",
}
}
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.return_value = kuryrnet_crd
kuryrnets_url = k_const.K8S_API_CRD_KURYRNETWORKS
resp = utils.has_kuryr_crd(kuryrnets_url)
self.assertEqual(resp, True)
def test__has_kuryr_crd_error(self):
crds = [k_const.K8S_API_CRD_KURYRNETWORKS,
k_const.K8S_API_CRD_KURYRNETWORKPOLICIES,
k_const.K8S_API_CRD_KURYRLOADBALANCERS]
for crd_url in crds:
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = k_exc.K8sClientException
resp = utils.has_kuryr_crd(crd_url)
self.assertEqual(resp, False)
kubernetes.get.assert_called_once()
def test_get_endpoints_link(self):
service = {'apiVersion': 'v1',
'kind': 'Service',
'metadata': {'namespace': 'default',
'name': 'test'}}
ret = utils.get_endpoints_link(service)
expected_link = "/api/v1/namespaces/default/endpoints/test"
self.assertEqual(expected_link, ret)
def test_get_service_ports(self):
service = {'spec': {'ports': [
{'port': 1, 'targetPort': 1},
{'port': 2, 'name': 'X', 'protocol': 'UDP', 'targetPort': 2},
{'port': 3, 'name': 'Y', 'protocol': 'SCTP', 'targetPort': 3}
]}}
expected_ret = [
{'port': 1, 'name': None, 'protocol': 'TCP', 'targetPort': '1'},
{'port': 2, 'name': 'X', 'protocol': 'UDP', 'targetPort': '2'},
{'port': 3, 'name': 'Y', 'protocol': 'SCTP', 'targetPort': '3'}]
ret = utils.get_service_ports(service)
self.assertEqual(expected_ret, ret)
@mock.patch('kuryr_kubernetes.utils.get_service_ports')
def test_has_port_changes(self, m_get_service_ports):
service = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': 'serv-1',
'namespace': 'ns1'
},
'spec': {
'ports': [
{
'port': 1,
'name': 'X',
'protocol': 'TCP',
'targetPort': '1'
}
]
}
}
lb_crd_spec = {
'spec': {
'ports': [
{
'name': 'Y',
'protocol': 'TCP',
'port': 2,
'targetPort': 2
}
]
}
}
ret = utils.has_port_changes(service, lb_crd_spec)
self.assertTrue(ret)
@mock.patch('kuryr_kubernetes.utils.get_service_ports')
def test_has_port_changes_more_ports(self, m_get_service_ports):
service = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': 'serv-1',
'namespace': 'ns1'
},
'spec': {
'ports': [
{
'port': 1,
'name': 'X',
'protocol': 'TCP',
'targetPort': '1'
}
]
}
}
lb_crd_spec = {
'spec': {
'ports': [
{
'name': 'X',
'protocol': 'TCP',
'port': 1,
'targetPort': 1
},
{
'name': 'Y',
'protocol': 'TCP',
'port': 2,
'targetPort': 2
}
]
}
}
ret = utils.has_port_changes(service, lb_crd_spec)
self.assertTrue(ret)
@mock.patch('kuryr_kubernetes.utils.get_service_ports')
def test_has_port_changes_no_changes(self, m_get_service_ports):
service = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': 'serv-1',
'namespace': 'ns1'
},
'spec': {
'ports': [
{
'port': 1,
'name': 'X',
'protocol': 'TCP',
'targetPort': '1'
},
{
'name': 'Y',
'protocol': 'TCP',
'port': 2,
'targetPort': '2'
}
]
}
}
lb_crd_spec = {
'spec': {
'ports': [
{
'name': 'X',
'protocol': 'TCP',
'port': 1,
'targetPort': '1'
},
{
'name': 'Y',
'protocol': 'TCP',
'port': 2,
'targetPort': '2'
}
]
}
}
ret = utils.has_port_changes(service, lb_crd_spec)
self.assertFalse(ret)
def test_get_nodes_ips(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
ip1 = os_port.Port(
fixed_ips=[{'ip_address': '10.0.0.1', 'subnet_id': 'foo'}],
trunk_details={'trunk_id': 'wow', 'sub_ports': []},
)
ip2 = os_port.Port(
fixed_ips=[{'ip_address': '10.0.0.2', 'subnet_id': 'bar'}],
trunk_details={'trunk_id': 'odd', 'sub_ports': []},
)
ip3 = os_port.Port(
fixed_ips=[{'ip_address': '10.0.0.3', 'subnet_id': 'baz'}],
trunk_details=None,
)
ip4 = os_port.Port(
fixed_ips=[{'ip_address': '10.0.0.4', 'subnet_id': 'zab'}],
trunk_details={'trunk_id': 'eek', 'sub_ports': []},
)
ports = (p for p in [ip1, ip2, ip3, ip4])
os_net.ports.return_value = ports
trunk_ips = utils.get_nodes_ips(['foo', 'bar'])
os_net.ports.assert_called_once_with(status='ACTIVE')
self.assertEqual(trunk_ips, [ip1.fixed_ips[0]['ip_address'],
ip2.fixed_ips[0]['ip_address']])
def test_get_nodes_ips_tagged(self):
CONF.set_override('resource_tags', ['foo'], group='neutron_defaults')
self.addCleanup(CONF.clear_override, 'resource_tags',
group='neutron_defaults')
os_net = self.useFixture(k_fix.MockNetworkClient()).client
ip1 = os_port.Port(
fixed_ips=[{'ip_address': '10.0.0.1', 'subnet_id': 'foo'}],
trunk_details={'trunk_id': 'wow', 'sub_ports': []},
)
ip2 = os_port.Port(
fixed_ips=[{'ip_address': '10.0.0.2', 'subnet_id': 'bar'}],
trunk_details=None,
)
ports = (p for p in [ip1, ip2])
os_net.ports.return_value = ports
trunk_ips = utils.get_nodes_ips(['foo'])
os_net.ports.assert_called_once_with(status='ACTIVE', tags=['foo'])
self.assertEqual(trunk_ips, [ip1.fixed_ips[0]['ip_address']])
def test_get_subnet_cidr(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
subnet_id = mock.sentinel.subnet_id
subnet = os_subnet.Subnet(cidr='10.0.0.0/24')
os_net.get_subnet.return_value = subnet
result = utils.get_subnet_cidr(subnet_id)
os_net.get_subnet.assert_called_once_with(subnet_id)
self.assertEqual(result, '10.0.0.0/24')
def test_get_subnet_cidr_no_such_subnet(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
subnet_id = mock.sentinel.subnet_id
os_net.get_subnet.side_effect = os_exc.ResourceNotFound
self.assertRaises(os_exc.ResourceNotFound, utils.get_subnet_cidr,
subnet_id)
os_net.get_subnet.assert_called_once_with(subnet_id)
def test_get_current_endpoints_target_with_target_ref(self):
ep = {'addresses': ['10.0.2.107'], 'conditions': {'ready': True},
'targetRef': {'kind': 'Pod', 'name': 'test-868d9cbd68-xq2fl',
'namespace': 'test2'}}
port = {'port': 8080, 'protocol': 'TCP'}
spec_ports = {None: '31d59e41-05db-4a39-8aca-6a9a572c83cd'}
ep_name = 'test'
target = utils.get_current_endpoints_target(
ep, port, spec_ports, ep_name)
self.assertEqual(
target, ('10.0.2.107', 'test-868d9cbd68-xq2fl', 8080,
'31d59e41-05db-4a39-8aca-6a9a572c83cd'))
def test_get_current_endpoints_target_without_target_ref(self):
ep = {'addresses': ['10.0.1.208'], 'conditions': {'ready': True}}
port = {'port': 8080, 'protocol': 'TCP'}
spec_ports = {None: '4472fab1-f01c-46a7-b197-5cba4f2d7135'}
ep_name = 'test'
target = utils.get_current_endpoints_target(
ep, port, spec_ports, ep_name)
self.assertEqual(
target, ('10.0.1.208', 'test', 8080,
'4472fab1-f01c-46a7-b197-5cba4f2d7135'))
def test_get_klb_crd_path(self):
res = {'apiVersion': 'v1',
'kind': 'Endpoints',
'metadata': {'name': 'my-service',
'namespace': 'default'}}
self.assertEqual(utils.get_klb_crd_path(res),
'/apis/openstack.org/v1/namespaces/default/'
'kuryrloadbalancers/my-service')
def test_get_res_link_core_res(self):
res = {'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'name': 'pod-1',
'namespace': 'default'}}
self.assertEqual(utils.get_res_link(res),
'/api/v1/namespaces/default/pods/pod-1')
def test_get_res_link_no_existent(self):
res = {'apiVersion': 'customapi/v1',
'kind': 'ItsATrap!',
'metadata': {'name': 'pod-1',
'namespace': 'default'}}
self.assertRaises(KeyError, utils.get_res_link, res)
def test_get_res_link_beta_res(self):
res = {'apiVersion': 'networking.k8s.io/v2beta2',
'kind': 'NetworkPolicy',
'metadata': {'name': 'np-1',
'namespace': 'default'}}
self.assertEqual(utils.get_res_link(res), '/apis/networking.k8s.io/'
'v2beta2/namespaces/default/networkpolicies/np-1')
def test_get_res_link_no_namespace(self):
res = {'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {'name': 'ns-1'}}
self.assertEqual(utils.get_res_link(res), '/api/v1/namespaces/ns-1')
def test_get_res_link_custom_api(self):
res = {'apiVersion': 'openstack.org/v1',
'kind': 'KuryrPort',
'metadata': {'name': 'kp-1',
'namespace': 'default'}}
self.assertEqual(utils.get_res_link(res),
'/apis/openstack.org/v1/namespaces/default/'
'kuryrports/kp-1')
def test_get_res_link_no_apiversion(self):
res = {'kind': 'KuryrPort',
'metadata': {'name': 'kp-1',
'namespace': 'default'}}
self.assertRaises(KeyError, utils.get_res_link, res)
def test_get_api_ver_core_api(self):
path = '/api/v1/namespaces/default/pods/pod-123'
self.assertEqual(utils.get_api_ver(path), 'v1')
def test_get_api_ver_custom_resource(self):
path = '/apis/openstack.org/v1/namespaces/default/kuryrport/pod-123'
self.assertEqual(utils.get_api_ver(path), 'openstack.org/v1')
def test_get_api_ver_random_path(self):
path = '/?search=foo'
self.assertRaises(ValueError, utils.get_api_ver, path)
def test_get_res_selflink_still_available(self):
res = {'metadata': {'selfLink': '/foo'}}
self.assertEqual(utils.get_res_link(res), '/foo')
@mock.patch('kuryr_kubernetes.clients.get_network_client')
def test_get_subnet_id(self, m_get_net):
m_net = mock.Mock()
m_get_net.return_value = m_net
subnets = (mock.Mock(id=mock.sentinel.subnet1),
mock.Mock(id=mock.sentinel.subnet2))
m_net.subnets.return_value = iter(subnets)
filters = {'name': 'foo', 'tags': 'bar'}
sub = utils.get_subnet_id(**filters)
m_net.subnets.assert_called_with(**filters)
self.assertEqual(mock.sentinel.subnet1, sub)
@mock.patch('kuryr_kubernetes.clients.get_network_client')
def test_get_subnet_not_found(self, m_get_net):
m_net = mock.Mock()
m_get_net.return_value = m_net
m_net.subnets.return_value = iter(())
filters = {'name': 'foo', 'tags': 'bar'}
sub = utils.get_subnet_id(**filters)
m_net.subnets.assert_called_with(**filters)
self.assertIsNone(sub)
def test_is_pod_completed_pending(self):
self.assertFalse(utils.is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_PENDING}}))
def test_is_pod_completed_succeeded(self):
self.assertTrue(utils.is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_SUCCEEDED}}))
def test_is_pod_completed_failed(self):
self.assertTrue(utils.is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_FAILED}}))
@mock.patch('kuryr_kubernetes.clients.get_network_client')
def test_cleanup_dead_ports_no_tags(self, m_get_net):
utils.cleanup_dead_ports()
m_get_net.assert_not_called()
@mock.patch('oslo_utils.timeutils.utcnow')
@mock.patch('kuryr_kubernetes.clients.get_network_client')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
def test_cleanup_dead_ports(self, m_get_k8s, m_get_net, m_utcnow):
cfg.CONF.set_override('resource_tags', ['foo'],
group='neutron_defaults')
m_net = mock.Mock()
time1 = '2022-04-14T09:00:00Z'
now = '2022-04-14T09:00:00Z'
m_utcnow.return_value = timeutils.parse_isotime(now)
port = os_port.Port(updated_at=time1, tags=['foo'])
m_net.ports.return_value = iter((port,))
m_get_net.return_value = m_net
m_k8s = mock.Mock()
m_k8s.get.return_value = {'items': [{'status': {'netId': 'netid'}}]}
m_get_k8s.return_value = m_k8s
utils.cleanup_dead_ports()
m_get_net.assert_called_once()
@mock.patch('oslo_utils.timeutils.utcnow')
@mock.patch('kuryr_kubernetes.clients.get_network_client')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
def test_cleanup_dead_no_tagged_ports(self, m_get_k8s, m_get_net,
m_utcnow):
cfg.CONF.set_override('resource_tags', ['foo'],
group='neutron_defaults')
m_net = mock.Mock()
time1 = '2022-04-14T09:00:00Z'
now = '2022-04-14T09:16:00Z'
m_utcnow.return_value = timeutils.parse_isotime(now)
port = os_port.Port(updated_at=time1, tags=[])
m_net.ports.return_value = iter((port,))
m_get_net.return_value = m_net
m_k8s = mock.Mock()
m_k8s.get.return_value = {'items': [{'status': {'netId': 'netid'}}]}
m_get_k8s.return_value = m_k8s
utils.cleanup_dead_ports()
m_get_net.assert_called_once()
m_net.delete_port.assert_called_once_with(port)
@mock.patch('kuryr_kubernetes.clients.get_network_client')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
def test_cleanup_dead_no_networks(self, m_get_k8s, m_get_net):
cfg.CONF.set_override('resource_tags', ['foo'],
group='neutron_defaults')
m_net = mock.Mock()
m_net.ports.return_value = iter([])
m_get_net.return_value = m_net
m_k8s = mock.Mock()
m_k8s.get.return_value = {'items': []}
m_get_k8s.return_value = m_k8s
utils.cleanup_dead_ports()
m_get_net.assert_called_once()
m_net.delete_port.assert_not_called()
def test__get_parent_port_ip(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
port_id = str(uuid.uuid4())
ip_address = mock.sentinel.ip_address
port_obj = fake.get_port_obj(ip_address=ip_address)
os_net.get_port.return_value = port_obj
self.assertEqual(ip_address, utils.get_parent_port_ip(port_id))
|
ca4f498a83ce19173956868571bd7dbd9acd5ee4
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/tools/python/src/Lib/plat-mac/lib-scriptpackages/Netscape/PowerPlant.py
|
d37e66fc807f9f9f2a889b5bc499923f5669bc24
|
[
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,590
|
py
|
PowerPlant.py
|
"""Suite PowerPlant:
Level 0, version 0
Generated from /Volumes/Sap/Applications (Mac OS 9)/Netscape Communicator\xe2\x84\xa2 Folder/Netscape Communicator\xe2\x84\xa2
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'ppnt'
class PowerPlant_Events:
_argmap_SwitchTellTarget = {
'to' : 'data',
}
def SwitchTellTarget(self, _no_object=None, _attributes={}, **_arguments):
"""SwitchTellTarget: Makes an object the \xd2focus\xd3 of AppleEvents
Keyword argument to: reference to new focus of AppleEvents
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'ppnt'
_subcode = 'sttg'
aetools.keysubst(_arguments, self._argmap_SwitchTellTarget)
if _no_object is not None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_select = {
'data' : 'data',
}
def select(self, _object, _attributes={}, **_arguments):
"""select: Sets the present selection
Required argument: object to select or container of sub-objects to select
Keyword argument data: sub-object(s) to select
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'slct'
aetools.keysubst(_arguments, self._argmap_select)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_Enum_dbac = {
'DoNothing' : '\x00\x00\x00\x00', # No debugging action is taken.
'PostAlert' : '\x00\x00\x00\x01', # Post an alert.
'LowLevelDebugger' : '\x00\x00\x00\x02', # Break into the low level debugger (MacsBug).
'SourceDebugger' : '\x00\x00\x00\x03', # Break into the source level debugger (if source debugger is executing).
}
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
'dbac' : _Enum_dbac,
}
|
a60c2f3c12453879560a2b019718359c2ebf7a63
|
1d0613fb401e92b6861ea3f615561df854603db6
|
/KiBuzzard/deps/fonttools/Tests/misc/filenames_test.py
|
bb7b63c2983932182eda5212dd13ec9d7e058524
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"OFL-1.1",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
gregdavill/KiBuzzard
|
8c84a4339108c9942e1ec0e05e4110bba49fd265
|
88c4129b3fbed2cad718c01e5e2d29204e2f2071
|
refs/heads/main
| 2023-09-01T19:46:45.146077
| 2023-08-31T11:55:10
| 2023-08-31T11:55:10
| 328,686,533
| 358
| 36
|
MIT
| 2023-08-31T12:12:45
| 2021-01-11T14:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,267
|
py
|
filenames_test.py
|
import unittest
from fontTools.misc.filenames import (
userNameToFileName, handleClash1, handleClash2)
class UserNameToFilenameTest(unittest.TestCase):
def test_names(self):
self.assertEqual(userNameToFileName("a"),"a")
self.assertEqual(userNameToFileName("A"), "A_")
self.assertEqual(userNameToFileName("AE"), "A_E_")
self.assertEqual(userNameToFileName("Ae"), "A_e")
self.assertEqual(userNameToFileName("ae"), "ae")
self.assertEqual(userNameToFileName("aE"), "aE_")
self.assertEqual(userNameToFileName("a.alt"), "a.alt")
self.assertEqual(userNameToFileName("A.alt"), "A_.alt")
self.assertEqual(userNameToFileName("A.Alt"), "A_.A_lt")
self.assertEqual(userNameToFileName("A.aLt"), "A_.aL_t")
self.assertEqual(userNameToFileName(u"A.alT"), "A_.alT_")
self.assertEqual(userNameToFileName("T_H"), "T__H_")
self.assertEqual(userNameToFileName("T_h"), "T__h")
self.assertEqual(userNameToFileName("t_h"), "t_h")
self.assertEqual(userNameToFileName("F_F_I"), "F__F__I_")
self.assertEqual(userNameToFileName("f_f_i"), "f_f_i")
self.assertEqual(
userNameToFileName("Aacute_V.swash"),
"A_acute_V_.swash")
self.assertEqual(userNameToFileName(".notdef"), "_notdef")
self.assertEqual(userNameToFileName("con"), "_con")
self.assertEqual(userNameToFileName("CON"), "C_O_N_")
self.assertEqual(userNameToFileName("con.alt"), "_con.alt")
self.assertEqual(userNameToFileName("alt.con"), "alt._con")
def test_prefix_suffix(self):
prefix = "TEST_PREFIX"
suffix = "TEST_SUFFIX"
name = "NAME"
name_file = "N_A_M_E_"
self.assertEqual(
userNameToFileName(name, prefix=prefix, suffix=suffix),
prefix + name_file + suffix)
def test_collide(self):
prefix = "TEST_PREFIX"
suffix = "TEST_SUFFIX"
name = "NAME"
name_file = "N_A_M_E_"
collision_avoidance1 = "000000000000001"
collision_avoidance2 = "000000000000002"
exist = set()
generated = userNameToFileName(
name, exist, prefix=prefix, suffix=suffix)
exist.add(generated.lower())
self.assertEqual(generated, prefix + name_file + suffix)
generated = userNameToFileName(
name, exist, prefix=prefix, suffix=suffix)
exist.add(generated.lower())
self.assertEqual(
generated,
prefix + name_file + collision_avoidance1 + suffix)
generated = userNameToFileName(
name, exist, prefix=prefix, suffix=suffix)
self.assertEqual(
generated,
            prefix + name_file + collision_avoidance2 + suffix)
def test_ValueError(self):
with self.assertRaises(ValueError):
userNameToFileName(b"a")
with self.assertRaises(ValueError):
userNameToFileName({"a"})
with self.assertRaises(ValueError):
userNameToFileName(("a",))
with self.assertRaises(ValueError):
userNameToFileName(["a"])
with self.assertRaises(ValueError):
userNameToFileName(b"\xd8\x00")
def test_handleClash1(self):
prefix = ("0" * 5) + "."
suffix = "." + ("0" * 10)
existing = ["a" * 5]
e = list(existing)
self.assertEqual(
handleClash1(userName="A" * 5, existing=e, prefix=prefix,
suffix=suffix),
'00000.AAAAA000000000000001.0000000000'
)
e = list(existing)
e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
self.assertEqual(
handleClash1(userName="A" * 5, existing=e, prefix=prefix,
suffix=suffix),
'00000.AAAAA000000000000002.0000000000'
)
e = list(existing)
e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
self.assertEqual(
handleClash1(userName="A" * 5, existing=e, prefix=prefix,
suffix=suffix),
'00000.AAAAA000000000000001.0000000000'
)
def test_handleClash2(self):
prefix = ("0" * 5) + "."
suffix = "." + ("0" * 10)
existing = [prefix + str(i) + suffix for i in range(100)]
e = list(existing)
self.assertEqual(
handleClash2(existing=e, prefix=prefix, suffix=suffix),
'00000.100.0000000000'
)
e = list(existing)
e.remove(prefix + "1" + suffix)
self.assertEqual(
handleClash2(existing=e, prefix=prefix, suffix=suffix),
'00000.1.0000000000'
)
e = list(existing)
e.remove(prefix + "2" + suffix)
self.assertEqual(
handleClash2(existing=e, prefix=prefix, suffix=suffix),
'00000.2.0000000000'
)
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
584fbd63242da458753fe6e11c276a5bffdc9736
|
551990e68feda34d2a9173b05cc3a7259f4e8c9a
|
/direct/nn/cirim/config.py
|
3c3606c20b0a3d39ce741085bd12ee82df208ab4
|
[
"Apache-2.0"
] |
permissive
|
NKI-AI/direct
|
a5c1ca0cb75d709b62e94ff76aba361e188d2d59
|
2a4c29342bc52a404aae097bc2654fb4323e1ac8
|
refs/heads/main
| 2023-08-03T11:37:52.941124
| 2023-06-28T14:11:56
| 2023-06-28T14:11:56
| 269,966,010
| 151
| 35
|
Apache-2.0
| 2023-06-28T14:11:58
| 2020-06-06T11:53:07
|
Python
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
config.py
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
from dataclasses import dataclass
from direct.config.defaults import ModelConfig
@dataclass
class CIRIMConfig(ModelConfig):
time_steps: int = 8 # :math:`T`
depth: int = 2
recurrent_hidden_channels: int = 64
num_cascades: int = 8
no_parameter_sharing: bool = True
|
0bf374947e304dd2f6d404c2056a38d4cf46dca7
|
7aeeca15144ad05ea237db29d9525243d57bc14b
|
/config/_zaborona_v2/root/zaborona-vpn/scripts/collapse_blockedbyip_noid2971.py
|
82935905b407018f552be0f43e34fc1919987c78
|
[] |
no_license
|
zhovner/zaborona_help
|
bd1826fb3e0b99943634f0c4fb0e295c497d6b47
|
6f1eaa92835b35684b52835499848e0f673d7b67
|
refs/heads/master
| 2023-08-09T07:30:48.003208
| 2023-08-03T14:52:18
| 2023-08-03T14:52:18
| 91,894,569
| 353
| 142
| null | 2023-09-07T17:40:20
| 2017-05-20T14:28:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 727
|
py
|
collapse_blockedbyip_noid2971.py
|
#!/usr/bin/env python3
import ipaddress
with open('result/iplist_blockedbyip_noid2971.txt', 'r') as f:
    addrlist = f.read()
with open('result/iplist_special_range.txt', 'r') as f:
    speciallist = f.read()
nlist = [ipaddress.IPv4Network(addr) for addr in addrlist.split()]
slist = [ipaddress.IPv4Network(addr) for addr in speciallist.split()]
print('IP Addresses before collapsing:', len(nlist))
# removing items from nlist while iterating over it skips the entry that
# follows each deletion, so build a filtered list instead
nlist = [v for v in nlist if not any(addr.overlaps(v) for addr in slist)]
print('IP Addresses after removing special ranges:', len(nlist))
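# note (illustrative): merging adjacent networks before writing, as the output
# filename suggests, could be done with
#   nlist = list(ipaddress.collapse_addresses(nlist))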
with open('result/iplist_blockedbyip_noid2971_collapsed.txt', 'w') as collapsed_file:
    cnt = 0
    for addr in nlist:
        print(str(addr).replace('/32', ''), file=collapsed_file)
        cnt += 1
|
187efa77aea39fb8492d706373900d3f16e28ad7
|
c1ab5fc6d37749cf7dd693a8f6d5475dfa54cd45
|
/examples/dynamic-client/accept_header.py
|
7bc27e3abc332788d428a93d19fdfe87348a7be0
|
[
"Apache-2.0"
] |
permissive
|
kubernetes-client/python
|
2d10e5d7c1358aa4473c1fcd54d2c5a1085cf56e
|
68d5a1479e7d735ea454021bc54e453c9b31baf7
|
refs/heads/master
| 2023-09-01T11:23:54.508420
| 2023-08-31T21:04:31
| 2023-08-31T21:04:31
| 72,473,727
| 5,792
| 3,654
|
Apache-2.0
| 2023-09-13T18:34:16
| 2016-10-31T20:08:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
accept_header.py
|
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example demonstrates how to pass the custom header in the cluster.
"""
from kubernetes import config, dynamic
from kubernetes.client import api_client
def main():
# Creating a dynamic client
client = dynamic.DynamicClient(
api_client.ApiClient(configuration=config.load_kube_config())
)
# fetching the node api
api = client.resources.get(api_version="v1", kind="Node")
# Creating a custom header
params = {'header_params': {'Accept': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}}
resp = api.get(**params)
# Printing the kind and apiVersion after passing new header params.
print("%s\t\t\t%s" %("VERSION", "KIND"))
print("%s\t\t%s" %(resp.apiVersion, resp.kind))
if __name__ == "__main__":
main()
|
96423ac8521ea8873ec7419c3729ef9c15211157
|
6d54a7b26d0eb82152a549a6a9dfde656687752c
|
/scripts/tools/memory/memdf/util/subprocess.py
|
8901d19b0cef568661903e08858e1dc816af0dc0
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
project-chip/connectedhomeip
|
81a123d675cf527773f70047d1ed1c43be5ffe6d
|
ea3970a7f11cd227ac55917edaa835a2a9bc4fc8
|
refs/heads/master
| 2023-09-01T11:43:37.546040
| 2023-09-01T08:01:32
| 2023-09-01T08:01:32
| 244,694,174
| 6,409
| 1,789
|
Apache-2.0
| 2023-09-14T20:56:31
| 2020-03-03T17:05:10
|
C++
|
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
subprocess.py
|
#
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Subprocess utilities."""
import logging
import subprocess
from typing import List
from memdf.util.config import Config
def run_tool_pipe(config: Config, command: List[str]) -> subprocess.Popen:
"""Run a command."""
if tool := config.getl(['tool', command[0]]):
command[0] = tool
logging.info('Execute: %s', ' '.join(command))
return subprocess.Popen(command, stdout=subprocess.PIPE)
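# Example (illustrative, assumes a populated Config and a 'firmware.elf' file):
#   proc = run_tool_pipe(config, ['nm', '--defined-only', 'firmware.elf'])
#   symbols = proc.stdout.read()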
|
c86bd2bc7829765f09e59c197d0299f384efd64c
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Alignment/MuonAlignment/test/test-read_cfg.py
|
f1afb82b96b2b66f3a366f9640606b6187ffee90
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
test-read_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
# DT geometry
process.load("Geometry.MuonCommonData.muonIdealGeometryXML_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.DTGeometry.dtGeometry_cfi")
# CSC geometry
#include "Geometry/MuonCommonData/data/muonEndcapIdealGeometryXML.cfi"
process.load("Geometry.CSCGeometry.cscGeometry_cfi")
# Misalignment example scenario producer
process.load("Alignment.MuonAlignment.Scenarios_cff")
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR')
),
files = cms.untracked.PSet(
info_txt = cms.untracked.PSet(
threshold = cms.untracked.string('INFO')
)
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.MisalignedTracker = cms.ESProducer("MisalignedTrackerESProducer",
process.MuonNoMovementsScenario
)
process.myprod = cms.EDAnalyzer("TestTranslation",
fileName = cms.untracked.string('misaligned-2.root')
)
process.asciiPrint = cms.OutputModule("AsciiOutputModule")
process.p1 = cms.Path(process.myprod)
process.ep = cms.EndPath(process.asciiPrint)
|
df97dc68b21abadc57421a1f2bbce4e587ed6634
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoBTag/Configuration/python/RecoBTag_FrontierConditions_DevDB_cff.py
|
14ab2d0db043fb8e1da28747a051e85737f27b01
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
RecoBTag_FrontierConditions_DevDB_cff.py
|
import FWCore.ParameterSet.Config as cms
from RecoBTag.TrackProbability.trackProbabilityFrontierCond_cfi import *
trackProbabilityFrontierCond.connect = 'frontier://FrontierDev/CMS_COND_BTAU'
|
4593e9660c524e611905683738db4b9e8393489b
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/common/Lib/Crypto/Random/Fortuna/FortunaGenerator.py
|
614bbd05d995ab093bea1f50deafdf3f247a1b96
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
FortunaGenerator.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/Crypto/Random/Fortuna/FortunaGenerator.py
__revision__ = '$Id$'
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
import struct
from Crypto.Util.number import ceil_shift, exact_log2, exact_div
from Crypto.Util import Counter
from Crypto.Cipher import AES
import SHAd256
class AESGenerator(object):
block_size = AES.block_size
key_size = 32
max_blocks_per_request = 65536
_four_kiblocks_of_zeros = b('\x00') * block_size * 4096
def __init__(self):
self.counter = Counter.new(nbits=self.block_size * 8, initial_value=0, little_endian=True)
self.key = None
self.block_size_shift = exact_log2(self.block_size)
self.blocks_per_key = exact_div(self.key_size, self.block_size)
self.max_bytes_per_request = self.max_blocks_per_request * self.block_size
return
def reseed(self, seed):
if self.key is None:
self.key = b('\x00') * self.key_size
self._set_key(SHAd256.new(self.key + seed).digest())
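        # advance the block counter; the Counter object increments on each call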
self.counter()
return
def pseudo_random_data(self, bytes):
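        # serve the request in 1 MiB (2**20 byte) chunks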
num_full_blocks = bytes >> 20
remainder = bytes & 1048575
retval = []
for i in xrange(num_full_blocks):
retval.append(self._pseudo_random_data(1048576))
retval.append(self._pseudo_random_data(remainder))
return b('').join(retval)
def _set_key(self, key):
self.key = key
self._cipher = AES.new(key, AES.MODE_CTR, counter=self.counter)
def _pseudo_random_data(self, bytes):
if not 0 <= bytes <= self.max_bytes_per_request:
raise AssertionError('You cannot ask for more than 1 MiB of data per request')
num_blocks = ceil_shift(bytes, self.block_size_shift)
retval = self._generate_blocks(num_blocks)[:bytes]
self._set_key(self._generate_blocks(self.blocks_per_key))
return retval
def _generate_blocks(self, num_blocks):
if self.key is None:
raise AssertionError('generator must be seeded before use')
retval = []
for i in xrange(num_blocks >> 12):
retval.append(self._cipher.encrypt(self._four_kiblocks_of_zeros))
remaining_bytes = (num_blocks & 4095) << self.block_size_shift
retval.append(self._cipher.encrypt(self._four_kiblocks_of_zeros[:remaining_bytes]))
return b('').join(retval)
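# Example (illustrative, not part of the decompiled source): the generator must
# be seeded before use and rekeys itself after every request:
#   gen = AESGenerator()
#   gen.reseed(b('32 bytes of previously gathered entropy!'))
#   block = gen.pseudo_random_data(64)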
|
f2df8d57c760b808525abcf2d98e919926157ed9
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/A_Primer_on_Scientific_Programming_with_Python/files/read_pairs3.py
|
e9eb13bd96b5cfc34e2ff3dccc94675796838871
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 274
|
py
|
read_pairs3.py
|
infile = open('read_pairs3.dat', 'r')
listtext = '['
for line in infile:
# add line, without newline (line[:-1]), with a trailing comma:
listtext += line[:-1] + ', '
infile.close()
listtext = listtext + ']'
pairs = eval(listtext)
import pprint; pprint.pprint(pairs)
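# A safer variant (illustrative sketch, not part of the book's example):
# ast.literal_eval parses the same "(a, b), (c, d)" lines without being able
# to execute arbitrary code the way eval can:
#   import ast
#   with open('read_pairs3.dat') as f:
#       pairs = ast.literal_eval('[%s]' % ','.join(line.strip() for line in f))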
|
f0c0aa70551724566dd90d1a7e046167faf83ebf
|
e5f8d24525a211750900c3c8e7a631b344aa4443
|
/demo/futures/test_futures.py
|
ff1cb481d4b7e78963ced8680944a67baecb41af
|
[] |
permissive
|
mpi4py/mpi4py
|
569ce3f4707e54fa2c1e041cc9b96147337a1f10
|
8bdd0c30f98797deefa4e4f129898fefb2b1e171
|
refs/heads/master
| 2023-08-31T21:39:18.799184
| 2023-08-28T09:41:32
| 2023-08-28T13:24:37
| 12,620,272
| 720
| 125
|
BSD-2-Clause
| 2023-09-14T21:16:17
| 2013-09-05T14:44:25
|
Python
|
UTF-8
|
Python
| false
| false
| 51,326
|
py
|
test_futures.py
|
import os
import sys
import time
import random
import warnings
import functools
import threading
import unittest
from mpi4py import MPI
from mpi4py import futures
from concurrent.futures._base import (
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
)
SHARED_POOL = futures._core.SharedPool is not None
WORLD_SIZE = MPI.COMM_WORLD.Get_size()
def create_future(state=PENDING, exception=None, result=None):
f = futures.Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def check_global_var(x):
return global_var == x
def check_run_name(name):
return __name__ == name
def check_comm_workers():
comm = futures.get_comm_workers()
return comm.Get_size()
def sys_flags_get(name):
return getattr(sys.flags, name)
class ExecutorMixin:
worker_count = 2
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as exc:
self.skipTest(str(exc))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
self.assertLess(dt, 60, 'synchronization issue: test lasted too long')
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.MPIPoolExecutor
if 'coverage' in sys.modules:
executor_type = functools.partial(
executor_type,
python_args='-m coverage run'.split(),
)
@unittest.skipIf(not SHARED_POOL, 'not-shared-pool')
class SharedPoolInitTest(unittest.TestCase):
executor_type = futures.MPIPoolExecutor
def test_initializer_0(self):
executor = self.executor_type(
initializer=time.sleep,
initargs=(0,),
)
executor.bootup()
executor.submit(time.sleep, 0).result()
executor.shutdown()
def test_initializer_1(self):
for _ in range(2):
executor = self.executor_type(
initializer=sleep_and_raise,
initargs=(0.2,),
)
executor.submit(time.sleep, 0).cancel()
future = executor.submit(time.sleep, 0)
with self.assertRaises(futures.BrokenExecutor):
executor.submit(time.sleep, 0).result()
with self.assertRaises(futures.BrokenExecutor):
future.result()
with self.assertRaises(futures.BrokenExecutor):
executor.submit(time.sleep, 0)
def test_initializer_2(self):
executor = self.executor_type(
initializer=time.sleep,
initargs=(0,),
)
executor.bootup()
executor.submit(time.sleep, 0).result()
executor.shutdown()
def test_initializer_3(self):
executor = self.executor_type()
executor.submit(time.sleep, 0).result()
executor.shutdown()
def test_initializer_4(self):
def test(tid):
with self.executor_type(
initializer=time.sleep,
initargs=(random.random()/100,),
) as executor:
futures.as_completed([
executor.submit(time.sleep, random.random()/100)
for _ in range(executor.num_workers + tid)
])
ts = [threading.Thread(target=test, args=(i,)) for i in range(5)]
for t in ts: t.start()
for t in ts: t.join()
class ProcessPoolInitTest(ProcessPoolMixin,
unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def _prime_executor(self):
pass
def test_init(self):
self.executor_type()
def test_init_args(self):
self.executor_type(1)
def test_init_kwargs(self):
executor = self.executor_type(
python_exe=sys.executable,
max_workers=None,
mpi_info=dict(soft="0:1"),
globals=None,
main=False,
path=[],
wdir=os.getcwd(),
env={},
use_pkl5=None,
backoff=0.001,
)
futures = [executor.submit(time.sleep, 0)
for _ in range(self.worker_count)]
for f in futures:
f.result()
executor.shutdown()
def test_init_pyargs(self):
executor_type = futures.MPIPoolExecutor
executor = executor_type(python_args=['-B', '-Wi'])
executor.submit(time.sleep, 0).result()
executor.shutdown()
@unittest.skipIf(SHARED_POOL, 'shared-pool')
def test_init_sys_flags(self):
executor_type = futures.MPIPoolExecutor
sys_flags = [
('debug', '-d', 1),
('debug', '-dd', 2),
('optimize', '-O', 1),
('optimize', '-OO', 2),
('dont_write_bytecode', '-B', True),
]
if sys.version_info >= (3, 7):
sys_flags.extend([
('dev_mode', '-Xdev', True),
('utf8_mode', '-Xutf8', True),
])
if sys.version_info >= (3, 11):
sys_flags.extend([
('safe_path', '-P', True),
])
for (name, flag, value) in sys_flags:
if not isinstance(value, bool):
if isinstance(value, int):
value += getattr(sys.flags, name)
with executor_type(python_args=[flag]) as executor:
result = executor.submit(sys_flags_get, name).result()
if isinstance(value, bool):
result = bool(result)
self.assertEqual(value, result, f"sys.flags.{name}")
@unittest.skipIf(SHARED_POOL, 'shared-pool')
def test_init_globals(self):
executor = self.executor_type(globals=dict(global_var=42))
future1 = executor.submit(check_global_var, 42)
future2 = executor.submit(check_global_var, 24)
self.assertTrue(future1.result())
self.assertFalse(future2.result())
executor.shutdown()
@unittest.skipIf(SHARED_POOL and WORLD_SIZE == 1, 'shared-pool')
def test_run_name(self):
executor = self.executor_type()
run_name = futures._core.MAIN_RUN_NAME
future = executor.submit(check_run_name, run_name)
self.assertTrue(future.result())
@unittest.skipIf(SHARED_POOL and WORLD_SIZE > 2, 'shared-pool')
def test_max_workers(self):
executor = self.executor_type(max_workers=1)
self.assertEqual(executor.num_workers, 1)
self.assertEqual(executor.num_workers, executor._max_workers)
executor.shutdown()
self.assertEqual(executor.num_workers, 0)
self.assertEqual(executor.num_workers, executor._max_workers)
@unittest.skipIf(SHARED_POOL and WORLD_SIZE > 2, 'shared-pool')
def test_max_workers_environ(self):
save = os.environ.get('MPI4PY_FUTURES_MAX_WORKERS')
os.environ['MPI4PY_FUTURES_MAX_WORKERS'] = '1'
try:
executor = self.executor_type()
executor.submit(time.sleep, 0).result()
executor.shutdown()
executor = self.executor_type()
self.assertEqual(executor.num_workers, 1)
executor.shutdown()
self.assertEqual(executor.num_workers, 0)
finally:
del os.environ['MPI4PY_FUTURES_MAX_WORKERS']
if save is not None:
os.environ['MPI4PY_FUTURES_MAX_WORKERS'] = save
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaises(ValueError):
self.executor_type(max_workers=number)
def test_get_comm_workers(self):
executor = self.executor_type()
num_workers = executor.submit(check_comm_workers).result()
        self.assertEqual(executor.num_workers, num_workers)
self.assertRaises(RuntimeError, check_comm_workers)
@unittest.skipIf(SHARED_POOL, 'shared-pool')
def test_use_pkl5_kwarg(self):
executor = self.executor_type(use_pkl5=True)
executor.submit(time.sleep, 0).result()
executor.shutdown()
@unittest.skipIf(SHARED_POOL, 'shared-pool')
def test_use_pkl5_environ(self):
save = os.environ.get('MPI4PY_FUTURES_USE_PKL5')
try:
for value in ('false', 'true'):
os.environ['MPI4PY_FUTURES_USE_PKL5'] = value
executor = self.executor_type()
executor.submit(time.sleep, 0).result()
executor.shutdown()
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always')
os.environ['MPI4PY_FUTURES_USE_PKL5'] = 'foobar'
executor = self.executor_type()
executor.submit(time.sleep, 0).result()
executor.shutdown()
self.assertTrue(wlist)
msg = wlist[0].message
self.assertIsInstance(msg, RuntimeWarning)
self.assertIn('foobar', msg.args[0])
finally:
del os.environ['MPI4PY_FUTURES_USE_PKL5']
if save is not None:
os.environ['MPI4PY_FUTURES_USE_PKL5'] = save
def test_initializer(self):
executor = self.executor_type(
initializer=time.sleep,
initargs=(0,),
)
executor.submit(time.sleep, 0).result()
def test_initializer_bad(self):
with self.assertRaises(TypeError):
self.executor_type(initializer=123)
def test_initializer_error(self):
executor = self.executor_type(
initializer=sleep_and_raise,
initargs=(0.2,),
)
executor.submit(time.sleep, 0).cancel()
future = executor.submit(time.sleep, 0)
with self.assertRaises(futures.BrokenExecutor):
executor.submit(time.sleep, 0).result()
with self.assertRaises(futures.BrokenExecutor):
future.result()
with self.assertRaises(futures.BrokenExecutor):
executor.submit(time.sleep, 0)
self.assertEqual(executor.num_workers, 0)
def test_initializer_error_del(self):
executor = self.executor_type(
initializer=sleep_and_raise,
initargs=(0.2,),
)
executor.bootup()
del executor
def test_initializer_error_del_nowait(self):
executor = self.executor_type(
initializer=sleep_and_raise,
initargs=(1.2,),
)
executor.bootup(wait=False)
executor.shutdown(wait=False)
del executor
class ProcessPoolBootupTest(ProcessPoolMixin,
unittest.TestCase):
def _prime_executor(self):
pass
def test_bootup(self):
executor = self.executor_type(1)
executor.bootup()
executor.bootup()
executor.shutdown()
with self.assertRaises(RuntimeError):
executor.bootup()
def test_bootup_wait(self):
executor = self.executor_type(1)
executor.bootup(wait=True)
executor.bootup(wait=True)
executor.shutdown(wait=True)
with self.assertRaises(RuntimeError):
executor.bootup(wait=True)
def test_bootup_nowait(self):
executor = self.executor_type(1)
executor.bootup(wait=False)
executor.bootup(wait=False)
executor.shutdown(wait=False)
with self.assertRaises(RuntimeError):
executor.bootup(wait=False)
executor.shutdown(wait=True)
def test_bootup_nowait_wait(self):
executor = self.executor_type(1)
executor.bootup(wait=False)
executor.bootup(wait=True)
executor.shutdown()
with self.assertRaises(RuntimeError):
executor.bootup()
def test_bootup_shutdown_nowait(self):
executor = self.executor_type(1)
executor.bootup(wait=False)
executor.shutdown(wait=False)
worker = executor._pool
del executor
worker.join()
class ExecutorShutdownTestMixin:
def test_run_after_shutdown(self):
self.executor.shutdown()
with self.assertRaises(RuntimeError):
self.executor.submit(pow, 2, 5)
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.01) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ProcessPoolShutdownTest(ProcessPoolMixin,
ExecutorShutdownTestMixin,
unittest.TestCase):
def _prime_executor(self):
pass
def test_shutdown(self):
executor = self.executor_type(max_workers=1)
self.assertIsNone(executor._pool)
self.assertFalse(executor._shutdown)
executor.submit(mul, 21, 2)
executor.submit(mul, 6, 7)
executor.submit(mul, 3, 14)
self.assertIsNotNone(executor._pool.thread)
self.assertFalse(executor._shutdown)
executor.shutdown(wait=False)
self.assertIsNotNone(executor._pool.thread)
self.assertTrue(executor._shutdown)
executor.shutdown(wait=True)
self.assertIsNone(executor._pool)
self.assertTrue(executor._shutdown)
def test_submit_shutdown_cancel(self):
executor = self.executor_type(max_workers=1)
executor.bootup()
num_workers = executor.num_workers
for _ in range(num_workers*100):
executor.submit(time.sleep, 0.1)
fut = executor.submit(time.sleep, 0)
executor.shutdown(wait=False, cancel_futures=False)
self.assertFalse(fut.cancelled())
executor.shutdown(wait=True, cancel_futures=True)
self.assertTrue(fut.cancelled())
def test_submit_shutdown_cancel_wait(self):
executor = self.executor_type(max_workers=1)
executor.bootup()
num_workers = executor.num_workers
fut1 = executor.submit(time.sleep, 0.1)
for _ in range(num_workers*100):
executor.submit(time.sleep, 0.1)
fut2 = executor.submit(time.sleep, 0)
fut3 = executor.submit(time.sleep, 0)
time.sleep(0.2)
executor.shutdown(wait=False, cancel_futures=True)
done, not_done = futures.wait({fut1, fut2, fut3})
self.assertEqual(len(not_done), 0)
self.assertFalse(fut1.cancelled())
self.assertTrue(fut2.cancelled())
self.assertTrue(fut3.cancelled())
executor.shutdown(wait=True, cancel_futures=True)
def test_shutdown_cancel(self):
executor = self.executor_type(max_workers=1)
executor.bootup()
executor._pool.cancel()
executor.shutdown(wait=False, cancel_futures=False)
executor.shutdown(wait=False, cancel_futures=False)
executor.shutdown(wait=False, cancel_futures=True)
executor.shutdown(wait=False, cancel_futures=True)
executor.shutdown(wait=True, cancel_futures=True)
executor.shutdown(wait=True, cancel_futures=True)
def test_init_bootup_shutdown(self):
executor = self.executor_type(max_workers=1)
self.assertIsNone(executor._pool)
self.assertFalse(executor._shutdown)
executor.bootup()
self.assertTrue(executor._pool.event.is_set())
self.assertFalse(executor._shutdown)
executor.shutdown()
self.assertIsNone(executor._pool)
self.assertTrue(executor._shutdown)
def test_context_manager_shutdown(self):
with self.executor_type(max_workers=1) as e:
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
threads = [e._pool.thread]
queues = [e._pool.queue]
events = [e._pool.event]
for t in threads:
t.join()
for q in queues:
with self.assertRaises(LookupError):
q.pop()
for e in events:
self.assertTrue(e.is_set())
def test_del_shutdown(self):
executor = self.executor_type(max_workers=1)
list(executor.map(abs, range(-5, 5)))
threads = [executor._pool.thread]
queues = [executor._pool.queue]
events = [executor._pool.event]
if hasattr(sys, 'pypy_version_info'):
executor.shutdown(False)
else:
del executor
for t in threads:
t.join()
for q in queues:
with self.assertRaises(LookupError):
q.pop()
for e in events:
self.assertTrue(e.is_set())
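# The wait() tests below cover the three return_when modes of the futures
# API: FIRST_COMPLETED, FIRST_EXCEPTION, and ALL_COMPLETED, plus the timeout
# path, using the pre-baked CANCELLED/EXCEPTION/SUCCESSFUL future fixtures.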
class WaitTestMixin:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 0.25)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual({future1}, done)
self.assertEqual({CANCELLED_FUTURE, future2}, not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 0.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual({
CANCELLED_AND_NOTIFIED_FUTURE,
SUCCESSFUL_FUTURE},
finished)
self.assertEqual({future1}, pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 0.25)
future3 = self.executor.submit(time.sleep, 0.5)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual({future1, future2}, finished)
self.assertEqual({future3}, pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 0.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual({
SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1},
finished)
self.assertEqual({
CANCELLED_FUTURE,
future2},
pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 0.25)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual({EXCEPTION_FUTURE}, finished)
self.assertEqual({future1}, pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual({
SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2},
finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 0.75)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=0.5,
return_when=futures.ALL_COMPLETED)
self.assertEqual({
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1},
finished)
self.assertEqual({future2}, pending)
class ProcessPoolWaitTest(ProcessPoolMixin,
WaitTestMixin,
unittest.TestCase):
pass
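# as_completed() tests: results must arrive as they finish, a zero or short
# timeout raises TimeoutError for still-pending futures, and duplicate
# futures in the input must yield a single result (issue 20367).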
class AsCompletedTestMixin:
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual({
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2},
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 0.5)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual({
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE},
completed_futures)
def test_nonzero_timeout(self):
future1 = self.executor.submit(time.sleep, 0.0)
future2 = self.executor.submit(time.sleep, 0.5)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0.2):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual({
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1},
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 0.1)
completed = [f for f in futures.as_completed([future1, future1])]
self.assertEqual(len(completed), 1)
class ProcessPoolAsCompletedTest(ProcessPoolMixin,
AsCompletedTestMixin,
unittest.TestCase):
pass
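# Core executor API tests shared by process and thread pools: submit() with
# positional and keyword arguments, cancellation, map()/starmap(), exception
# propagation through map iterators, and map timeouts.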
class ExecutorTestMixin:
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
future = self.executor.submit(mul, x=2, y=8)
self.assertEqual(16, future.result())
def test_submit_cancel(self):
fs = []
num_workers = self.executor.num_workers
for _ in range(num_workers*100):
f = self.executor.submit(time.sleep, 0.1)
fs.append(f)
future = self.executor.submit(time.sleep, 0)
future.cancel()
self.assertTrue(future.cancelled())
for f in fs:
f.cancel()
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_starmap(self):
sequence = [(a,a) for a in range(10)]
self.assertEqual(
list(self.executor.starmap(pow, sequence)),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.starmap(pow, iter(sequence))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(next(i), (0, 1))
self.assertEqual(next(i), (0, 1))
with self.assertRaises(ZeroDivisionError):
next(i)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep, [0, 0, 1], timeout=0.25):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_map_timeout_one(self):
results = []
for i in self.executor.map(time.sleep, [0, 0, 0], timeout=1):
results.append(i)
self.assertEqual([None, None, None], results)
class ProcessPoolExecutorTest(ProcessPoolMixin,
ExecutorTestMixin,
unittest.TestCase):
def test_map_chunksize(self):
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
with self.assertRaises(ValueError):
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
def test_starmap_chunksize(self):
ref = list(map(pow, range(40), range(40)))
sequence = [(a, a) for a in range(40)]
self.assertEqual(
list(self.executor.starmap(pow, sequence, chunksize=6)),
ref)
self.assertEqual(
list(self.executor.starmap(pow, sequence, chunksize=50)),
ref)
self.assertEqual(
list(self.executor.starmap(pow, sequence, chunksize=40)),
ref)
self.assertEqual(
list(self.executor.starmap(pow, iter(sequence), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.starmap(pow, iter(sequence), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.starmap(pow, iter(sequence), chunksize=40)),
ref)
with self.assertRaises(ValueError):
list(self.executor.starmap(pow, sequence, chunksize=-1))
def test_map_unordered(self):
map_unordered = functools.partial(self.executor.map, unordered=True)
self.assertEqual(
set(map_unordered(pow, range(10), range(10))),
set(map(pow, range(10), range(10))))
def test_map_unordered_timeout(self):
map_unordered = functools.partial(self.executor.map, unordered=True)
num_workers = self.executor.num_workers
results = []
try:
args = [1] + [0]*(num_workers-1)
for i in map_unordered(time.sleep, args, timeout=0.25):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None]*(num_workers-1), results)
def test_map_unordered_timeout_one(self):
map_unordered = functools.partial(self.executor.map, unordered=True)
results = []
for i in map_unordered(time.sleep, [0, 0, 0], timeout=1):
results.append(i)
self.assertEqual([None, None, None], results)
def test_map_unordered_exception(self):
map_unordered = functools.partial(self.executor.map, unordered=True)
i = map_unordered(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
try:
self.assertEqual(next(i), (0, 1))
except ZeroDivisionError:
return
def test_map_unordered_chunksize(self):
map_unordered = functools.partial(self.executor.map, unordered=True)
ref = set(map(pow, range(40), range(40)))
self.assertEqual(
set(map_unordered(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
set(map_unordered(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
set(map_unordered(pow, range(40), range(40), chunksize=40)),
ref)
with self.assertRaises(ValueError):
set(map_unordered(pow, range(40), range(40), chunksize=-1))
class ProcessPoolSubmitTest(unittest.TestCase):
@unittest.skipIf(MPI.get_vendor()[0] == 'Microsoft MPI', 'msmpi')
def test_multiple_executors(self):
executor1 = futures.MPIPoolExecutor(1).bootup(wait=True)
executor2 = futures.MPIPoolExecutor(1).bootup(wait=True)
executor3 = futures.MPIPoolExecutor(1).bootup(wait=True)
fs1 = [executor1.submit(abs, i) for i in range(100, 200)]
fs2 = [executor2.submit(abs, i) for i in range(200, 300)]
fs3 = [executor3.submit(abs, i) for i in range(300, 400)]
futures.wait(fs3+fs2+fs1)
for i, f in enumerate(fs1):
self.assertEqual(f.result(), i + 100)
for i, f in enumerate(fs2):
self.assertEqual(f.result(), i + 200)
for i, f in enumerate(fs3):
self.assertEqual(f.result(), i + 300)
executor1 = executor2 = executor3 = None
def test_mpi_serialized_support(self):
futures._core.setup_mpi_threads()
serialized = futures._core.serialized
lock_save = serialized.lock
try:
if lock_save is None:
serialized.lock = threading.Lock()
executor = futures.MPIPoolExecutor(1).bootup()
executor.submit(abs, 0).result()
executor.shutdown()
serialized.lock = lock_save
else:
serialized.lock = None
with lock_save:
executor = futures.MPIPoolExecutor(1).bootup()
executor.submit(abs, 0).result()
executor.shutdown()
serialized.lock = lock_save
finally:
serialized.lock = lock_save
def test_shared_executors(self):
if not SHARED_POOL: return
executors = [futures.MPIPoolExecutor() for _ in range(16)]
fs = []
for i in range(128):
fs.extend(
e.submit(abs, i*16+j)
for j, e in enumerate(executors)
)
self.assertEqual(sorted(f.result() for f in fs), list(range(16*128)))
world_size = MPI.COMM_WORLD.Get_size()
num_workers = max(1, world_size - 1)
for e in executors:
self.assertEqual(e.num_workers, num_workers)
del e, executors
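# Pickle round-trip helpers for the tests below: GoodPickle records whether
# it was (un)pickled, BadPickle raises ZeroDivisionError while pickling
# (__getstate__), and BadUnpickle raises while unpickling (__setstate__),
# so the tests can check that serialization errors surface on the future.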
def inout(arg):
return arg
class GoodPickle:
def __init__(self, value=0):
self.value = value
self.pickled = False
self.unpickled = False
def __getstate__(self):
self.pickled = True
return (self.value,)
def __setstate__(self, state):
self.unpickled = True
self.value = state[0]
class BadPickle:
def __init__(self):
self.pickled = False
def __getstate__(self):
self.pickled = True
1/0
def __setstate__(self, state):
pass
class BadUnpickle:
def __init__(self):
self.pickled = False
def __getstate__(self):
self.pickled = True
return (None,)
def __setstate__(self, state):
if state[0] is not None:
raise ValueError
1/0
@unittest.skipIf(SHARED_POOL and WORLD_SIZE == 1, 'shared-pool')
class ProcessPoolPickleTest(unittest.TestCase):
def setUp(self):
self.executor = futures.MPIPoolExecutor(1)
def tearDown(self):
self.executor.shutdown()
def test_good_pickle(self):
o = GoodPickle(42)
r = self.executor.submit(inout, o).result()
self.assertEqual(o.value, r.value)
self.assertTrue(o.pickled)
self.assertTrue(r.unpickled)
r = self.executor.submit(GoodPickle, 77).result()
self.assertEqual(r.value, 77)
self.assertTrue(r.unpickled)
def test_bad_pickle(self):
o = BadPickle()
self.assertFalse(o.pickled)
f = self.executor.submit(inout, o)
with self.assertRaises(ZeroDivisionError):
f.result()
self.assertTrue(o.pickled)
f = self.executor.submit(BadPickle)
with self.assertRaises(ZeroDivisionError):
f.result()
f = self.executor.submit(abs, 42)
self.assertEqual(f.result(), 42)
def test_bad_unpickle(self):
o = BadUnpickle()
self.assertFalse(o.pickled)
f = self.executor.submit(inout, o)
with self.assertRaises(ZeroDivisionError):
f.result()
self.assertTrue(o.pickled)
f = self.executor.submit(BadUnpickle)
with self.assertRaises(ZeroDivisionError):
f.result()
f = self.executor.submit(abs, 42)
self.assertEqual(f.result(), 42)
def test_exc_pickle(self):
o = BadPickle()
f = self.executor.submit(inout, o)
exc = f.exception()
self.assertIsInstance(exc, ZeroDivisionError)
cause = exc.__cause__
self.assertIsNone(cause)
def test_exc_unpickle(self):
o = BadUnpickle()
f = self.executor.submit(inout, o)
exc = f.exception()
self.assertIsInstance(exc, ZeroDivisionError)
cause = exc.__cause__
self.assertIsInstance(cause, futures._core.RemoteTraceback)
class MPICommExecutorTest(unittest.TestCase):
MPICommExecutor = futures.MPICommExecutor
def test_default(self):
with self.MPICommExecutor() as executor:
if executor is not None:
executor.bootup()
future1 = executor.submit(time.sleep, 0)
future2 = executor.submit(time.sleep, 0)
executor.shutdown()
self.assertIsNone(future1.result())
self.assertIsNone(future2.result())
def test_self(self):
with self.MPICommExecutor(MPI.COMM_SELF) as executor:
future = executor.submit(time.sleep, 0)
self.assertIsNone(future.result())
self.assertIsNone(future.exception())
future = executor.submit(sleep_and_raise, 0)
with self.assertRaises(Exception):
future.result()
self.assertEqual(Exception, type(future.exception()))
list(executor.map(time.sleep, [0, 0]))
list(executor.map(time.sleep, [0, 0], timeout=1))
iterator = executor.map(time.sleep, [0.2, 0], timeout=0)
with self.assertRaises(futures.TimeoutError):
list(iterator)
def test_args(self):
with self.MPICommExecutor(MPI.COMM_SELF) as executor:
self.assertIsNotNone(executor)
with self.MPICommExecutor(MPI.COMM_SELF, 0) as executor:
self.assertIsNotNone(executor)
def test_kwargs(self):
with self.MPICommExecutor(comm=MPI.COMM_SELF) as executor:
self.assertIsNotNone(executor)
with self.MPICommExecutor(comm=MPI.COMM_SELF, root=0) as executor:
self.assertIsNotNone(executor)
@unittest.skipIf(SHARED_POOL, 'shared-pool')
def test_arg_root(self):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
for root in range(comm.Get_size()):
with self.MPICommExecutor(comm, root) as executor:
if rank == root:
self.assertIsNotNone(executor)
else:
self.assertIsNone(executor)
with self.MPICommExecutor(root=root) as executor:
if rank == root:
self.assertIsNotNone(executor)
else:
self.assertIsNone(executor)
@unittest.skipIf(SHARED_POOL, 'shared-pool')
def test_arg_bad_root(self):
size = MPI.COMM_WORLD.Get_size()
with self.assertRaises(ValueError):
self.MPICommExecutor(root=-size)
with self.assertRaises(ValueError):
self.MPICommExecutor(root=-1)
with self.assertRaises(ValueError):
self.MPICommExecutor(root=+size)
@unittest.skipIf(SHARED_POOL, 'shared-pool')
def test_arg_bad_comm(self):
if MPI.COMM_WORLD.Get_size() == 1: return
intercomm, intracomm = futures._core.comm_split(MPI.COMM_WORLD, 0)
try:
with self.assertRaises(ValueError):
self.MPICommExecutor(intercomm)
finally:
intercomm.Free()
if intracomm:
intracomm.Free()
def test_with_bad(self):
mpicommexecutor = self.MPICommExecutor(MPI.COMM_SELF)
with mpicommexecutor as executor:
try:
with mpicommexecutor:
pass
except RuntimeError:
pass
else:
self.fail('expected RuntimeError')
def test_initializer(self):
mpicommexecutor = self.MPICommExecutor(
initializer=time.sleep,
initargs=(0,),
)
with mpicommexecutor as executor:
if executor is not None:
executor.bootup()
del executor
with mpicommexecutor as executor:
if executor is not None:
executor.submit(time.sleep, 0).result()
def test_initializer_error(self):
mpicommexecutor = self.MPICommExecutor(
initializer=sleep_and_raise,
initargs=(0.2,),
)
with mpicommexecutor as executor:
if executor is not None:
executor.submit(time.sleep, 0).cancel()
future = executor.submit(time.sleep, 0)
with self.assertRaises(futures.BrokenExecutor):
executor.submit(time.sleep, 0).result()
with self.assertRaises(futures.BrokenExecutor):
future.result()
def test_initializer_error_del(self):
mpicommexecutor = self.MPICommExecutor(
initializer=sleep_and_raise,
initargs=(0.2,),
)
with mpicommexecutor as executor:
if executor is not None:
executor.bootup()
del executor
def test_initializer_error_del_nowait(self):
mpicommexecutor = self.MPICommExecutor(
initializer=sleep_and_raise,
initargs=(0.2,),
)
with mpicommexecutor as executor:
if executor is not None:
executor.bootup(wait=False)
executor.shutdown(wait=False)
del executor
def test_get_comm_workers(self):
for comm in (MPI.COMM_SELF, MPI.COMM_WORLD):
            with self.MPICommExecutor(comm) as executor:
                num_workers = executor.submit(check_comm_workers).result()
                self.assertEqual(executor.num_workers, num_workers)
self.assertRaises(RuntimeError, check_comm_workers)
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ThreadPoolTest(ThreadPoolMixin,
ExecutorTestMixin,
ExecutorShutdownTestMixin,
unittest.TestCase):
pass
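# Promises/A+-style chaining, as exposed by mpi4py.futures.aplus: calling
# `f.then(on_success, on_failure)` returns a new ThenableFuture resolved
# from the callbacks' return values, and `f.catch(handler)` behaves like
# `f.then(None, handler)`. A hypothetical usage sketch:
#     g = base.then(lambda r: r + 1).catch(lambda exc: 0)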
from mpi4py.futures.aplus import ThenableFuture
class ThenTest(unittest.TestCase):
assert_ = unittest.TestCase.assertTrue
def test_not_done(self):
base_f = ThenableFuture()
new_f = base_f.then()
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f._invoke_callbacks()
self.assertTrue(new_f.cancelled())
def test_cancel(self):
base_f = ThenableFuture()
new_f = base_f.then()
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.cancel()
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(base_f.cancelled())
self.assertTrue(new_f.cancelled())
def test_then_multiple(self):
base_f = ThenableFuture()
new_f1 = base_f.then()
new_f2 = base_f.then()
new_f3 = base_f.then()
self.assertTrue(base_f is not new_f1)
self.assertTrue(base_f is not new_f2)
self.assertTrue(base_f is not new_f3)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f1.done())
self.assertTrue(not new_f2.done())
self.assertTrue(not new_f3.done())
base_f.set_result('done')
self.assertTrue(base_f.done())
self.assertTrue(new_f1.done())
self.assertTrue(new_f2.done())
self.assertTrue(new_f3.done())
self.assertTrue(not new_f1.exception())
self.assertTrue(not new_f2.exception())
self.assertTrue(not new_f3.exception())
self.assertTrue(new_f1.result() == 'done')
self.assertTrue(new_f2.result() == 'done')
self.assertTrue(new_f3.result() == 'done')
def test_no_callbacks_and_success(self):
base_f = ThenableFuture()
new_f = base_f.then()
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_result('done')
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(not new_f.exception())
self.assertTrue(new_f.result() == 'done')
def test_no_callbacks_and_failure(self):
class MyException(Exception):
pass
base_f = ThenableFuture()
new_f = base_f.then()
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_exception(MyException('sad'))
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(new_f.exception())
with self.assertRaises(MyException) as catcher:
new_f.result()
self.assertTrue(catcher.exception.args[0] == 'sad')
def test_success_callback_and_success(self):
base_f = ThenableFuture()
new_f = base_f.then(lambda result: result + ' manipulated')
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_result('done')
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(not new_f.exception())
self.assertTrue(new_f.result() == 'done manipulated')
def test_err_callback_and_failure_repackage(self):
class MyException(Exception):
pass
class MyRepackagedException(Exception):
pass
class NotMatched(Exception):
pass
def on_failure(ex):
if isinstance(ex, MyException):
return MyRepackagedException(ex.args[0] + ' repackaged')
else:
return NotMatched('?')
base_f = ThenableFuture()
new_f = base_f.then(None, on_failure)
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_exception(MyException('sad'))
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(new_f.exception())
with self.assertRaises(MyRepackagedException) as catcher:
new_f.result()
self.assertTrue(catcher.exception.args[0] == 'sad repackaged')
def test_err_callback_and_failure_raised(self):
class MyException(Exception):
pass
class MyRepackagedException(Exception):
pass
def raise_something_else(ex):
raise MyRepackagedException(ex.args[0] + ' repackaged')
base_f = ThenableFuture()
new_f = base_f.then(None, raise_something_else)
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_exception(MyException('sad'))
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(new_f.exception())
with self.assertRaises(MyRepackagedException) as catcher:
new_f.result()
self.assertTrue(catcher.exception.args[0] == 'sad repackaged')
def test_err_callback_convert_to_success(self):
class MyException(Exception):
pass
class NotMatched(Exception):
pass
def on_failure(ex):
if isinstance(ex, MyException):
return ex.args[0] + ' repackaged'
else:
return NotMatched('?')
base_f = ThenableFuture()
new_f = base_f.catch(on_failure)
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_exception(MyException('sad'))
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(not new_f.exception())
self.assertTrue(new_f.result() == 'sad repackaged')
def test_err_catch_ignore(self):
base_f = ThenableFuture()
new_f = base_f.catch()
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_exception(Exception('sad'))
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(new_f.exception() is None)
self.assertTrue(new_f.result() is None)
def test_success_callback_and_failure_raised(self):
class MyException(Exception):
pass
def raise_something_else(value):
raise MyException(value + ' repackaged')
base_f = ThenableFuture()
new_f = base_f.then(raise_something_else)
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_result('sad')
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(new_f.exception())
with self.assertRaises(MyException) as catcher:
new_f.result()
        self.assertTrue(catcher.exception.args[0] == 'sad repackaged')
def test_chained_success_callback_and_success(self):
def transform(value):
f = ThenableFuture()
if value < 5:
f.set_result(transform(value+1))
else:
f.set_result(value)
return f
base_f = ThenableFuture()
new_f = base_f.then(transform)
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_result(1)
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(not new_f.exception())
self.assertTrue(new_f.result() == 5)
def test_detect_circular_chains(self):
f1 = ThenableFuture()
f2 = ThenableFuture()
chain = [f1, f2, f1]
def transform(a):
try:
f = chain.pop(0)
r = transform(a)
f.__init__()
f.set_result(r)
return f
except IndexError:
return 42
base_f = ThenableFuture()
new_f = base_f.then(transform)
self.assertTrue(base_f is not new_f)
self.assertTrue(not base_f.done())
self.assertTrue(not new_f.done())
base_f.set_result(1)
self.assertTrue(base_f.done())
self.assertTrue(new_f.done())
self.assertTrue(new_f.exception())
with self.assertRaises(RuntimeError) as catcher:
new_f.result()
self.assertTrue(
'Circular future chain detected'
in catcher.exception.args[0],
)
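# The pool tests require MPI dynamic process management (process spawning
# and name publishing via Open_port). The vendor/version matrix below skips
# them on MPI builds where that support is known to be missing or broken.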
SKIP_POOL_TEST = False
name, version = MPI.get_vendor()
if name == 'Open MPI':
if version < (3,0,0):
SKIP_POOL_TEST = True
if version == (4,0,0):
SKIP_POOL_TEST = True
if version == (4,0,1) and sys.platform=='darwin':
SKIP_POOL_TEST = True
if version == (4,0,2) and sys.platform=='darwin':
SKIP_POOL_TEST = True
if version == (4,1,2) and sys.platform=='linux':
SKIP_POOL_TEST = (os.environ.get('GITHUB_ACTIONS') == 'true')
if name == 'MPICH':
if sys.platform == 'darwin':
if version >= (3, 4) and version < (4, 0):
SKIP_POOL_TEST = True
if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
SKIP_POOL_TEST = (version < (4, 1))
try:
port = MPI.Open_port()
MPI.Close_port(port)
except:
port = ""
if port == "":
SKIP_POOL_TEST = True
del port
if name == 'MVAPICH2':
SKIP_POOL_TEST = True
if name == 'MPICH2':
if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
SKIP_POOL_TEST = True
if name == 'Microsoft MPI':
if version < (8,1,0):
SKIP_POOL_TEST = True
if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None:
SKIP_POOL_TEST = True
if MPI.Get_version() < (2,0):
SKIP_POOL_TEST = True
if SHARED_POOL:
del ProcessPoolInitTest.test_init_sys_flags
del ProcessPoolInitTest.test_init_globals
del ProcessPoolInitTest.test_use_pkl5_kwarg
del ProcessPoolInitTest.test_use_pkl5_environ
if WORLD_SIZE == 1:
del ProcessPoolInitTest.test_run_name
if WORLD_SIZE > 2:
del ProcessPoolInitTest.test_max_workers
del ProcessPoolInitTest.test_max_workers_environ
if WORLD_SIZE == 1:
del ProcessPoolPickleTest
del MPICommExecutorTest.test_arg_root
del MPICommExecutorTest.test_arg_bad_root
del MPICommExecutorTest.test_arg_bad_comm
elif WORLD_SIZE > 1 or SKIP_POOL_TEST:
del ProcessPoolInitTest
del ProcessPoolBootupTest
del ProcessPoolShutdownTest
del ProcessPoolWaitTest
del ProcessPoolAsCompletedTest
del ProcessPoolExecutorTest
del ProcessPoolSubmitTest
del ProcessPoolPickleTest
if not SHARED_POOL:
del SharedPoolInitTest
if __name__ == '__main__':
unittest.main()
# ==== File: /examples/continuous_forall_init.py (repo: tulip-control/tulip-control, license: BSD-3-Clause) ====
#!/usr/bin/env python
"""Simulation example with continuous dynamics."""
from __future__ import division
from __future__ import print_function
import logging
import random
import numpy as np
import polytope as pc
from polytope import box2poly
try:
from matplotlib import pyplot as plt
except ImportError:
plt = None
from tulip import hybrid, spec, synth
from tulip.abstract import prop2part, discretize
from tulip.abstract.plot import plot_partition
from tulip.abstract import find_controller
from tulip.abstract.plot import simulate2d, pick_point_in_polytope
logging.basicConfig(level='WARNING')
show = False
# Problem parameters
input_bound = 10.0
uncertainty = 0.01
# Continuous state space
cont_state_space = box2poly([[0., 3.], [0., 2.]])
# Continuous dynamics
A = np.array([[1.0, 0.], [0., 1.0]])
B = np.array([[0.1, 0.], [0., 0.1]])
E = np.array([[1, 0], [0, 1]])
# Available control, possible disturbances
U = input_bound * np.array([[-1., 1.], [-1., 1.]])
W = uncertainty * np.array([[-1., 1.], [-1., 1.]])
# Convert to polyhedral representation
U = box2poly(U)
W = box2poly(W)
# Construct the LTI system describing the dynamics
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, cont_state_space)
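# (The fourth LtiSysDyn argument, here None, is presumably the affine drift
# term K, so the modeled dynamics are x+ = A x + B u + E w.)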
# Define atomic propositions for relevant regions of state space
cont_props = {}
cont_props['home'] = box2poly([[0., 1.], [0., 1.]])
cont_props['lot'] = box2poly([[2., 3.], [1., 2.]])
# Compute the proposition preserving partition of
# the continuous state space
cont_partition = prop2part(cont_state_space, cont_props)
plot_partition(cont_partition) if show else None
# Given dynamics & proposition-preserving partition,
# find feasible transitions
disc_dynamics = discretize(
cont_partition, sys_dyn, closed_loop=False,
conservative=True,
N=5, min_cell_volume=0.1, plotit=show)
# Visualize transitions in continuous domain (optional)
plot_partition(disc_dynamics.ppp, disc_dynamics.ts,
disc_dynamics.ppp2ts) if show else None
#
# Specification
# Environment variables and assumptions
env_vars = {'park'}
env_init = {'X0reach'} # qinit == '\A \A'
env_prog = '!park'
env_safe = set()
# System variables and requirements
sys_vars = {'X0reach'}
sys_init = set() # qinit == '\A \A'
sys_prog = {'home'} # []<>home
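# X0reach latches "the lot has been visited since the last park request":
# the safety formula below updates it each step, and the added progress
# goal []<>X0reach forces the lot to be revisited whenever park is signaled.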
sys_safe = {'(X(X0reach) <-> lot) || (X0reach && !park)'}
sys_prog |= {'X0reach'}
# Create the specification
specs = spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
env_safe, sys_safe, env_prog, sys_prog)
specs.qinit = r'\A \A'
#
# Synthesize
disc_dynamics.ts.states.initial.add_from(disc_dynamics.ts.states)
ctrl = synth.synthesize(specs,
sys=disc_dynamics.ts,
ignore_sys_init=False)
assert ctrl is not None, 'unrealizable'
# Generate a graphical representation of the controller for viewing
if not ctrl.save('continuous.png'):
print(ctrl)
#
# Simulation
print('\n Simulation starts \n')
T = 100
# let us pick an environment signal
env_inputs = [{'park': random.randint(0, 1)} for _ in range(T + 1)]
# Set up parameters for get_input()
disc_dynamics.disc_params['conservative'] = True
disc_dynamics.disc_params['closed_loop'] = False
def pick_initial_state(ctrl, disc_dynamics):
"""Construct initial discrete and continuous state
for `qinit == '\A \A'`.
"""
# pick initial discrete state
init_edges = ctrl.edges('Sinit', data=True)
u, v, edge_data = next(iter(init_edges))
assert u == 'Sinit', u
d_init = edge_data
# pick initial continuous state
s0_part = edge_data['loc']
init_poly = disc_dynamics.ppp.regions[s0_part].list_poly[0]
x_init = pick_point_in_polytope(init_poly)
s0_part_ = find_controller.find_discrete_state(
x_init, disc_dynamics.ppp)
assert s0_part == s0_part_, (s0_part, s0_part_)
return d_init, x_init
# for `qinit == '\A \A'`
d_init, x_init = pick_initial_state(ctrl, disc_dynamics)
simulate2d(env_inputs, sys_dyn, ctrl, disc_dynamics, T,
d_init=d_init, x_init=x_init, qinit=specs.qinit)
# ==== File: /visualize.py (repo: MahmudulAlam/Unified-Gesture-and-Fingertip-Detection, license: MIT) ====
import cv2
import numpy as np
from preprocess.data_generator import label_generator
def visualize(img, prob, key):
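    """Overlay predicted fingertip keypoints on an image.

    prob holds one detection probability per finger; key holds the
    keypoints as a flat (x0, y0, x1, y1, ...) array. Fingers whose
    probability exceeds 0.5 are drawn as filled colored circles.
    """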
index = 0
# preprocess
img = np.asarray(img, dtype=np.uint8)
prob = prob.squeeze()
key = key.squeeze()
color = [(15, 15, 240), (15, 240, 155), (240, 155, 15), (240, 15, 155), (240, 15, 240)]
for c, p in enumerate(prob):
if p > 0.5:
img = cv2.circle(img, (int(key[index]), int(key[index + 1])), radius=5, color=color[c], thickness=-2)
index = index + 2
cv2.imshow('Unified Gesture & Fingertips Detection', img)
cv2.waitKey(0)
if __name__ == '__main__':
image, probability, keypoints = label_generator(directory='./dataset/', dtype='train', sample=0)
visualize(img=image, prob=probability, key=keypoints)
# ==== File: /tests/st/ops/test_div.py (repo: mindspore-ai/akg, license: Apache-2.0) ====
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
import akg.utils as utils
from tests.common.base import TestBase
from tests.common.test_run import div_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_div_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.args_default = [
# testflag,opfuncname,testRunArgs, setdimArgs
("div_002", div_run, ((3,), (2, 3), "float16"), ["level0"]),
("div_011", div_run, ((2, 1024), (1,), "float32"), ["level0"]),
("div_012", div_run, ((1024, ), (1,), "float32"), ["level0"]),
("div_013", div_run, ((33, 64), (1,), "float32"), ["level0"]),
("div_014", div_run, ((4096, ), (1,), "float32"), ["level0"]),
("div_015", div_run, ((2, ), (1,), "float32"), ["level0"]),
("div_004", div_run, ((1, 3), (2, 3), "float32"), ["level0"]),
]
self.testarg_rpc_cloud = [
# testflag,opfuncname,testRunArgs, setdimArgs
("div_001", div_run, ((3, 4, 5), (3, 1, 5), "int32")),
("div_003", div_run, ((2, 3), (2, 3), "int8")),
("div_005", div_run, ((8, 24, 42), (8, 24, 42), "uint8")),
("div_016", div_run, ((2, 1024), (1,), "int32")),
("div_017", div_run, ((1024, ), (1,), "int32")),
("div_018", div_run, ((33, 64), (1,), "int32")),
("div_019", div_run, ((4096, ), (1,), "int32")),
("div_020", div_run, ((2, ), (1,), "int32")),
("test_bert_div_002", div_run, ((2, 1024), (1,), "float32")),
("test_bert_div_010", div_run, ((2,), (1,), "float32")),
("div_006", div_run, ((3, 4, 5), (3, 1, 5), "int32")),
("div_007", div_run, ((2, 3), (3,), "float16")),
("div_008", div_run, ((2, 3), (2, 3), "int8")),
("div_009", div_run, ((1, 3), (2, 3), "float32")),
("div_010", div_run, ((8, 24, 42), (8, 24, 42), "uint8")),
# bert cases
("div_021", div_run, ((21128, 1024), (1,), "float32")),
("div_022", div_run, ((1024, 1024), (1,), "float32")),
("div_023", div_run, ((1024, 4096), (1,), "float32")),
("div_024", div_run, ((4096, 1024), (1,), "float32")),
("div_025", div_run, ((21128, ), (1,), "float32")),
("div_026", div_run, ((21128, 1024), (1,), "int32")),
("div_027", div_run, ((1024, 1024), (1,), "int32")),
("div_028", div_run, ((1024, 4096), (1,), "int32")),
("div_029", div_run, ((4096, 1024), (1,), "int32")),
("div_030", div_run, ((21128, ), (1,), "int32")),
]
return True
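    # One entry point per backend below: the same args_default cases are
    # dispatched to GPU (CUDA), CPU (LLVM), and Ascend (CCE) via run_cases,
    # while the larger rpc_cloud case list runs through common_run.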
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_level0(self):
return self.run_cases(self.args_default, utils.CUDA, "level0")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_level0(self):
return self.run_cases(self.args_default, utils.LLVM, "level0")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
return self.run_cases(self.args_default, utils.CCE, "level0")
def test_run_rpc_cloud(self):
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= {0} Teardown============".format(self.casename))
return
# ==== File: /machine_learning/neural_networks/sequence_networks.py (repo: jgmakin/machine_learning, license: none specified) ====
# standard libraries
import pdb
import os
import math
import copy
from functools import reduce
from termcolor import cprint
from IPython.display import clear_output
import pickle
# third-party libraries
import numpy as np
from scipy.special import logsumexp
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python import pywrap_tensorflow
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import beam_search as beam_search
try:
import tfmpl
except ModuleNotFoundError:
print('Package conflict (probably because you are using TF2.x)', end='')
print('...not loading tfmpl...')
# local
from utils_jgm import toolbox
from utils_jgm.machine_compatibility_utils import MachineCompatibilityUtils
from machine_learning.neural_networks import basic_components as nn
from machine_learning.neural_networks import tf_helpers as tfh
MCUs = MachineCompatibilityUtils()
'''
Neural networks for sequence-to-label and sequence-to-sequence problems.
The bulk of this module consists of the class SequenceNetwork, whose main
(external-facing) methods are .fit and .assess.
Etc....
:Author: J.G. Makin (except where otherwise noted)
Created: July 2017
by JGM
'''
@tfmpl.figure_tensor
def dual_violin_plot(
data, labels, label_list, x_axis_label=None, y_axis_label=None,
ymin=None, ymax=None, figsize=(12, 12)
):
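    """Render side-by-side violin plots of `data` grouped by `labels`.

    One violin is drawn per entry of label_list that actually occurs in
    `labels`; the @tfmpl.figure_tensor decorator converts the matplotlib
    figure into an image tensor for TensorBoard.
    """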
fig = tfmpl.create_figure(figsize=figsize)
ax = fig.add_subplot(111)
# ax.axis('off')
# ax.scatter(x, y)
ax.violinplot(
dataset=[
data[labels == label] for label in label_list
if data[labels == label].shape[0]
],
positions=[
label for label in label_list if data[labels == label].shape[0]
],
showmeans=False, showmedians=True
)
ax.set_xlabel(x_axis_label)
ax.set_ylabel(y_axis_label)
ax.set_xticks(label_list)
ax.set_xticklabels(label_list)
ax.set_ylim((ymin, ymax))
fig.tight_layout()
return fig
def single_word_predictions(word, targets_list, targets_given_predictions):
prediction_counts_vector = targets_given_predictions[
targets_list.index(word)]
predicted_words = list(np.array(targets_list)[
prediction_counts_vector > 0])
prediction_counts = list(prediction_counts_vector[
prediction_counts_vector > 0])
predicted_words_and_counts = [(word, count) for word, count in
zip(predicted_words, prediction_counts)]
return predicted_words_and_counts
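# Decorator factory: tags an affine-transformation function with a boolean
# TRANSPOSED attribute, presumably so downstream code can tell whether the
# function's weight matrix is applied transposed.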
def _transpose_annotator(TRANSPOSED):
def wrapper(affine_fxn):
affine_fxn.TRANSPOSED = TRANSPOSED
return affine_fxn
return wrapper
# a class for the encoder-decoder network
class SequenceNetwork:
@toolbox.auto_attribute(CHECK_MANIFEST=True)
def __init__(
self,
manifest,
#####
# kwargs set in the manifest
temperature=None,
Nepochs=None,
layer_sizes=None,
FF_dropout=None,
RNN_dropout=None,
EMA_decay=None,
beam_width=None,
TEMPORALLY_CONVOLVE=None,
assessment_epoch_interval=None,
tf_summaries_dir=None,
#####
Ncases=256,
stiffness=0,
beam_alpha=0.6,
max_hyp_length=20,
EOS_token='<EOS>',
pad_token='<pad>',
OOV_token='<OOV>',
assessment_partitions=None,
num_guessable_classes=None,
num_training_shards_to_discard=0,
checkpoint_path='~/tmp/checkpoint_data/model.ckpt',
TARGETS_ARE_SEQUENCES=True,
ASSESS_ALL_DECIMATIONS=True,
ENCODER_RNN_IS_BIDIRECTIONAL=True,
MAX_POOL=False,
BIAS_DECODER_OUTPUTS=False, # to match seq2seq
k_for_top_k_accuracy=5,
assessment_op_set={
'decoder_word_error_rate',
'decoder_accuracy',
'decoder_confusions',
'decoder_top_k_inds',
'decoder_sequence_log_probs',
'decoder_targets',
'decoder_beam_targets',
"decoder_outputs",
},
summary_op_set={
'decoder_word_error_rate',
'decoder_accuracy',
'decoder_confusions_image',
'decoder_top_k_accuracy',
# 'decoder_xpct_normalized_accuracy',
# 'decoder_vrnc_normalized_accuracy',
# 'decoder_entropy',
# 'decoder_calibration',
# 'decoder_calibration_image',
},
PROBABILISTIC_CONFUSIONS=False,
inputs_to_occlude=None,
training_GPUs=None, # let your code decide where to put things
VERBOSE=True,
# private; don't assign these to self:
# ...
):
######
# Is this still necessary for the Windows version of tf??
# self.allow_gpu_growth = True
######
if training_GPUs is None:
self.assessment_GPU = 1 if MCUs.num_GPUs > 1 else 0
else:
# This is dubious: it should probably just pick whatever GPU is
# *not* being used for training--if there is one
self.assessment_GPU = 1
self.num_CPUs = MCUs.num_CPUs
self.num_seq2seq_shards = None
# announce
self.vprint('Creating a sequence network that will train on ', end='')
self.vprint('%2.0f%% of the training data' %
(100/(self.num_training_shards_to_discard+1)))
# if you TEMPORALLY_CONVOLVE make sure you don't ASSESS_ALL_DECIMATIONS
if self.TEMPORALLY_CONVOLVE:
self.vprint('Temporal convolution; enforcing ASSESS_ALL_DECIMATIONS = False...')
self.ASSESS_ALL_DECIMATIONS = False
# what partitions of the data should you assess the network on?
if self.assessment_partitions is None:
self.assessment_partitions = ['training', 'validation']
# check that the encoder and decoder sizes all work together
for e_size, d_size in zip(
self.layer_sizes['encoder_rnn'], self.layer_sizes['decoder_rnn']
):
assert d_size == (1+self.ENCODER_RNN_IS_BIDIRECTIONAL)*e_size, \
"encoder/decoder layer-size mismatch!"
# adjust the summary_op_set so that tensorboard shows top_k nicely
        for op_name in list(self.summary_op_set):
if op_name.endswith('top_k_accuracy'):
# if 'decoder_top_k_accuracy' in self.summary_op_set:
self.summary_op_set.remove(op_name)
self.summary_op_set.add(op_name.replace(
'top_k_accuracy', 'top_%i_accuracy' % k_for_top_k_accuracy
))
def vprint(self, *args, **kwargs):
if self.VERBOSE:
print(*args, **kwargs)
def _initialize_assessment_struct(
self, initialize_data, data_type, num_epochs):
# set up a structure for items to be assessed and returned
class AssessmentTuple(toolbox.MutableNamedTuple):
__slots__ = ([
'writer', 'initializer', 'decoder_accuracies',
'decoder_word_error_rates'
] + list(self.assessment_op_set))
nums_assessments = math.ceil(num_epochs/self.assessment_epoch_interval)+1
return AssessmentTuple(
initializer=initialize_data,
writer=tf.compat.v1.summary.FileWriter(os.path.join(
self.tf_summaries_dir, data_type)),
            decoder_accuracies=np.zeros(nums_assessments),
            decoder_word_error_rates=np.zeros(nums_assessments),
**dict.fromkeys(self.assessment_op_set)
)
def fit(
self, subnets_params, train_vars_scope=None, reuse_vars_scope=None,
**graph_kwargs
):
'''
Fit the parameters of a neural network mapping variable-length
sequences to labels or to (variable-length) output sequences.
'''
# dump to disk a copy of each categorical var's feature_list
for data_manifest in subnets_params[-1].data_manifests.values():
if data_manifest.distribution == 'categorical':
file_name = '_'.join([data_manifest.sequence_type, 'vocab_file.pkl'])
with open(os.path.join(
os.path.dirname(self.checkpoint_path), file_name
), 'wb') as fp:
feature_list = [
t.encode('utf-8')
for t in data_manifest.get_feature_list()
]
pickle.dump(feature_list, fp)
# init
with tf.device('/cpu:0'):
optimizer = tf.compat.v1.train.AdamOptimizer(
self.compute_learning_rate(subnets_params, 813))
# I think the issue here is AdaM: it prefers to start with learning
# rates near 3e-4, independent of the total number of training
# data. So just hard-code Ntotal = 813 to yield 3e-4 with temp=0.4
'''
optimizer = tf.contrib.opt.AdamWOptimizer(
weight_decay=0.01,
learning_rate=10*self.compute_learning_rate(
subnets_params, self.Ncases, 813)/2
)
'''
# But remember to adjust for the *effective* batch size,
# num_cases*len(get_available_gpus())!!
# only the *last* subnet is used for evaluation
assessment_subnet_params = subnets_params[-1]
decoder_targets_list = assessment_subnet_params.data_manifests[
'decoder_targets'].get_feature_list()
def training_data_fxn(num_GPUs):
return self._batch_and_split_data(subnets_params, num_GPUs)
def assessment_data_fxn(num_epochs):
return self._generate_oneshot_datasets(
assessment_subnet_params, num_epochs
)
def training_net_builder(GPU_op_dict, CPU_op_dict, tower_name):
return self._build_training_net(
GPU_op_dict, CPU_op_dict, subnets_params,
(decoder_targets_list.index(self.EOS_token)
if self.EOS_token in decoder_targets_list else None),
train_vars_scope, tower_name)
@tfmpl.figure_tensor
def plotting_fxn(confusions, axis_labels):
fig = toolbox.draw_confusion_matrix(
confusions, axis_labels, (12, 12)
)
return fig
def assessment_net_builder(GPU_op_dict, CPU_op_dict):
return self._build_assessment_net(
GPU_op_dict, CPU_op_dict, assessment_subnet_params,
self._standard_indexer, plotting_fxn)
def assessor(
sess, assessment_struct, epoch, assessment_step, data_partition
):
return self._assess(
sess, assessment_struct, epoch, assessment_step,
decoder_targets_list, data_partition)
# use the general graph build to assemble these pieces
graph_builder = tfh.GraphBuilder(
training_data_fxn, assessment_data_fxn, training_net_builder,
assessment_net_builder, optimizer, assessor, self.checkpoint_path,
self.Nepochs,
EMA_decay=self.EMA_decay, reuse_vars_scope=reuse_vars_scope,
training_GPUs=self.training_GPUs, assessment_GPU=self.assessment_GPU,
**graph_kwargs
)
return graph_builder.train_and_assess(self.assessment_epoch_interval)
def restore_and_get_saliencies(
self, subnets_params, restore_epoch, assessment_type='norms',
data_partition='validation', **graph_kwargs
):
# init
tf.compat.v1.reset_default_graph()
FF_dropout = self.FF_dropout
self.FF_dropout = 0.0
RNN_dropout = self.RNN_dropout
self.RNN_dropout = 0.0
decoder_targets_list = subnets_params[-1].data_manifests[
'decoder_targets'].get_feature_list()
class FakeOptimizer:
def __init__(self):
pass
def compute_gradients(self, total_loss, get_inputs):
# In fact, you have to return gradients and variables
return [(g, None) for g in tf.gradients(ys=total_loss, xs=get_inputs)]
optimizer = FakeOptimizer()
def training_data_fxn(num_GPUs):
return self._batch_and_split_data(
subnets_params, num_GPUs, data_partition
)
def training_net_builder(GPU_op_dict, CPU_op_dict, tower_name):
tf.transpose(a=GPU_op_dict['decoder_targets'], perm=[0, 2, 1],
name='assess_decoder_targets')
total_loss, train_vars = self._build_training_net(
GPU_op_dict, CPU_op_dict, subnets_params,
(decoder_targets_list.index(self.EOS_token)
if self.EOS_token in decoder_targets_list else None),
None, tower_name)
return total_loss, GPU_op_dict['encoder_inputs']
def get_saliency_sequences(sess, initializer, get_input_saliencies):
# get the full sequences of dL/dinput--FOR ONE BATCH
sess.run(initializer)
return sess.run((
get_input_saliencies,
sess.graph.get_operation_by_name(
'tower_0/assess_decoder_targets').outputs[0]
))
def get_saliency_norms(sess, initializer, get_input_saliencies):
# desequence, take norm--across time and sequences
index_sequences, _ = nn.sequences_tools(get_input_saliencies) #[:, :50]
desequence_saliencies = tf.gather_nd(
get_input_saliencies, index_sequences)
get_squared_magnitudes = tf.reduce_sum(
tf.square(desequence_saliencies), axis=0)
# accumulate gradient norm across batches
sess.run(initializer)
accumulated_saliences = np.zeros(
(subnets_params[-1].data_manifests['encoder_inputs'].num_features))
while True:
try:
accumulated_saliences += sess.run(get_squared_magnitudes)
except tf.errors.OutOfRangeError:
break
return np.sqrt(accumulated_saliences)
def get_per_class_saliency_norms(sess, initializer, get_input_saliencies):
# take norm across time
get_per_example_norms = tf.sqrt(tf.reduce_sum(
tf.square(get_input_saliencies), axis=1)
)
# ...
all_per_example_norms = np.zeros(
[0, subnets_params[-1].data_manifests['encoder_inputs'].num_features])
######
# This won't work for word sequences (right?)
all_decoder_targets = np.zeros([0, 1, 1])
######
sess.run(initializer)
while True:
try:
per_example_norms, decoder_targets = sess.run((
get_per_example_norms,
sess.graph.get_operation_by_name(
'tower_0/assess_decoder_targets').outputs[0]
))
all_per_example_norms = np.concatenate(
(all_per_example_norms, per_example_norms)
)
all_decoder_targets = np.concatenate(
(all_decoder_targets, decoder_targets)
)
except tf.errors.OutOfRangeError:
break
return all_per_example_norms, decoder_targets
######
# For now, at least, abuse the assessor
assessor_dict = {
'sequences': get_saliency_sequences,
'norms': get_saliency_norms,
'per_class_norms': get_per_class_saliency_norms,
}
######
# use the general graph build to assemble these pieces
graph_builder = tfh.GraphBuilder(
training_data_fxn, None, training_net_builder, None, optimizer,
assessor_dict[assessment_type], self.checkpoint_path,
restore_epoch, restore_epoch-1, EMA_decay=self.EMA_decay,
training_GPUs=self.training_GPUs, **graph_kwargs
)
saliencies = graph_builder.get_saliencies()
# restore
self.FF_dropout = FF_dropout
self.RNN_dropout = RNN_dropout
return saliencies
def _build_training_net(
self, sequenced_op_dict, CPU_op_dict, subnets_params, eos_id,
train_vars_scope, tower_name
):
'''
The neural network to be trained
'''
# augment the encoder data?
sequenced_op_dict = data_augmentor(sequenced_op_dict, 'encoder_')
# build the training NN
with tf.compat.v1.variable_scope('seq2seq', reuse=tf.compat.v1.AUTO_REUSE):
# tensorflow requires that *something* be returned
final_RNN_states = tf.case([(
tf.equal(CPU_op_dict['subnet_id'], subnet_params.subnet_id),
lambda params=subnet_params: self._build_training_net_core(
sequenced_op_dict, params, tower_name, eos_id
)
) for subnet_params in subnets_params], exclusive=True)
# only train the part of the graph given by train_vars_scope
total_loss = tf.add_n(
[loss for loss in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES)
if loss.name.startswith(tower_name)]
)
train_vars = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=train_vars_scope)
return total_loss, train_vars
def _build_training_net_core(
self, sequenced_op_dict, subnet_params, tower_name, eos_id
):
# ENCODER
(sequenced_op_dict, final_encoder_states, stride, draw_initial_ind,
) = self._encode_sequences(
sequenced_op_dict, subnet_params, self.FF_dropout,
self.RNN_dropout, tower_name=tower_name
)
sequenced_op_dict = self._prepare_encoder_targets(
sequenced_op_dict, draw_initial_ind, stride
)
# DECODER
decode_training_data = (
self._decode_training_sequences if self.TARGETS_ARE_SEQUENCES
else self._decode_training_tokens
)
sequenced_op_dict = decode_training_data(
sequenced_op_dict, final_encoder_states, subnet_params,
self.FF_dropout, eos_id
)
######
# NB: Putting this in the if statement means a (nominally) different
# loss will be used for different subjects during *parallel* transfer
# learning.
######
# apply to every *_target in the data_manifests
for t_key, data_manifest in subnet_params.data_manifests.items():
if '_targets' in t_key:
compute_cross_entropy = cross_entropy(
t_key, data_manifest, sequenced_op_dict
)
tf.compat.v1.add_to_collection(
tf.compat.v1.GraphKeys.LOSSES,
compute_cross_entropy*data_manifest.penalty_scale
)
return final_encoder_states
##########
def save_prediction_graph(
self, subnets_params, restore_epoch, save_dir, num_sequences=1,
inputs_dtype=tf.float32
):
        '''
        Save the graph so that it can later be loaded and used for inference.
        :param subnets_params:
        :param restore_epoch:
        :param num_sequences:
        :param save_dir:
        :param inputs_dtype:
        :return: None; writes a SavedModel to save_dir mapping input
            sequences ('encoder_inputs') to predicted target sequences
            ('decoder_outputs') and their per-token probabilities
            ('decoder_probs')
        '''
# init
this_checkpoint = self.checkpoint_path + '-%i' % restore_epoch
inputs_shape = [
num_sequences,
None,
subnets_params[0].data_manifests['encoder_inputs'].num_features
]
decoder_targets_list = subnets_params[0].data_manifests[
'decoder_targets'].get_feature_list()
# the inference graph
prediction_graph = tf.Graph()
with prediction_graph.as_default():
place_encoder_inputs = tf.compat.v1.placeholder(
dtype=inputs_dtype, shape=inputs_shape, name='encoder_inputs')
# NB that this is only used when TARGETS_ARE_SEQUENCES
def indexing_fxn(targets):
row_inds, col_inds = tf.meshgrid(
tf.range(num_sequences),
tf.range(self.max_hyp_length), indexing='ij')
index_sequences_elements = tf.stack((
tf.reshape(row_inds, [-1]), tf.reshape(col_inds, [-1])), 1)
return index_sequences_elements, self.max_hyp_length
# build assessment graph
get_sequenced_outputs, get_sequenced_natural_params = \
self._build_assessment_net(
{
'encoder_inputs': place_encoder_inputs,
'decoder_targets': tf.cast(np.transpose(
np.array([[[]]]), axes=[0, 2, 1]), tf.int32),
# Since this graph will be used only for inference
# there's no point in saving the parts associated
# with encoder_targets, which are *auxiliary*.
'encoder_1_targets': tf.cast([[[]]], tf.float32),
},
{
'subnet_id': tf.constant(subnets_params[0].subnet_id)
},
subnets_params[0], indexing_fxn, None
)
get_decoder_probs = tf.nn.softmax(tf.reshape(
get_sequenced_natural_params, [-1, len(decoder_targets_list)]
))
# if no decoder_targets_list was passed...
get_sequenced_decoder_outputs = tf.identity(
get_sequenced_outputs, name='decoder_outputs')
get_decoder_probs = tf.identity(
get_decoder_probs, name='decoder_probs')
# set up the session, restoring the model at this_checkpoint
EMA = (tf.train.ExponentialMovingAverage(decay=self.EMA_decay)
if self.EMA_decay else None)
sess, saver = tfh.get_session_and_saver(EMA=EMA, allow_growth=True)
saver.restore(sess, this_checkpoint)
# save this map from inputs to two outputs
tf.compat.v1.saved_model.simple_save(
sess, save_dir,
inputs={'encoder_inputs': place_encoder_inputs},
outputs={
"decoder_probs": get_decoder_probs,
"decoder_outputs": get_sequenced_decoder_outputs
}
)
def _build_assessment_net(
self, sequenced_op_dict, CPU_op_dict, params, indexing_fxn, plotting_fxn
):
# identify for tensorboard
tf.identity(CPU_op_dict['subnet_id'], name='identify_subnet_id')
# reverse and desequence the encoder targets
stride = params.decimation_factor
if self.ASSESS_ALL_DECIMATIONS:
initial_initial_ind = 0
num_loops = stride
stride = 1
else:
# use the midpoint
initial_initial_ind = 0 # stride//2
num_loops = 1
sequenced_op_dict = self._prepare_encoder_targets(
sequenced_op_dict, initial_initial_ind, stride
)
if self.inputs_to_occlude:
sequenced_op_dict['encoder_inputs'] = nn.occlude_sequence_features(
sequenced_op_dict['encoder_inputs'], self.inputs_to_occlude)
# create the sequence-classification neural network
sequenced_op_dict = self._decode_assessments(
sequenced_op_dict, params, initial_initial_ind, num_loops,
indexing_fxn
)
# don't bother to write unless there is something to plot (??)
if plotting_fxn is not None:
self._write_assessments(sequenced_op_dict, params, plotting_fxn)
return (
sequenced_op_dict['decoder_outputs'],
sequenced_op_dict['decoder_natural_params']
)
def _decode_assessments(
self, sequenced_op_dict, params, initial_initial_ind, num_loops, indexer
):
# init
if self.TARGETS_ARE_SEQUENCES:
update_decoder_assessments = \
self._update_sequence_assessments
decode_assessments_core = self._decode_assessment_sequences
else:
update_decoder_assessments = \
self._update_token_assessments
decode_assessments_core = self._decode_assessment_tokens
# index the target sequences
index_decoder_targets, _ = indexer(sequenced_op_dict['decoder_targets'])
def loop_body(
initial_ind_op,
sequenced_natural_params_dict,
concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs,
):
nonlocal sequenced_op_dict
with tf.compat.v1.variable_scope(
'seq2seq', reuse=tf.compat.v1.AUTO_REUSE
):
# encode decimated sequence starting at initial_ind
sequenced_op_dict, final_encoder_states, _, _ = self._encode_sequences(
sequenced_op_dict, params, 0.0, 0.0,
set_initial_ind=initial_ind_op
)
# *fill in* the *encoder* natural params for each initial_ind
sequenced_natural_params_dict = self._fill_in_decimated_sequences(
initial_ind_op, num_loops, sequenced_op_dict,
sequenced_natural_params_dict
)
# *accumulate* *decoder* natural params across all initial_inds
(sequenced_natural_params_dict,
concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs
) = decode_assessments_core(
params, final_encoder_states, sequenced_op_dict,
sequenced_natural_params_dict,
concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs
)
# help out tf's shape-inference engine--doesn't seem like it
# ought to be necessary but it is
for np_key, np_op in sequenced_natural_params_dict.items():
t_key = np_key.replace('natural_params', 'targets')
np_op.set_shape(
[None, None, params.data_manifests[t_key].num_features]
)
return (
initial_ind_op+1,
sequenced_natural_params_dict,
concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs,
)
# initial values of loop vars
count_num_cases = tf.shape(sequenced_op_dict['decoder_targets'])[0]
initial_values = [
tf.constant(initial_initial_ind),
{
swap(key, 'natural_params'): tf.fill([
tf.shape(op)[0], tf.shape(op)[1],
params.data_manifests[key].num_features
], 0.0)
for key, op in sequenced_op_dict.items() if '_targets' in key
},
tf.fill((count_num_cases, 0, self.max_hyp_length), 0),
tf.fill((count_num_cases, 0), 0.0),
]
######
# count_num_cases is not altogether invariant, but it is invariant
# across the while_loop. It feels like you should therefore be
# able to communicate this.
shape_invariants = [
tf.TensorShape([]),
{
swap(key, 'natural_params'): tf.TensorShape([
None, None, params.data_manifests[key].num_features
])
for key in sequenced_op_dict if '_targets' in key
},
tf.TensorShape([None, None, None]),
tf.TensorShape([None, None]),
]
######
# run the loop
(_, sequenced_natural_params_dict,
sequenced_op_dict['decoder_outputs'],
sequenced_op_dict['decoder_sequence_log_probs'],
) = tf.while_loop(
cond=lambda initial_ind, aa, bb, cc:
initial_ind < (initial_initial_ind+num_loops),
body=loop_body, loop_vars=initial_values,
shape_invariants=shape_invariants, back_prop=False
)
sequenced_op_dict = {
**sequenced_op_dict, **sequenced_natural_params_dict
}
# and now update the sequenced_op_dict
sequenced_op_dict = update_decoder_assessments(
'decoder_targets', sequenced_op_dict, params,
index_decoder_targets, num_loops
)
# If any encoder targets are categorically distributed, collect the
# tensors required to compute a word error rate.
for key, data_manifest in params.data_manifests.items():
if key.endswith('targets') and key.startswith('encoder'):
if data_manifest.distribution == 'categorical':
# no beam search for the encoder (but see notes)
sequenced_op_dict[swap(key, 'beam_targets')] = \
tf.transpose(sequenced_op_dict[key], perm=[0, 2, 1])
sequenced_op_dict[swap(key, 'outputs')] = tf.expand_dims(
tf.argmax(
sequenced_op_dict[swap(key, 'natural_params')],
axis=2, output_type=tf.int32
), 1
)
sequenced_op_dict[swap(key, 'sequence_log_probs')] = \
tf.fill([tf.shape(sequenced_op_dict[key])[0], 1], 0.0)
return sequenced_op_dict
def _decode_assessment_tokens(
self, params, final_encoder_states, sequenced_op_dict,
sequenced_natural_params_dict, concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs
):
# *sum* the *decoder* natural params across all initial_inds
sequenced_op_dict = self._decode_training_tokens(
sequenced_op_dict, final_encoder_states, params, 0.0, None
)
sequenced_natural_params_dict['decoder_natural_params'] += \
sequenced_op_dict['decoder_natural_params']
return (
sequenced_natural_params_dict,
concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs
)
def _decode_assessment_sequences(
self, params, final_encoder_states, sequenced_op_dict,
sequenced_natural_params_dict, concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs
):
(get_sequenced_decoder_outputs, get_decoder_sequence_log_probs
) = self._decode_assessment_sequences_core(
final_encoder_states, sequenced_op_dict['decoder_targets'], params,
params.data_manifests['decoder_targets'].get_feature_list(),
)
# as though the beam were (beam_width*temporal stride) wide
targ_length = tf.shape(get_sequenced_decoder_outputs)[2]
paddings = [[0, 0], [0, 0], [0, self.max_hyp_length-targ_length]]
get_sequenced_decoder_outputs = tf.pad(
tensor=get_sequenced_decoder_outputs, paddings=paddings,
constant_values=params.data_manifests[
'decoder_targets'].padding_value
)
concatenate_sequenced_decoder_outputs = tf.concat(
(concatenate_sequenced_decoder_outputs,
get_sequenced_decoder_outputs), axis=1)
concatenate_decoder_sequence_log_probs = tf.concat(
(concatenate_decoder_sequence_log_probs,
get_decoder_sequence_log_probs), axis=1)
return (
sequenced_natural_params_dict,
concatenate_sequenced_decoder_outputs,
concatenate_decoder_sequence_log_probs
)
def _decode_assessment_sequences_core(
self, final_encoder_states, get_targets, subnet_params,
decoder_targets_list,
):
eos_id = decoder_targets_list.index(self.EOS_token)
Nsequences = common_layers.shape_list(final_encoder_states[-1].h)[0]
num_decoder_target_features = subnet_params.data_manifests[
'decoder_targets'].num_features
if self.num_guessable_classes:
if get_targets is None:
print("can't restrict dictionary--targets are unknown")
else:
get_guess_indices = self._compute_guessable_class_indices(
get_targets, subnet_params)
def prev_symbols_to_natural_params(decoded_symbols, _, states):
'''
Takes the currently decoded symbols and returns the natural
parameters for the next symbol. For categorical distributions, the
natural parameters are (unnormalized) log probabilities.
Input:
decoded_symbols [Nsequences*beam_width, decoded_ids]
states the RNN hidden state
Output:
get_next_token_natural_params
[Nsequences, num_decoder_target_features]
'''
z, Ninputs = nn.feed_forward_multi_layer(
decoded_symbols[:, -1, None], num_decoder_target_features,
self.layer_sizes['decoder_embedding'], 0.0, 'decoder_embedding',
preactivation_fxns=[self._t2t_embedding_affine_fxn]*len(
self.layer_sizes['decoder_embedding'])
)
z, decoder_state = nn.LSTM_rnn(
tf.cast(tf.expand_dims(z, axis=1), tf.float32), None,
self.layer_sizes['decoder_rnn'], 0.0, 'decoder_rnn',
initial_state=states["decoder state"])
get_next_token_natural_params = self._output_net(
tf.squeeze(z, [1]),
self.layer_sizes['decoder_rnn'][-1],
self.layer_sizes['decoder_projection'],
num_decoder_target_features,
0.0, final_preactivation=self._t2t_final_affine_fxn)
if self.num_guessable_classes and (get_targets is not None):
get_next_token_natural_params = self._reduced_classes_hack(
get_next_token_natural_params, get_guess_indices)
return get_next_token_natural_params, {"decoder state": decoder_state}
# could replace with tf.contrib.seq2seq.BeamSearchDecoder
initial_ids = tf.fill([Nsequences], eos_id)
(get_sequenced_decoder_outputs, get_decoder_sequence_log_probs, _
) = beam_search.beam_search(
prev_symbols_to_natural_params, initial_ids, self.beam_width,
self.max_hyp_length, num_decoder_target_features, self.beam_alpha,
states={"decoder state": final_encoder_states}, eos_id=eos_id
)
# make sure that the sequences terminate with either <EOS> or <pad>
get_sequenced_decoder_outputs = self._set_final_nonpads(
get_sequenced_decoder_outputs,
eos_id,
subnet_params.data_manifests['decoder_targets'].padding_value
)
# outputs
return get_sequenced_decoder_outputs, get_decoder_sequence_log_probs
def _update_token_assessments(
self, key, sequenced_op_dict, params, index_targets, num_loops
):
sequenced_op_dict[swap(key, 'natural_params')] /= num_loops
# Only does something interesting for 'trial' data
(sequenced_op_dict[swap(key, 'beam_targets')],
sequenced_op_dict[swap(key, 'outputs')],
sequenced_op_dict[swap(key, 'sequence_log_probs')]
) = nn.fake_beam_for_sequence_targets(
tf.squeeze(sequenced_op_dict[key], [1]),
tf.squeeze(sequenced_op_dict[swap(key, 'natural_params')], [1]),
params.data_manifests[key].get_feature_list(),
self.beam_width, self.pad_token
)
return sequenced_op_dict
def _update_sequence_assessments(
self, key, sequenced_op_dict, params, index_targets, num_loops
):
# convert: beam, sequence log probs -> token, all-word log probs
desequenced_natural_params = nn.seq_log_probs_to_word_log_probs(
sequenced_op_dict[swap(key, 'outputs')],
sequenced_op_dict[swap(key, 'sequence_log_probs')],
params.data_manifests[key].num_features,
index_targets, self.max_hyp_length,
params.data_manifests[key].padding_value,
)
# resequence
get_targets = sequenced_op_dict[key]
index_targets, _ = nn.sequences_tools(get_targets)
resequenced_shape = [
tf.shape(get_targets)[0], tf.shape(get_targets)[1],
tf.shape(desequenced_natural_params)[-1]
]
sequenced_op_dict[swap(key, 'natural_params')] = tf.scatter_nd(
index_targets, desequenced_natural_params, resequenced_shape
)
# To facilitate printing:
# (Ncases x max_ref_length x 1) -> (Ncases x 1 x max_ref_length)
sequenced_op_dict[swap(key, 'beam_targets')] = tf.transpose(
sequenced_op_dict[key], perm=[0, 2, 1],
)
return sequenced_op_dict
def _number_sequence_elements(self, get_sequences):
'''
Returns something like:
[[ 0, 1, 2, 3, 0, 0],
[ 4, 5, 0, 0, 0, 0],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 0, 0, 0]]
I.e., the sequences' (non-zero) elements, which are initially probably
vectors, are replaced with integers that number consecutively the
elements of *all* num_cases sequences.
'''
index_sequences, get_lengths = nn.sequences_tools(get_sequences)
index_consecutively = tf.scatter_nd(
index_sequences,
tf.expand_dims(tf.range(tf.reduce_sum(get_lengths)), axis=1),
[tf.shape(get_sequences)[0], tf.shape(get_sequences)[1], 1]
)
return index_consecutively
def _fill_in_decimated_sequences(
self, get_initial_ind, num_loops, sequenced_op_dict,
sequenced_natural_params_dict,
):
for key, sequenced_op in sequenced_op_dict.items():
if key.endswith('natural_params') and key.startswith('encoder'):
if num_loops == 1:
sequenced_natural_params_dict[key] = sequenced_op
else:
np = sequenced_natural_params_dict[key]
row_inds, col_inds = tf.meshgrid(
tf.range(0, tf.shape(np)[0]),
tf.range(get_initial_ind, tf.shape(np)[1], num_loops),
indexing='ij'
)
indices = tf.stack((
tf.reshape(row_inds, [-1]), tf.reshape(col_inds, [-1])
), 1)
#######
# supposedly tf.tensor_scatter_add now works here....
#######
sequenced_natural_params_dict[key] += tf.scatter_nd(
indices,
tf.reshape(sequenced_op, [-1, tf.shape(sequenced_op)[-1]]),
tf.shape(np)
)
return sequenced_natural_params_dict
def _encode_sequences(
self, sequenced_op_dict, params, FF_dropout, RNN_dropout,
set_initial_ind=None, tower_name='blank'
):
# Reverse, embed, RNN-encode. Also penalize encoder outputs.
# useful parameters for this network
net_id = params.subnet_id
stride = params.decimation_factor
num_encoder_input_features = params.data_manifests[
'encoder_inputs'].num_features
subscope = 'subnet_{}'.format(net_id)
with tf.compat.v1.variable_scope(subscope, reuse=tf.compat.v1.AUTO_REUSE):
# reverse (a la Sutskever2014)
_, get_lengths = nn.sequences_tools(tfh.hide_shape(
sequenced_op_dict['encoder_inputs']
))
reverse_encoder_inputs = tf.reverse_sequence(
sequenced_op_dict['encoder_inputs'], get_lengths, seq_axis=1,
batch_axis=0
)
# embed
if self.TEMPORALLY_CONVOLVE:
get_RNN_inputs, set_initial_ind = self._convolve_sequences(
reverse_encoder_inputs, stride, num_encoder_input_features,
self.layer_sizes['encoder_embedding'], FF_dropout,
'encoder_embedding', tower_name
)
# this is bullet-proof even in the case of USE_BIASES or MAX_POOL
index_decimated_sequences, get_decimated_lengths = nn.sequences_tools(
tfh.hide_shape(
reverse_encoder_inputs[:, set_initial_ind::stride, :]))
else:
print('Decimating at %ix for subnet %i ' % (stride, net_id))
if set_initial_ind is None:
# probably training
set_initial_ind = tf.random.uniform([1], 0, stride, tf.int32)[0]
decimate_inputs = reverse_encoder_inputs[:, set_initial_ind::stride, :]
# in case called by tf.case, hide possibly incompatible sizes
index_decimated_sequences, get_decimated_lengths = nn.sequences_tools(
tfh.hide_shape(decimate_inputs))
get_RNN_inputs = self._sequence_embed(
tfh.hide_shape(decimate_inputs),
[*common_layers.shape_list(decimate_inputs)[0:2],
num_encoder_input_features], index_decimated_sequences,
self.layer_sizes['encoder_embedding'], FF_dropout,
'encoder_embedding')
# push through all layers of the RNN--one at a time
all_outputs = []
all_final_states = []
for iLayer, layer_size in enumerate(self.layer_sizes['encoder_rnn']):
get_RNN_inputs, get_RNN_states = nn.LSTM_rnn(
get_RNN_inputs, get_decimated_lengths, [layer_size], RNN_dropout,
'encoder_rnn_%i' % iLayer,
BIDIRECTIONAL=self.ENCODER_RNN_IS_BIDIRECTIONAL
)
all_outputs.append(get_RNN_inputs)
# get_RNN_states is a length-one tuple
all_final_states.append(get_RNN_states[0])
all_final_states = tuple(all_final_states)
# impose penalties on any of the outputs?
with tf.compat.v1.variable_scope(subscope, reuse=tf.compat.v1.AUTO_REUSE):
for iLayer, (get_outputs, layer_size) in enumerate(
zip(all_outputs, self.layer_sizes['encoder_rnn'])
):
t_key = 'encoder_%i_targets' % iLayer
if t_key in params.data_manifests:
# useful strings
np_key = swap(t_key, 'natural_params')
subnet_name = swap(t_key, 'projection')
# penalize this output
desequence_RNN_outputs = tf.gather_nd(
get_outputs, index_decimated_sequences)
######
# Consider making only the last (linear) layer proprietary:
# The subjects have different voices, and in principle
# could in even have different numbers of cepstral
# coefficients--but the early transformations out of
# abstract RNN state could be conserved.
######
desequenced_natural_params = self._output_net(
desequence_RNN_outputs,
layer_size*(1+self.ENCODER_RNN_IS_BIDIRECTIONAL),
self.layer_sizes[subnet_name],
params.data_manifests[t_key].num_features,
FF_dropout, subnet_name=subnet_name
)
# re-sequence
sequenced_op_dict[np_key] = tf.scatter_nd(
index_decimated_sequences, desequenced_natural_params,
tf.cast([
tf.shape(get_outputs)[0],
tf.shape(get_outputs)[1],
params.data_manifests[t_key].num_features,
], tf.int32))
# We will initialize the decoder with the hidden states of the last n
# layers of the encoder, where n = number of decoder hidden layers
final_states = all_final_states[-len(self.layer_sizes['decoder_rnn']):]
return sequenced_op_dict, final_states, stride, set_initial_ind
def _convolve_sequences(
self, sequences, total_stride, num_features, layer_sizes,
FF_dropout, subnet_name, tower_name
):
# probably brittle...
layer_strides = toolbox.close_factors(total_stride, len(layer_sizes))
print('Temporally convolving with strides ' + repr(layer_strides))
# In 'VALID'-style convolution, the data are not padded to accommodate
# the filter, and the final (right-most) elements that don't fit a
# filter are simply dropped. Here we pad by a sufficient amount to
# ensure that no data are dropped. There's no danger in padding too
# much because we will subsequently extract out only sequences of the
# right length by computing get_decimated_lengths on the *inputs* to
# the convolution.
paddings = [[0, 0], [0, 4*total_stride], [0, 0]]
sequences = tf.pad(tensor=sequences, paddings=paddings)
set_initial_ind = 0 # stride//2
# Construct convolutional layers. For "VALID" vs. "SAME" padding, see
# https://stackoverflow.com/questions/37674306/
preactivation_fxns = [
lambda inputs, Nin, Nout, stride=layer_stride, name='conv_%i' % iLayer:
nn.tf_conv2d_wrapper(
inputs, Nin, Nout, name=name, stiffness=self.stiffness,
filter_width=stride, USE_BIASES=self.MAX_POOL,
strides=[1, 1, 1 if self.MAX_POOL else stride, 1],
) for iLayer, layer_stride in enumerate(layer_strides)
]
activation_fxns = [
(lambda inputs, name, stride=layer_stride: nn.tf_max_pool_wrapper(
inputs, name=name, ksize=[1, 1, stride, 1], ### ksize=[1,1,8,1],
strides=[1, 1, stride, 1])) if self.MAX_POOL else
(lambda inputs, name: inputs) for layer_stride in layer_strides
]
convolve_sequences, _ = nn.feed_forward_multi_layer(
tf.expand_dims(sequences, axis=1),
num_features, layer_sizes, FF_dropout, subnet_name,
preactivation_fxns=preactivation_fxns,
activation_fxns=activation_fxns,
)
return tf.squeeze(convolve_sequences, axis=1), set_initial_ind
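# Hedged example of the stride factorization above: if toolbox.close_factors
# splits total_stride into near-equal integer factors (an assumption about
# this project-specific helper), then total_stride=12 with three layers
# might yield layer_strides like [3, 2, 2], so the three convolutional
# layers together decimate time by 3*2*2 = 12.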
def _prepare_encoder_targets(
self, sequenced_op_dict, draw_initial_ind, stride,
):
# for each sequence type...
for key, sequenced_op in sequenced_op_dict.items():
# if it's an encoder_target...
if key.startswith('encoder') and key.endswith('targets'):
# reverse (to match reversal of inputs) and decimate
_, get_targets_lengths = nn.sequences_tools(sequenced_op)
reverse_targets = tf.reverse_sequence(
input=sequenced_op, seq_lengths=get_targets_lengths,
seq_axis=1, batch_axis=0)
decimate_targets = reverse_targets[:, draw_initial_ind::stride, :]
# NB: this overwrites the targets with their decimated version
sequenced_op_dict[key] = decimate_targets
return sequenced_op_dict
def _decode_training_tokens(
self, sequenced_op_dict, final_encoder_states, subnet_params,
FF_dropout, eos_id
):
# ...
get_desequenced_natural_params = self._output_net(
final_encoder_states[-1].h,
self.layer_sizes['decoder_rnn'][-1],
self.layer_sizes['decoder_projection'],
subnet_params.data_manifests['decoder_targets'].num_features,
FF_dropout, final_preactivation=self._t2t_final_affine_fxn
)
# resequence
get_targets = sequenced_op_dict['decoder_targets']
index_elements, _ = nn.sequences_tools(get_targets)
sequenced_op_dict['decoder_natural_params'] = tf.scatter_nd(
index_elements, get_desequenced_natural_params,
[tf.shape(get_targets)[0], tf.shape(get_targets)[1],
tf.shape(get_desequenced_natural_params)[-1]]
)
return sequenced_op_dict
def _decode_training_sequences(
self, sequenced_op_dict, final_encoder_states, subnet_params,
FF_dropout, eos_id
):
'''
Initialize an RNN at the get_final_encoder_state, run on the targets,
right-shifted by one (so the first entry is an EOS), and collect up the
desequenced outputs, get_decoder_natural_params, for all time steps.
NB that the targets are *not* used here to take the sample average in
the cross entropy <-log(q)>_p. They are necessary nevertheless in order
to evaluate the decoder natural params themselves (and therefore q in
the cross entropy), which depend at each time step on the previous word
in the actual target sequence. See Eq'n (4) and surrounding discussion
in "Machine Translation of Cortical Activity to Text with an
Encoder-Decoder Framework."
This function additionally desequences the targets.
'''
# init
get_targets = sequenced_op_dict['decoder_targets']
index_sequences_elements, get_sequences_lengths = nn.sequences_tools(
get_targets)
Nsequences = common_layers.shape_list(final_encoder_states[-1].h)[0]
initial_ids = tf.fill([Nsequences, 1, 1], eos_id)
# embed input sequences; pass thru RNN; de-sequence outputs
targ_shapes = common_layers.shape_list(get_targets)[0:2] + [
subnet_params.data_manifests['decoder_targets'].num_features]
prev_targets = tf.concat((initial_ids, get_targets[:, :-1, :]), axis=1)
embed_output_sequences = self._sequence_embed(
prev_targets, targ_shapes, index_sequences_elements,
self.layer_sizes['decoder_embedding'], FF_dropout,
'decoder_embedding',
preactivation_fxn=self._t2t_embedding_affine_fxn)
get_RNN_outputs, _ = nn.LSTM_rnn(
tf.cast(embed_output_sequences, tf.float32), get_sequences_lengths,
self.layer_sizes['decoder_rnn'], self.RNN_dropout, 'decoder_rnn',
initial_state=final_encoder_states)
desequence_RNN_outputs = tf.gather_nd(
get_RNN_outputs, index_sequences_elements)
get_desequenced_natural_params = self._output_net(
desequence_RNN_outputs,
self.layer_sizes['decoder_rnn'][-1],
self.layer_sizes['decoder_projection'],
subnet_params.data_manifests['decoder_targets'].num_features,
FF_dropout,
final_preactivation=self._t2t_final_affine_fxn
)
# resequence
index_elements, _ = nn.sequences_tools(get_targets)
sequenced_op_dict['decoder_natural_params'] = tf.scatter_nd(
index_elements, get_desequenced_natural_params,
[tf.shape(get_targets)[0], tf.shape(get_targets)[1],
tf.shape(get_desequenced_natural_params)[-1]]
)
return sequenced_op_dict
@staticmethod
def _set_final_nonpads(ids, nonpad_value, pad_value):
# NB: THIS ASSUMES THAT 0 IS THE PADDING VALUE! Ideally this method
# would be more flexible, but scatter_nd inits its tensor to zeros.
# Instead you would first create a tensor of the right shape, and then
# use scatter_nd_update, but that's hard....
# index_nonpads is (Nnonpads x 2), 2 b/c row and col index
index_nonpads = tf.cast(
tf.compat.v1.where(tf.not_equal(ids[:, :, -1], 0)), tf.int32)
# make_nonpads_updates is (Nnonpads x 1)
make_nonpads_updates = tf.expand_dims(
tf.fill(tf.shape(input=index_nonpads)[0:1], nonpad_value), axis=1)
# terminal_ids has the shape of one slice of ids
# SEE nn.tf_sentence_to_word_ids, sparse_tensor_to_dense?????
#hold_terminal_ids = tf.placeholder(
# 'int32', shape=common_layers.shape_list(ids[:, :, -1, None]))
#pad_ids = tf.Variable(pad_value, dtype=hold_terminal_ids.dtype)
#pad_ids = tf.assign(pad_ids, hold_terminal_ids, validate_shape=False)
#terminal_ids = tf.scatter_nd_update(
# pad_ids, index_nonpads, make_nonpads_updates)
# NB: THIS *ASSUMES* THAT pad_index = 0
terminal_ids = tf.scatter_nd(
index_nonpads, make_nonpads_updates,
common_layers.shape_list(ids[:, :, -1, None]))
# Throw out the original last slice and concat on the new terminal_ids.
# Also eliminate the *first* entries, which are *always* <EOS>: This
# beam_search assumes its first output to be the zeroth input--forcing
# you to discard this output explicitly.
return tf.concat((ids[:, :, 1:-1], terminal_ids), axis=2)
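# Worked example (added; assumes pad=0 and eos_id=1 for illustration): a
# beam hypothesis [1, 12, 7, 9] has a non-pad terminal (9), which is
# overwritten with <EOS>, and the leading forced <EOS> is dropped, giving
# [12, 7, 1]; a padded hypothesis [1, 12, 7, 0] keeps its terminal pad
# and becomes [12, 7, 0].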
def _sequence_embed(
self, get_sequences, sequences_shapes, index_sequences_elements,
emb_layer_sizes, FF_dropout, subnet_name, preactivation_fxn=None
):
'''
To embed sequence data, you have first to de-sequence them, from
[Ncases x max_sequence_length x len(single token vector)]
to
[\sum_i^Ncases sequence_length_i x len(single token vector)].
Then you "embed" into a matrix of size
[\sum_i^Ncases sequence_length_i x N_embedding_dims].
Finally, you re-sequence into
[Ncases x max_sequence_length x N_embedding_dims].
Note that the outputs of an RNN with this input are in a sense
also de-sequenced, since they have size
[Ncases x Nhiddens].
'''
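# Shape sketch (added illustration, hypothetical sizes): with Ncases=2,
# sequence lengths (3, 2), token vectors of length 12, and
# N_embedding_dims=150, the de-sequenced matrix is [5 x 12], the
# embedding is [5 x 150], and the re-sequenced output is [2 x 3 x 150]
# (the shorter case zero-padded).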
# Ns
if preactivation_fxn is None:
preactivation_fxn = self._vanilla_affine_fxn
Ninputs = sequences_shapes[2]
# NB: there's a bug here: this hack will fail if there is no input
# embedding! (emb_layer_sizes=[]). In that case Ninputs = Nclasses
# which will conflict w/the actual input size, sc. 1.
###
desequence = tf.gather_nd(get_sequences, index_sequences_elements)
embed_desequenced, Ninputs = nn.feed_forward_multi_layer(
desequence, Ninputs, emb_layer_sizes, FF_dropout, subnet_name,
preactivation_fxns=[preactivation_fxn]*len(emb_layer_sizes))
resequence_embedded_sequences = tf.scatter_nd(
index_sequences_elements, embed_desequenced, tf.cast(
[sequences_shapes[0], sequences_shapes[1], Ninputs], tf.int32))
resequence_embedded_sequences.set_shape([None, None, Ninputs])
# https://github.com/tensorflow/tensorflow/issues/2938
return resequence_embedded_sequences
# CURRENTLY DEPRECATED
def _sequence_dilate(
self, sequences, emb_layer_sizes, FF_dropout, emb_strings,
kernel_size=2
):
######
# Use emb_strings to name the layers....
######
z = nn.TemporalConvNet(emb_layer_sizes, kernel_size, FF_dropout)(
sequences, training=True)
index_sequences_elements, get_sequences_lengths = nn.sequences_tools(z)
return z, get_sequences_lengths, index_sequences_elements
def _output_net(
self, get_activations, num_input_features, layer_sizes,
num_output_features, FF_dropout, final_preactivation=None,
subnet_name='decoder_projection'
):
'''
Just a little wrapper for feed_forward_multi_layer. It builds an MLP
followed by affine transformation--the natural params for some
exponential-family distribution.
'''
Nlayers = len(layer_sizes)
if final_preactivation is None:
final_preactivation = self._vanilla_final_affine_fxn
get_natural_params, _ = nn.feed_forward_multi_layer(
get_activations, num_input_features,
layer_sizes+[num_output_features], FF_dropout, subnet_name,
preactivation_fxns=[self._vanilla_affine_fxn]*Nlayers+[
final_preactivation],
activation_fxns=[tf.nn.relu]*Nlayers+[lambda xx, name: xx]
)
return get_natural_params
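# Example call (added; hypothetical sizes): self._output_net(z, 256, [100],
# 40, 0.0) builds a single 100-unit ReLU hidden layer followed by an affine
# map from 100 to 40 natural parameters (logits, for categorical data).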
@_transpose_annotator(False)
def _vanilla_affine_fxn(self, inputs, Nin, Nout):
return nn.tf_matmul_wrapper(inputs, Nin, Nout, stiffness=self.stiffness)
@_transpose_annotator(False)
def _t2t_embedding_affine_fxn(self, inputs, Nin, Nout):
return nn.tf_matmul_wrapper(
inputs, Nin, Nout, stiffness=self.stiffness,
num_shards=self.num_seq2seq_shards,
USE_BIASES=self.BIAS_DECODER_OUTPUTS
)
@_transpose_annotator(True)
def _vanilla_final_affine_fxn(self, inputs, Nin, Nout):
return nn.tf_matmul_wrapper(
inputs, Nin, Nout, stiffness=self.stiffness,
transpose_b=True, USE_BIASES=True)
@_transpose_annotator(True)
def _t2t_final_affine_fxn(self, inputs, Nin, Nout):
return nn.tf_matmul_wrapper(
inputs, Nin, Nout, stiffness=self.stiffness,
transpose_b=True, num_shards=self.num_seq2seq_shards,
USE_BIASES=self.BIAS_DECODER_OUTPUTS)
def _write_assessments(self, sequenced_op_dict, params, plotting_fxn):
# One can request any sequenced_op via the assessment_op_set--just make
# sure it's also in the all_assessment_ops list
for op_key, sequenced_op in sequenced_op_dict.items():
# if requested...
if op_key in self.assessment_op_set:
# ...identify this operation for returning to the user
sequenced_op_dict[op_key] = tf.identity(
sequenced_op, name='assess_' + op_key
)
# for sequences of categorical data
for key, data_manifest in params.data_manifests.items():
if '_targets' in key:
# <-log[q(outputs_d|inputs)]>_p(outputs_d,inputs),
# <-log[q(outputs_e|inputs)]>_p(outputs_e,inputs)
compute_cross_entropy = cross_entropy(
key, data_manifest, sequenced_op_dict
)
ce_key = swap(key, 'cross_entropy')
if ce_key in self.assessment_op_set:
compute_cross_entropy = tf.identity(
compute_cross_entropy, name='assess_' + ce_key
)
# write cross_entropy to tb, whether or not it was requested
self.summary_op_set.add(ce_key)
tf.compat.v1.summary.scalar(
'summarize_' + ce_key, np.log2(np.e)*compute_cross_entropy
)
# make some extra assessments of categorical data
if data_manifest.distribution == 'categorical':
self._write_categorical_assessments(
key, sequenced_op_dict, params, plotting_fxn
)
def _write_categorical_assessments(
self, key, sequenced_op_dict, params, plotting_fxn
):
# gather some useful tensors
sequenced_targets = sequenced_op_dict[key]
sequenced_natural_params = sequenced_op_dict[swap(key, 'natural_params')]
index_targets, _ = nn.sequences_tools(sequenced_targets)
desequenced_targets = tf.cast(
tf.gather_nd(sequenced_targets, index_targets), tf.int32
)
desequenced_natural_params = tf.gather_nd(
sequenced_natural_params, index_targets
)
# write assessments
assess_accuracies, predict_top_k_inds = self._write_accuracies(
key, desequenced_targets, desequenced_natural_params
)
self._write_word_error_rates(key, sequenced_op_dict, params)
confusions = self._write_confusions(
key, desequenced_targets, desequenced_natural_params,
predict_top_k_inds, params, plotting_fxn
)
self._write_frequency_normalized_stats(key, confusions)
self._write_calibration(
key, desequenced_natural_params, assess_accuracies, params
)
def _write_accuracies(
self, key, desequenced_targets, desequenced_natural_params
):
# top-k accuracy
# \sum_i=1^k{<\delta{outputs_d - argmax_a[q(a|inputs)]}>_p(outputs_d,inputs)}
_, predict_top_k_inds = tf.nn.top_k(
desequenced_natural_params, k=self.k_for_top_k_accuracy
)
predict_top_k_inds = tf.identity(
predict_top_k_inds, name='assess_%s' % swap(key, 'top_k_inds')
)
assess_accuracies = tf.cast(
tf.equal(predict_top_k_inds, desequenced_targets), tf.float32
)
average_accuracies = tf.reduce_mean(assess_accuracies, axis=0)
if swap(key, 'top_%i_accuracy' % self.k_for_top_k_accuracy) in self.summary_op_set:
tf.compat.v1.summary.scalar(
'summarize_%s' % swap(
key, 'top_%i_accuracy' % self.k_for_top_k_accuracy
),
tf.reduce_sum(average_accuracies)
)
# <\delta{outputs_d - argmax_a[q(a|inputs)]}>_p(outputs_d,inputs)
assess_average_accuracy = tf.gather(
average_accuracies, 0, name='assess_%s' % swap(key, 'accuracy')
)
if swap(key, 'accuracy') in self.summary_op_set:
tf.compat.v1.summary.scalar(
'summarize_%s' % swap(key, 'accuracy'), assess_average_accuracy
)
return assess_accuracies, predict_top_k_inds
def _write_word_error_rates(self, key, sequenced_op_dict, params):
# the tensors required to compute a word error rate
sequenced_outputs = sequenced_op_dict[swap(key, 'outputs')]
sequenced_beam_targets = sequenced_op_dict[swap(key, 'beam_targets')]
sequence_log_probs = tf.identity(
sequenced_op_dict[swap(key, 'sequence_log_probs')],
name='assess_%s' % swap(key, 'sequence_log_probs')
)
# minimum normalized edit distance between sequences of words
targets_list = params.data_manifests[key].get_feature_list()
eos_id = (
targets_list.index(self.EOS_token)
if self.EOS_token in targets_list else -1
)
get_word_error_rates = nn.tf_expected_word_error_rates(
sequenced_beam_targets, sequenced_outputs, sequence_log_probs,
EXCLUDE_EOS=True, eos_id=eos_id
)
assess_word_error_rate = tf.reduce_mean(
get_word_error_rates, name='assess_%s' % swap(key, 'word_error_rate')
)
######
# FIX ME
# tf.compat.v1.get_collection('my_collection')
# tf.compat.v1.add_to_collection('my_collection', assess_word_error_rate)
# EMA = tf.train.ExponentialMovingAverage(decay=0.9)
# assess_word_error_rate = EMA.apply(tf.compat.v1.get_collection('my_collection'))
######
if swap(key, 'word_error_rate') in self.summary_op_set:
tf.compat.v1.summary.scalar(
'summarize_%s' % swap(key, 'word_error_rate'),
assess_word_error_rate
)
def _write_confusions(
self, key, desequenced_targets, desequenced_natural_params,
predict_top_k_inds, params, plotting_fxn
):
# tf's confusion matrix wants predictions, not probs. Therefore,
# you *prefer* to use your own version, using output *probabilities*.
num_target_features = params.data_manifests[key].num_features
if self.PROBABILISTIC_CONFUSIONS:
# the kind of confusion matrix, via conditional probabilities
qvec_samples = tf.nn.softmax(desequenced_natural_params)
xpct_pvec_qvec_unnorm = tf.scatter_nd(
desequenced_targets, qvec_samples,
tf.constant([num_target_features]*2, dtype=tf.int32))
xpct_pvec_unnorm = tf.reduce_sum(
xpct_pvec_qvec_unnorm, axis=1, keepdims=True
)
confusions = tf.divide(
xpct_pvec_qvec_unnorm, xpct_pvec_unnorm,
name='assess_%s' % swap(key, 'confusions')
)
else:
# get confusions and supply a name to the op
confusions = tf.math.confusion_matrix(
labels=tf.reshape(desequenced_targets, [-1]),
predictions=predict_top_k_inds[:, 0],
num_classes=num_target_features)
confusions = tf.identity(
confusions, name='assess_%s' % swap(key, 'confusions')
)
image_key = swap(key, 'confusions_image')
if image_key in self.summary_op_set:
tf.compat.v1.summary.image(
'summarize_%s' % image_key,
plotting_fxn(
confusions, params.data_manifests[key].get_feature_list()
)
)
return confusions
def _write_frequency_normalized_stats(self, key, confusions):
# EXPECTED frequency-normalized accuracy
xpct_key = swap(key, 'xpct_normalized_accuracy')
target_frequencies = tf.reduce_sum(confusions, axis=1)
where_targets = tf.cast(
tf.compat.v1.where(target_frequencies > 0), tf.int32
)
frequency_normalized_accuracies = tf.divide(
tf.gather(tf.linalg.diag_part(confusions), where_targets),
tf.gather(target_frequencies, where_targets))
assess_xpct_frequency_normalized_accuracy = tf.reduce_mean(
frequency_normalized_accuracies, name='assess_%s' % xpct_key
)
if xpct_key in self.summary_op_set:
tf.compat.v1.summary.scalar(
'summarize_%s' % xpct_key,
assess_xpct_frequency_normalized_accuracy
)
# VARIANCE of the frequency-normalized accuracy
vrnc_key = swap(key, 'vrnc_normalized_accuracy')
assess_vrnc_frequency_normalized_accuracy = tf.reduce_mean(
tf.math.squared_difference(
frequency_normalized_accuracies,
assess_xpct_frequency_normalized_accuracy
),
name='assess_%s' % vrnc_key)
if vrnc_key in self.summary_op_set:
tf.compat.v1.summary.scalar(
'summarize_%s' % vrnc_key,
assess_vrnc_frequency_normalized_accuracy
)
def _write_calibration(
self, key, desequenced_natural_params, assess_accuracies, params
):
# the average entropy of the *output distribution*
s_key = swap(key, 'entropy')
if s_key in self.summary_op_set:
C = tf.reduce_logsumexp(desequenced_natural_params, axis=1)
decoder_probs = tf.nn.softmax(desequenced_natural_params)
assess_entropies = C - tf.reduce_sum(tf.multiply(
decoder_probs, desequenced_natural_params), axis=1)
average_entropy = tf.reduce_mean(assess_entropies)
tf.compat.v1.summary.scalar(
'summarize_%s' % s_key, np.log2(np.e)*average_entropy
)
else:
return
# and does it correlate with accuracy?
s_key = swap(key, 'calibration')
if s_key in self.summary_op_set:
acc_entropy_corr = tfp.stats.correlation(
assess_accuracies[:, 0], assess_entropies, event_axis=None)
tf.compat.v1.summary.scalar(
'summarize_%s' % s_key, acc_entropy_corr
)
# also *look* at the relationship
s_key = swap(key, 'calibration_image')
if s_key in self.summary_op_set:
num_target_features = params.data_manifests[key].num_features
tf.compat.v1.summary.image(s_key, dual_violin_plot(
assess_entropies, assess_accuracies[:, 0], [0, 1],
x_axis_label='correctness', y_axis_label='entropy',
ymin=0.0, ymax=np.log2(num_target_features)
))
def _assess(
self, sess, assessment_struct, epoch, assessment_step,
decoder_targets_list, data_partition
):
########
# add functionality for encoder categorical data?
########
# The summaries you wish to make for tensorboard. You need a copy
# because you intend to alter the set on a temporary basis.
summary_op_set = copy.copy(self.summary_op_set)
if len(decoder_targets_list) > 100:
summary_op_set.discard('decoder_confusions_image')
if (epoch % 10 != 0):
summary_op_set.discard('decoder_confusions_image')
summary_op_set.discard('decoder_calibration_image')
# The assessments you wish to make for printing or returning. Convert
# to a list to ensure the order is fixed
assessment_op_list = list(self.assessment_op_set)
# ...initialize the session with training/validation data
sess.run(assessment_struct.initializer)
# ...execute all summaries and assessments
(summaries, assessments, subnet_id) = sess.run((
[sess.graph.get_operation_by_name('summarize_' + summary_op).outputs[0]
for summary_op in summary_op_set],
[sess.graph.get_operation_by_name('assess_' + assessment_op).outputs[0]
for assessment_op in assessment_op_list],
sess.graph.get_operation_by_name('identify_subnet_id').outputs[0],
# sess.graph.get_operation_by_name('seq2seq/case/identify_initial_ind').outputs[0],
))
# update the assessment_struct with the assessments
for field, assessment in zip(assessment_op_list, assessments):
setattr(assessment_struct, field, assessment)
# if there's a writer...
if assessment_struct.writer:
# ...write to tensorboard
for summary in summaries:
assessment_struct.writer.add_summary(summary, epoch)
assessment_struct.writer.flush()
if 'decoder_accuracy' in self.assessment_op_set:
# ...and to the screen
print("step %2d: %10s decoder accuracy (%i) = %.2g" % (
epoch, data_partition, subnet_id,
assessment_struct.decoder_accuracy
))
assessment_struct.decoder_accuracies[
assessment_step] = assessment_struct.decoder_accuracy
if 'decoder_word_error_rate' in self.assessment_op_set:
assessment_struct.decoder_word_error_rates[
assessment_step] = assessment_struct.decoder_word_error_rate
# print some assessments
if not self.TARGETS_ARE_SEQUENCES:
# Non-sequence references/hypotheses are based on a fake_beam,
# so we have to follow its lead and (re)build a unique tokens
# list
decoder_targets_list = nn.targets_to_tokens(
decoder_targets_list, self.pad_token)
if (
'decoder_sequence_log_probs' in self.assessment_op_set and
'decoder_outputs' in self.assessment_op_set and
'decoder_beam_targets' in self.assessment_op_set
):
on_clr = 'on_yellow' if data_partition == 'training' else 'on_cyan'
# references
sequenced_decoder_target = self.target_inds_to_sequences(
assessment_struct.decoder_beam_targets, decoder_targets_list
)[0]
cprint(
'example %s reference:' % data_partition, on_color=on_clr
)
cprint('\t' + sequenced_decoder_target, on_color='on_red')
# hypotheses
sequenced_decoder_outputs = self.target_inds_to_sequences(
assessment_struct.decoder_outputs,
decoder_targets_list
)
decoder_sequence_log_probs = assessment_struct.decoder_sequence_log_probs[0]
log_probs = decoder_sequence_log_probs - logsumexp(
decoder_sequence_log_probs)
probs = np.exp(log_probs)
cprint(
'example ' + data_partition + ' hypothesis:',
on_color=on_clr
)
for ind in range(self.beam_width):
cprint('%.2f\t' % probs[ind] + sequenced_decoder_outputs[ind],
on_color='on_green')
print('')
# print *all* validation hypotheses and references
num_examples = assessment_struct.decoder_outputs.shape[0]
for iExample in range(num_examples):
ref = self.target_inds_to_sequences(
assessment_struct.decoder_beam_targets,
decoder_targets_list, iExample)[0]
hyp = self.target_inds_to_sequences(
assessment_struct.decoder_outputs,
decoder_targets_list, iExample)[0]
cprint('{0:60} {1}'.format(ref, hyp), on_color='on_cyan')
if iExample > 50:
break
print('')
# debugging: print images....
if data_partition == 'training':
clear_output(wait=True)
return assessment_struct
def _batch_and_split_data(
self, subnets_params, num_GPUs, data_partition='training'
):
# remove any device specifications for the input data
with tf.device(None):
# create an iterator across batches from the tf_records
dataset = self._tf_records_to_dataset(
subnets_params, data_partition, self.Ncases,
self.num_training_shards_to_discard
)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
# get the next batch and break into sequences and subnet_id dicts
GPU_op_dict = iterator.get_next()
CPU_keys = ['subnet_id']
CPU_op_dict = {key: GPU_op_dict.pop(key) for key in CPU_keys}
# split data for processing across GPUs.
batch_size = tf.shape(GPU_op_dict['decoder_targets'])[0]
final_index = batch_size - tf.math.mod(batch_size, num_GPUs)
GPU_split_op_dict = {
key: tf.split(
axis=0, num_or_size_splits=num_GPUs,
value=batch_sequence_data[:final_index]
) for key, batch_sequence_data in GPU_op_dict.items()
}
return GPU_split_op_dict, CPU_op_dict, iterator.initializer
def _generate_oneshot_datasets(self, assessment_params, num_epochs):
# use as many training as *validation* samples
num_assessment_examples = sum(
[sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(
assessment_params.tf_record_partial_path.format(block_id)))
for block_id in assessment_params.block_ids['validation']])
# for each data type that you want to assess, create a dataset
assessments = dict.fromkeys(self.assessment_partitions)
for i, data_partition in enumerate(assessments):
dataset = self._tf_records_to_dataset(
[assessment_params], data_partition, num_assessment_examples
)
if i == 0:
# create just one iterator---from *any* dataset's types
# and shapes, since they're all the same
iterator = tf.compat.v1.data.Iterator.from_structure(
tf.compat.v1.data.get_output_types(dataset),
tf.compat.v1.data.get_output_shapes(dataset)
)
assessments[data_partition] = self._initialize_assessment_struct(
iterator.make_initializer(dataset), data_partition, num_epochs)
# get all the data and break into sequences and subnet_id dicts
GPU_op_dict = iterator.get_next()
CPU_keys = ['subnet_id']
CPU_op_dict = {key: GPU_op_dict.pop(key) for key in CPU_keys}
return GPU_op_dict, CPU_op_dict, assessments
@staticmethod
def _standard_indexer(sequences):
(index_sequences_elements, get_sequences_lengths) = nn.sequences_tools(
sequences)
max_length = tf.reduce_max(get_sequences_lengths)
# "you should use something longer than max_sequences_lengths!"
return index_sequences_elements, max_length
def target_inds_to_sequences(self, hypotheses, targets_list, iExample=0):
predicted_tokens = [
''.join([targets_list[ind] for ind in hypothesis]).replace(
'_', ' ').replace(self.pad_token, '').replace(
self.EOS_token, '').rstrip()
for hypothesis in hypotheses[iExample]
]
return predicted_tokens
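# Doctest-style sketch (added; assumes word tokens carry trailing
# underscores and that pad/EOS tokens contain no underscore, e.g.
# targets_list = ['<pad>', '<EOS>', 'hello_', 'world_']):
#   hypotheses[0] = [[2, 3, 1, 0]]  ->  ['hello world']
# since '_' -> ' ', then the pad and EOS strings are stripped and the
# result right-trimmed.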
def _tf_records_to_dataset(
self, subnets_params, data_partition, num_cases,
num_shards_to_discard=0, DROP_REMAINDER=False
):
'''
Load, shuffle, batch and pad, and concatenate across subnets (for
parallel transfer learning) all the data.
'''
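# Added note: padded_batch (below) pads every key to the longest example
# in the batch using that key's data_manifest.padding_value, so batches
# are rectangular even though raw sequences vary in length.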
# accumulate datasets, one for each subnetwork
dataset_list = []
for subnet_params in subnets_params:
dataset = tf.data.TFRecordDataset([
subnet_params.tf_record_partial_path.format(block_id)
for block_id in subnet_params.block_ids[data_partition]]
)
dataset = dataset.map(
lambda example_proto: tfh.parse_protobuf_seq2seq_example(
example_proto, subnet_params.data_manifests
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
#########
# Insane tensorflow bug: "num_parallel_calls" cannot be moved to
# the preceding line (after the comma), and results in extremely
# erratic behavior (especially in conjunction with Jupyter)
#########
# filter data to include or exclude only specified decoder targets?
decoder_targets_list = subnet_params.data_manifests[
'decoder_targets'].get_feature_list()
target_filter = TargetFilter(
decoder_targets_list, subnet_params.target_specs,
data_partition
)
dataset = target_filter.filter_dataset(dataset)
# # filter out words not in the decoder_targets_list
# ######
# # FIX ME
# if False: # not self.TARGETS_ARE_SEQUENCES:
# OOV_id = (decoder_targets_list.index(self.OOV_token)
# if self.OOV_token in decoder_targets_list else -1)
# dataset = dataset.filter(
# lambda encoder_input, decoder_target, encoder_target, s_id:
# tf.not_equal(decoder_target[0], OOV_id))
# ######
# discard some of the data?; shuffle; batch (evening out w/padding)
if num_shards_to_discard > 0:
dataset = dataset.shard(num_shards_to_discard+1, 0)
dataset = dataset.shuffle(buffer_size=35000) # > greatest
dataset = dataset.padded_batch(
num_cases,
padded_shapes=tf.compat.v1.data.get_output_shapes(dataset),
padding_values={
key: data_manifest.padding_value
for key, data_manifest in subnet_params.data_manifests.items()
},
drop_remainder=DROP_REMAINDER
)
# add id for "proprietary" parts of network under transfer learning
dataset = dataset.map(
lambda batch_of_protos_dict: {
**batch_of_protos_dict,
'subnet_id': tf.constant(
subnet_params.subnet_id, dtype=tf.int32)
}
)
dataset_list.append(dataset)
# (randomly) interleave (sub-)batches w/o throwing anything away
dataset = reduce(
lambda set_a, set_b: set_a.concatenate(set_b), dataset_list
)
dataset = dataset.shuffle(buffer_size=3000)
######
# Since your parse_protobuf_seq2seq_example isn't doing much, the
# overhead associated with just scheduling the dataset.map will
# dominate the cost of applying it. Therefore, tensorflow
# recommends batching first, and applying a vectorized version of
# parse_protobuf_seq2seq_example. But you shuffle first.....
######
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) #num_cases)
return dataset
def compute_learning_rate(self, subnets_params, Ncases_total=None):
if not Ncases_total:
data_graph = tf.Graph()
with data_graph.as_default():
dataset = tf.data.TFRecordDataset([
subnet_params.tf_record_partial_path.format(block_id)
for subnet_params in subnets_params
for block_id in subnet_params.block_ids['training']
])
count_records = dataset.reduce(0, lambda x, _: x + 1)
Ncases_total = tf.compat.v1.Session().run(count_records)
learning_rate = self.temperature/Ncases_total
print('learning rate is %f' % learning_rate)
return learning_rate
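# Worked arithmetic (added; hypothetical numbers): with temperature=1.0
# and 4000 total training cases, the learning rate is 1.0/4000 = 2.5e-4.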
def restore_and_assess(
self, subnets_params, restore_epoch, WRITE=True, **graph_kwargs
):
######
# This code is redundant with fit above....
# You *could* just construct the GraphBuilder once in the constructor
assessment_subnet_params = subnets_params[-1]
decoder_targets_list = assessment_subnet_params.data_manifests[
'decoder_targets'].get_feature_list()
def assessment_data_fxn(num_epochs):
(data_op_tuple, misc_op_tuple, assessments
) = self._generate_oneshot_datasets(
assessment_subnet_params, num_epochs
)
if not WRITE:
for assessment in assessments.values():
assessment.writer = None
return data_op_tuple, misc_op_tuple, assessments
@tfmpl.figure_tensor
def plotting_fxn(confusions, axis_labels):
fig = toolbox.draw_confusion_matrix(
confusions, axis_labels, (12, 12))
return fig
def assessment_net_builder(GPU_op_dict, CPU_op_dict):
return self._build_assessment_net(
GPU_op_dict, CPU_op_dict, assessment_subnet_params,
self._standard_indexer, plotting_fxn
)
def assessor(sess, assessment_struct, epoch, assessment_step, data_partition):
return self._assess(
sess, assessment_struct, epoch, assessment_step,
decoder_targets_list, data_partition)
######
# (re-)build the assessment graph and restore its params from the ckpt
graph_builder = tfh.GraphBuilder(
None, assessment_data_fxn, None, assessment_net_builder, None,
assessor, self.checkpoint_path, restore_epoch, restore_epoch-1,
EMA_decay=self.EMA_decay, assessment_GPU=self.assessment_GPU,
**graph_kwargs
)
return graph_builder.assess()
def get_weights_as_numpy_array(self, tensor_name, restore_epoch):
# use the tensorflow checkpoint reader
this_checkpoint = self.checkpoint_path + '-%i' % restore_epoch
reader = pywrap_tensorflow.NewCheckpointReader(this_checkpoint)
return reader.get_tensor(tensor_name)
def _compute_guessable_class_indices(self, get_targets, subnet_params):
# Not quite right, but easier to implement: construct a dictionary of
# size num_guessable_classes, then add in the words actually in the
# target sentences. Thus, the dictionary size will generally differ
# across sentences....
# Ns
num_words_avg = 7
num_cases = tf.shape(input=get_targets)[0]
num_decoder_target_features = subnet_params.data_manifests[
'decoder_targets'].num_features
# randomly generate the "extra"--incorrect but guessable--classes
make_extra_classes = tf.tile(tf.expand_dims(tf.random.shuffle(tf.range(
num_decoder_target_features))[
:(self.num_guessable_classes-num_words_avg)
], axis=0), (num_cases, 1))
# first get a tensor of the guessable classes
tile_all_classes = tf.tile(tf.expand_dims(
tf.range(num_decoder_target_features), axis=0), (num_cases, 1))
get_unused_classes_matrix = tf.sets.difference(
tile_all_classes, get_targets[:, :, 0])
get_used_classes_matrix = tf.sets.difference(
tile_all_classes, get_unused_classes_matrix)
get_guessable_classes = tf.sparse.to_dense(tf.sets.union(
get_used_classes_matrix, make_extra_classes))
# expand to beam_width
get_guessable_classes = tf.reshape(tf.tile(tf.expand_dims(
get_guessable_classes, axis=1), (1, self.beam_width, 1)),
[num_cases*self.beam_width, -1])
# now get the corresponding indices (for scattering)
get_guessable_class_row_indices = tf.reshape(tf.tile(
tf.expand_dims(tf.range(num_cases*self.beam_width), axis=1),
(1, tf.shape(get_guessable_classes)[1])), [-1])
get_guessable_class_col_indices = tf.reshape(
get_guessable_classes, [-1])
get_guessable_indices = tf.stack(
(get_guessable_class_row_indices, get_guessable_class_col_indices),
axis=1)
return get_guessable_indices
def _reduced_classes_hack(
self, score_as_unnorm_log_probs, get_guessable_indices
):
# Ns
num_cases = tf.shape(score_as_unnorm_log_probs)[0]
# keep only the unnormalized log probs at the guessable indices
get_guessable_updates = tf.gather_nd(
score_as_unnorm_log_probs, get_guessable_indices)
get_guessable_unnorm_log_probs = tf.scatter_nd(
get_guessable_indices, get_guessable_updates,
tf.shape(score_as_unnorm_log_probs))
# the pad should not be guessable
get_guessable_unnorm_log_probs = tf.concat(
(tf.zeros([num_cases, 1]), get_guessable_unnorm_log_probs[:, 1:]),
axis=1)
# being log probs, they can't be left at 0, so we need to populate the
# log prob matrix for the classes we *don't* want to select from, too
index_unguessable_unnorm_log_probs = tf.cast(
tf.compat.v1.where(tf.equal(get_guessable_unnorm_log_probs, 0)), tf.int32)
###
get_batch_min = tf.reduce_min(score_as_unnorm_log_probs)
# This feels ugly--would be better, albeit more complicated, to use the
# row mins. On the other hand, you still have to do the weird thing of
# multiplying it by two or whatever....
###
make_unguessable_updates = tf.fill(
tf.shape(index_unguessable_unnorm_log_probs)[0:1], get_batch_min)
get_unguessable_unnorm_log_probs = tf.scatter_nd(
index_unguessable_unnorm_log_probs, make_unguessable_updates,
tf.shape(score_as_unnorm_log_probs))
# now add the two pieces together
return get_guessable_unnorm_log_probs + get_unguessable_unnorm_log_probs
class TargetFilter:
def __init__(self, unique_targets, target_specs, this_data_type):
'''
# Example:
target_specs = {
'validation': [
['this', 'was', 'easy', 'for', 'us'],
['they', 'often', 'go', 'out', 'in', 'the', 'evening'],
['i', 'honour', 'my', 'mum'],
['a', 'doctor', 'was', 'in', 'the', 'ambulance', 'with', 'the', 'patient'],
['we', 'are', 'open', 'every', 'monday', 'evening'],
['withdraw', 'only', 'as', 'much', 'money', 'as', 'you', 'need'],
['allow', 'each', 'child', 'to', 'have', 'an', 'ice', 'pop'],
['is', 'she', 'going', 'with', 'you']
]
}
'''
# fixed
data_types = {'training', 'validation'}
# convert target_specs dictionary entries from word- to index-based
# NB: PROBABLY NOT GENERAL ENOUGH to work w/non-word_sequence data
self.target_specs = {key: [
[unique_targets.index(w + '_') for w in target] + [1]
for target in target_spec] for key, target_spec in target_specs.items()
}
# store for later use
self.this_data_type = this_data_type
self.other_data_type = (data_types - {this_data_type}).pop()
def _test_special(self, fetch_target_indices, data_type):
# Test if this tf_record target is among this dataset's target_specs.
# NB that this function returns a (boolean) tf.tensor.
TEST_SPECIAL = tf.constant(False)
for target_indices in self.target_specs[data_type]:
TEST_MATCH = tf.reduce_all(
tf.linalg.diag_part(tf.equal(
fetch_target_indices,
np.array(target_indices, ndmin=2))
))
TEST_SPECIAL = tf.logical_or(TEST_SPECIAL, TEST_MATCH)
return TEST_SPECIAL
def filter_dataset(self, dataset):
if self.this_data_type in self.target_specs:
return dataset.filter(
lambda example_dict: self._test_special(
example_dict['decoder_targets'], self.this_data_type
))
elif self.other_data_type in self.target_specs:
return dataset.filter(
lambda example_dict: self._test_special(
example_dict['decoder_targets'], self.other_data_type
))
else:
return dataset
def cross_entropy(key, data_manifest, sequenced_op_dict):
'''
...
In fact, this function *averages*, rather than sums, across all features
given by a particular key. Although the result is not technically the
cross entropy of the output, it is more easily comparable across keys and
therefore facilitates the design of penalty_scales.
'''
# desequence the targets and natural_params
# NB that this enforces that the lengths of the predicted and actual
# sequences match. This is of course *not* enforced when calculating the
# word error rate, which is anyway computed from 'decoder_outputs', not
# 'decoder_natural_params'.
index_targets, get_lengths = nn.sequences_tools(sequenced_op_dict[key])
targets = tf.gather_nd(sequenced_op_dict[key], index_targets)
np_key = swap(key, 'natural_params')
natural_params = tf.gather_nd(sequenced_op_dict[np_key], index_targets)
# the form of the cross-entropy depends on the distribution
if data_manifest.distribution == 'Gaussian':
# average across features (axis=1)
compute_cross_entropy = tf.reduce_mean(
tf.square(natural_params - targets), 1)/2
elif data_manifest.distribution == 'categorical':
compute_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(targets, [-1]), logits=natural_params
)
elif data_manifest.distribution == 'CTC':
#########
# Why do we need to pad the symbol dimensions with a zero??
sequenced_natural_params = tf.pad(
sequenced_op_dict[np_key], tf.constant([[0, 0], [0, 0], [0, 1]])
)
#########
# the labels need to be a SparseTensor
sequenced_encoder_targets = tf.SparseTensor(
tf.cast(index_targets, tf.int64),
tf.reshape(targets, [-1]),
tf.cast(
[tf.shape(get_lengths)[0], tf.reduce_max(get_lengths)],
tf.int64
)
)
####
# ugh: not actually a cross entropy...
####
compute_cross_entropy = tf.compat.v1.nn.ctc_loss(
sequenced_encoder_targets,
inputs=sequenced_natural_params,
sequence_length=get_lengths,
preprocess_collapse_repeated=True,
ctc_merge_repeated=False,
time_major=False
)
else:
# raise NotImplementedError(
# "Only Gaussian, categorical cross entropies have been impl.")
print('WARNING: unrecognized data_manifest.', end='')
print('distribution; not computing a cross entropy')
return
# average across elements of the batch
return tf.reduce_mean(compute_cross_entropy, 0)
###return tf.reduce_sum(compute_cross_entropy, 0)
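# Added examples of the two closed-form branches above: for a Gaussian
# manifest, a target of 2.0 with natural param 3.0 contributes
# (3.0 - 2.0)**2 / 2 = 0.5 before batch-averaging; for a categorical
# manifest the same quantity is the usual softmax cross entropy of the
# logits against the integer label.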
def swap(key, string):
# In SequenceNetworks, keys are often constructed from the data_manifest
# key by swapping out the word 'targets' for some other string. This is
# just a shortcut for that process.
return key.replace('targets', string)
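# Doctest-style examples (added):
#   swap('decoder_targets', 'natural_params') == 'decoder_natural_params'
#   swap('encoder_1_targets', 'outputs') == 'encoder_1_outputs'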
def data_augmentor(sequenced_op_dict, keyword):
######
# This has a bunch of values hard-coded in--including the booleans that
# control whether or not something happens. At some future date you
# might generalize it.
######
# temporally warp the encoder data
if False:
draw_stretch_factor = tf.random.uniform(
[1], minval=0.4, maxval=1.5, dtype=tf.float32)[0]
for key, sequenced_op in sequenced_op_dict.items():
if keyword in key:
sequenced_op_dict[key] = nn.tf_linear_interpolation(
sequenced_op, draw_stretch_factor, axis=1)
# jitter the onset and offset of the encoder data
if False:
draw_jitters = tf.random.uniform(
[2], minval=200, maxval=1000, dtype=tf.int32)
for key, sequenced_op in sequenced_op_dict.items():
if keyword in key:
sequenced_op_dict[key] = sequenced_op[
:, draw_jitters[0]:-50, :]
return sequenced_op_dict
# ---- carla-simulator/ros-bridge :: /carla_ros_bridge/src/carla_ros_bridge/imu.py (MIT) ----
#!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Classes to handle Carla imu sensor
"""
from transforms3d.euler import euler2quat
import carla_common.transforms as trans
from carla_ros_bridge.sensor import Sensor
from sensor_msgs.msg import Imu
class ImuSensor(Sensor):
"""
Actor implementation details for imu sensor
"""
def __init__(self, uid, name, parent, relative_spawn_pose, node, carla_actor, synchronous_mode):
"""
Constructor
:param uid: unique identifier for this object
:type uid: int
:param name: name identifying this object
:type name: string
:param parent: the parent of this
:type parent: carla_ros_bridge.Parent
:param relative_spawn_pose: the relative spawn pose of this
:type relative_spawn_pose: geometry_msgs.Pose
:param node: node-handle
:type node: CompatibleNode
:param carla_actor : carla actor object
:type carla_actor: carla.Actor
:param synchronous_mode: use in synchronous mode?
:type synchronous_mode: bool
"""
super(ImuSensor, self).__init__(uid=uid,
name=name,
parent=parent,
relative_spawn_pose=relative_spawn_pose,
node=node,
carla_actor=carla_actor,
synchronous_mode=synchronous_mode)
self.imu_publisher = node.new_publisher(Imu, self.get_topic_prefix(), qos_profile=10)
self.listen()
def destroy(self):
super(ImuSensor, self).destroy()
self.node.destroy_publisher(self.imu_publisher)
# pylint: disable=arguments-differ
def sensor_data_updated(self, carla_imu_measurement):
"""
Function to transform a received imu measurement into a ROS Imu message
:param carla_imu_measurement: carla imu measurement object
:type carla_imu_measurement: carla.IMUMeasurement
"""
imu_msg = Imu()
imu_msg.header = self.get_msg_header(timestamp=carla_imu_measurement.timestamp)
# Carla uses a left-handed coordinate convention (X forward, Y right, Z up).
# Here, these measurements are converted to the right-handed ROS convention
# (X forward, Y left, Z up).
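# Added worked example: a left-handed gyroscope reading (0.1, 0.2, 0.3)
# maps to (-0.1, 0.2, -0.3) in ROS, and an accelerometer reading
# (1.0, 2.0, 9.8) maps to (1.0, -2.0, 9.8).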
imu_msg.angular_velocity.x = -carla_imu_measurement.gyroscope.x
imu_msg.angular_velocity.y = carla_imu_measurement.gyroscope.y
imu_msg.angular_velocity.z = -carla_imu_measurement.gyroscope.z
imu_msg.linear_acceleration.x = carla_imu_measurement.accelerometer.x
imu_msg.linear_acceleration.y = -carla_imu_measurement.accelerometer.y
imu_msg.linear_acceleration.z = carla_imu_measurement.accelerometer.z
roll, pitch, yaw = trans.carla_rotation_to_RPY(carla_imu_measurement.transform.rotation)
quat = euler2quat(roll, pitch, yaw)
imu_msg.orientation.w = quat[0]
imu_msg.orientation.x = quat[1]
imu_msg.orientation.y = quat[2]
imu_msg.orientation.z = quat[3]
self.imu_publisher.publish(imu_msg)
|
8b8dd7e87b37c881ee5d4c2c26d6dfdb3f1934fe
|
b54f5fe75dbb010a18d1da30a4f030fbb257ed4a
|
/examples/imageview/imageview/app.py
|
655baca80e4c0842024aac5836baa1c0fdd0fe49
|
[
"BSD-3-Clause"
] |
permissive
|
beeware/toga
|
449e3f008ad89e10f8ffcc61bdac798e7e825d09
|
01b076bd6434d0bd04c04ff72ac6eb20b9e973ea
|
refs/heads/main
| 2023-08-24T11:33:53.705165
| 2023-08-24T00:06:09
| 2023-08-24T00:06:09
| 22,529,973
| 1,865
| 468
|
BSD-3-Clause
| 2023-09-14T18:46:58
| 2014-08-01T21:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
app.py
|
import io
from PIL import Image, ImageDraw
import toga
from toga.style.pack import CENTER, COLUMN, Pack
class ImageViewApp(toga.App):
def startup(self):
self.main_window = toga.MainWindow(title=self.name)
box = toga.Box(
style=Pack(
padding=10,
alignment=CENTER,
direction=COLUMN,
)
)
        # image from a relative path, specified as a string, to load
        # pride-brutus.png from the package.
image_from_path = toga.Image("resources/pride-brutus.png")
# First display the image at its intrinsic size.
box.add(
toga.ImageView(
image_from_path,
)
)
# Scale ONE of the width or height, and the aspect ratio should be retained.
box.add(
toga.ImageView(
image_from_path,
style=Pack(width=72),
)
)
box.add(
toga.ImageView(
image_from_path,
style=Pack(height=72),
)
)
# image from pathlib.Path object
# same as the above image, just with a different argument type
image_from_pathlib_path = toga.Image(
self.paths.app / "resources" / "pride-brutus.png"
)
        # Scale BOTH width and height; the aspect ratio should be overridden.
box.add(
toga.ImageView(
image_from_pathlib_path,
style=Pack(width=72, height=72),
)
)
        # Flex with unspecified cross axis size: aspect ratio should be retained.
box.add(
toga.ImageView(
image_from_pathlib_path,
style=Pack(flex=1),
)
)
# Flex with fixed cross axis size: aspect ratio should be retained.
box.add(
toga.ImageView(
image_from_pathlib_path,
style=Pack(flex=1, width=150),
)
)
# image from bytes
# generate an image using pillow
img = Image.new("RGBA", size=(110, 30))
d1 = ImageDraw.Draw(img)
d1.text((20, 10), "Pillow image", fill="green")
# get png bytes
buffer = io.BytesIO()
img.save(buffer, format="png", compress_level=0)
image_from_bytes = toga.Image(data=buffer.getvalue())
imageview_from_bytes = toga.ImageView(
image_from_bytes,
style=Pack(height=72, background_color="lightgray"),
)
box.add(imageview_from_bytes)
# An empty imageview.
empty_imageview = toga.ImageView()
box.add(empty_imageview)
self.main_window.content = box
self.main_window.show()
def main():
return ImageViewApp("ImageView", "org.beeware.widgets.imageview")
if __name__ == "__main__":
app = main()
app.main_loop()
|
e9ae122efbf47bc231902369c8957f7e0b79458b
|
474c281c47aed69036b2a13e9a60d150d8ecddc5
|
/jsons/deserializers/default_iterable.py
|
24cc0b538526842eb52e4b1079c4e8ba5a03feaa
|
[
"MIT"
] |
permissive
|
ramonhagenaars/jsons
|
c2445eb7c002544abdfde4ac63d42f5a93e4d776
|
9abbf3a3bd32435ac74bc98c3554ad3c71086036
|
refs/heads/master
| 2023-07-23T22:08:10.093119
| 2022-06-09T19:50:52
| 2022-06-09T19:50:52
| 140,337,655
| 286
| 52
|
MIT
| 2023-07-14T15:20:59
| 2018-07-09T20:18:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,161
|
py
|
default_iterable.py
|
from collections.abc import Mapping, Iterable
from typing import Iterable as IterableType
from jsons._compatibility_impl import get_naked_class
from jsons.deserializers.default_list import default_list_deserializer
def default_iterable_deserializer(
obj: list,
cls: type,
**kwargs) -> Iterable:
"""
Deserialize a (JSON) list into an ``Iterable`` by deserializing all items
of that list. The given obj is assumed to be homogeneous; if the list has a
generic type (e.g. Set[datetime]) then it is assumed that all elements can
be deserialized to that type.
:param obj: The list that needs deserializing to an ``Iterable``.
:param cls: The type, optionally with a generic (e.g. Deque[str]).
:param kwargs: Any keyword arguments.
:return: A deserialized ``Iterable`` (e.g. ``set``) instance.
"""
cls_ = Mapping
if hasattr(cls, '__args__'):
cls_ = IterableType[cls.__args__]
list_ = default_list_deserializer(obj, cls_, **kwargs)
result = list_
naked_cls = get_naked_class(cls)
if not isinstance(result, naked_cls):
result = naked_cls(list_)
return result
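A minimal usage sketch via the public API (assuming jsons routes generic
Set/Deque annotations through this deserializer):

import jsons
from typing import Set, Deque

assert jsons.load([1, 2, 2, 3], Set[int]) == {1, 2, 3}
assert list(jsons.load([1, 2, 3], Deque[int])) == [1, 2, 3]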
|
bfb8065fd27ecdefcbaf23d82c0a7c57db21c736
|
c42a5c6dc908caec486da3769ff4d3bc6398648f
|
/meta_dataset/data/dump_episodes.py
|
67d3dd1a782b995fdaeefff3a3c13d62df76f029
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
google-research/meta-dataset
|
4a611b194ea813208c8cc4b8fdd4cb2b082115d6
|
13ca9ed2533056909f232168c759c096ae291740
|
refs/heads/main
| 2023-09-01T07:34:56.516732
| 2022-12-22T06:23:44
| 2023-01-20T19:44:32
| 174,000,899
| 753
| 147
|
Apache-2.0
| 2023-02-17T17:25:22
| 2019-03-05T18:39:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,812
|
py
|
dump_episodes.py
|
# coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Dumps Meta-Dataset episodes to disk as tfrecords files.
Episodes are stored as a pair of `{episode_number}-train.tfrecords` and
`{episode_number}-test.tfrecords` files, each of which contains serialized
TFExample strings for the support and query set, respectively.
python -m meta_dataset.data.dump_episodes \
--gin_config=meta_dataset/learn/gin/setups/\
data_config_string.gin --gin_config=meta_dataset/learn/gin/\
setups/variable_way_and_shot.gin \
--gin_bindings="DataConfig.num_prefetch=<num_prefetch>"
"""
import json
import os
from absl import app
from absl import flags
from absl import logging
import gin
from meta_dataset.data import config
from meta_dataset.data import dataset_spec as dataset_spec_lib
from meta_dataset.data import learning_spec
from meta_dataset.data import pipeline
from meta_dataset.data import utils
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
flags.DEFINE_multi_string('gin_config', None,
'List of paths to the config files.')
flags.DEFINE_multi_string('gin_bindings', None,
'List of Gin parameter bindings.')
flags.DEFINE_string('output_dir', '/tmp/cached_episodes/',
'Root directory for saving episodes.')
flags.DEFINE_integer('num_episodes', 600, 'Number of episodes to sample.')
flags.DEFINE_string('dataset_name', 'omniglot', 'Dataset name to create '
'episodes from.')
flags.DEFINE_enum_class('split', learning_spec.Split.TEST, learning_spec.Split,
'See learning_spec.Split for '
'allowed values.')
flags.DEFINE_boolean(
'ignore_dag_ontology', False, 'If True the dag ontology'
' for Imagenet dataset is not used.')
flags.DEFINE_boolean(
'ignore_bilevel_ontology', False, 'If True the bilevel'
' sampling for Omniglot dataset is not used.')
tf.flags.DEFINE_string('records_root_dir', '',
'Root directory containing a subdirectory per dataset.')
FLAGS = flags.FLAGS
def main(unused_argv):
logging.info(FLAGS.output_dir)
tf.io.gfile.makedirs(FLAGS.output_dir)
gin.parse_config_files_and_bindings(
FLAGS.gin_config, FLAGS.gin_bindings, finalize_config=True)
dataset_spec = dataset_spec_lib.load_dataset_spec(
os.path.join(FLAGS.records_root_dir, FLAGS.dataset_name))
data_config = config.DataConfig()
episode_descr_config = config.EpisodeDescriptionConfig()
use_dag_ontology = (
FLAGS.dataset_name in ('ilsvrc_2012', 'ilsvrc_2012_v2') and
not FLAGS.ignore_dag_ontology)
use_bilevel_ontology = (
FLAGS.dataset_name == 'omniglot' and not FLAGS.ignore_bilevel_ontology)
data_pipeline = pipeline.make_one_source_episode_pipeline(
dataset_spec,
use_dag_ontology=use_dag_ontology,
use_bilevel_ontology=use_bilevel_ontology,
split=FLAGS.split,
episode_descr_config=episode_descr_config,
# TODO(evcu) Maybe set the following to 0 to prevent shuffling and check
# reproducibility of dumping.
shuffle_buffer_size=data_config.shuffle_buffer_size,
read_buffer_size_bytes=data_config.read_buffer_size_bytes,
num_prefetch=data_config.num_prefetch)
dataset = data_pipeline.take(FLAGS.num_episodes)
images_per_class_dict = {}
# Ignoring dataset number since we are loading one dataset.
for episode_number, (episode, _) in enumerate(dataset):
logging.info('Dumping episode %d', episode_number)
train_imgs, train_labels, _, test_imgs, test_labels, _ = episode
path_train = utils.get_file_path(FLAGS.output_dir, episode_number, 'train')
path_test = utils.get_file_path(FLAGS.output_dir, episode_number, 'test')
utils.dump_as_tfrecord(path_train, train_imgs, train_labels)
utils.dump_as_tfrecord(path_test, test_imgs, test_labels)
images_per_class_dict[os.path.basename(path_train)] = (
utils.get_label_counts(train_labels))
images_per_class_dict[os.path.basename(path_test)] = (
utils.get_label_counts(test_labels))
info_path = utils.get_info_path(FLAGS.output_dir)
with tf.io.gfile.GFile(info_path, 'w') as f:
f.write(json.dumps(images_per_class_dict, indent=2))
if __name__ == '__main__':
app.run(main)
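A minimal read-back sketch for the dumped episodes, assuming each file is a
TFRecord of serialized tf.train.Example protos with 'image' (bytes) and
'label' (int64) features; the exact feature names live in
meta_dataset.data.utils and may differ:

import tensorflow.compat.v1 as tf

def load_episode_split(path):
  feature_spec = {
      'image': tf.io.FixedLenFeature([], tf.string),  # assumed feature name
      'label': tf.io.FixedLenFeature([], tf.int64),   # assumed feature name
  }
  ds = tf.data.TFRecordDataset(path)
  return ds.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))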
|
cd2c8aa597b01cf3c193bfb195fbfc646b304d4b
|
9e5752ec6fa4f9797dd06f49e9d26dba55b05975
|
/mindarmour/privacy/evaluation/attacker.py
|
496110c4127b861cebaf1cb0163abff0f0bd3497
|
[
"Apache-2.0"
] |
permissive
|
mindspore-ai/mindarmour
|
8e0d221d4cc77ebf2ce67dbcdf8d2cb8175d7051
|
9cd825b416916c9cda5a7f3623b39b086d16275c
|
refs/heads/master
| 2023-07-09T11:43:45.380811
| 2023-07-07T07:35:27
| 2023-07-07T07:35:27
| 250,692,967
| 151
| 16
|
Apache-2.0
| 2020-04-02T09:50:15
| 2020-03-28T01:59:08
|
Python
|
UTF-8
|
Python
| false
| false
| 6,699
|
py
|
attacker.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Attacker of Membership Inference.
"""
import warnings
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.exceptions import ConvergenceWarning
from mindarmour.utils.logger import LogUtil
from mindarmour.utils._check_param import check_pair_numpy_param, check_param_type
LOGGER = LogUtil.get_instance()
TAG = "Attacker"
def _attack_knn(features, labels, param_grid, n_jobs):
"""
Train and return a KNN model.
Args:
features (numpy.ndarray): Loss and logits characteristics of each sample.
labels (numpy.ndarray): Labels of each sample whether belongs to training set.
param_grid (dict): Setting of GridSearchCV.
n_jobs (int): Number of jobs run in parallel. -1 means using all processors,
otherwise the value of n_jobs must be a positive integer.
Returns:
sklearn.model_selection.GridSearchCV, trained model.
"""
knn_model = KNeighborsClassifier()
knn_model = GridSearchCV(
knn_model, param_grid=param_grid, cv=3, n_jobs=n_jobs, verbose=0,
)
knn_model.fit(X=features, y=labels)
return knn_model
def _attack_lr(features, labels, param_grid, n_jobs):
"""
Train and return a LR model.
Args:
features (numpy.ndarray): Loss and logits characteristics of each sample.
labels (numpy.ndarray): Labels of each sample whether belongs to training set.
param_grid (dict): Setting of GridSearchCV.
n_jobs (int): Number of jobs run in parallel. -1 means using all processors,
otherwise the value of n_jobs must be a positive integer.
Returns:
sklearn.model_selection.GridSearchCV, trained model.
"""
lr_model = LogisticRegression(C=1.0, penalty="l2", max_iter=300)
lr_model = GridSearchCV(
lr_model, param_grid=param_grid, cv=3, n_jobs=n_jobs, verbose=0,
)
lr_model.fit(X=features, y=labels)
return lr_model
def _attack_mlpc(features, labels, param_grid, n_jobs):
"""
Train and return a MLPC model.
Args:
features (numpy.ndarray): Loss and logits characteristics of each sample.
labels (numpy.ndarray): Labels of each sample whether belongs to training set.
param_grid (dict): Setting of GridSearchCV.
n_jobs (int): Number of jobs run in parallel. -1 means using all processors,
otherwise the value of n_jobs must be a positive integer.
Returns:
sklearn.model_selection.GridSearchCV, trained model.
"""
mlpc_model = MLPClassifier(random_state=1, max_iter=300)
mlpc_model = GridSearchCV(
mlpc_model, param_grid=param_grid, cv=3, n_jobs=n_jobs, verbose=0,
)
mlpc_model.fit(features, labels)
return mlpc_model
def _attack_rf(features, labels, random_grid, n_jobs):
"""
Train and return a RF model.
Args:
features (numpy.ndarray): Loss and logits characteristics of each sample.
labels (numpy.ndarray): Labels of each sample whether belongs to training set.
random_grid (dict): Setting of RandomizedSearchCV.
n_jobs (int): Number of jobs run in parallel. -1 means using all processors,
otherwise the value of n_jobs must be a positive integer.
Returns:
sklearn.model_selection.RandomizedSearchCV, trained model.
"""
rf_model = RandomForestClassifier(max_depth=2, random_state=0)
rf_model = RandomizedSearchCV(
rf_model, param_distributions=random_grid, n_iter=7, cv=3, n_jobs=n_jobs,
verbose=0,
)
rf_model.fit(features, labels)
return rf_model
def _get_attack_model(features, labels, config, n_jobs=-1):
"""
Get trained attack model specify by config.
Args:
features (numpy.ndarray): Loss and logits characteristics of each sample.
labels (numpy.ndarray): Labels of each sample whether belongs to training set.
config (dict): Config of attacker, with key in ["method", "params"].
The format is {"method": "knn", "params": {"n_neighbors": [3, 5, 7]}},
params of each method must within the range of changeable parameters.
Tips of params implement can be found in
"https://scikit-learn.org/0.16/modules/generated/sklearn.grid_search.GridSearchCV.html".
n_jobs (int): Number of jobs run in parallel. -1 means using all processors,
otherwise the value of n_jobs must be a positive integer.
Returns:
sklearn.BaseEstimator, trained model specify by config["method"].
Examples:
>>> from mindarmour.privacy.evaluation.attacker import _get_attack_model
>>> features = np.random.randn(10, 10)
>>> labels = np.random.randint(0, 2, 10)
>>> config = {"method": "knn", "params": {"n_neighbors": [3, 5]}}
>>> attack_model = _get_attack_model(features, labels, config)
"""
features, labels = check_pair_numpy_param("features", features, "labels", labels)
config = check_param_type("config", config, dict)
n_jobs = check_param_type("n_jobs", n_jobs, int)
if not (n_jobs == -1 or n_jobs > 0):
msg = "Value of n_jobs must be -1 or positive integer."
raise ValueError(msg)
method = str.lower(config["method"])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=ConvergenceWarning)
if method == "knn":
return _attack_knn(features, labels, config["params"], n_jobs)
if method == "lr":
return _attack_lr(features, labels, config["params"], n_jobs)
if method == "mlp":
return _attack_mlpc(features, labels, config["params"], n_jobs)
if method == "rf":
return _attack_rf(features, labels, config["params"], n_jobs)
msg = "Method {} is not supported.".format(config["method"])
LOGGER.error(TAG, msg)
raise NameError(msg)
|
aa286cdc23e714255ad16171de90bb23132beed0
|
da99c3d2dfbce3902ec189700daf87278f90d5cd
|
/evaluation/eval_wrapper.py
|
e855718c2d7fd12e2e02278f0433d0c55d2ed32e
|
[
"MIT"
] |
permissive
|
cfzd/Ultra-Fast-Lane-Detection
|
52c297c1975d0e2e5cf7844bc2ce4f47b8d993e3
|
353df107756b8c03c22c27201e33fc63d84ecfe6
|
refs/heads/master
| 2023-08-11T23:20:22.619180
| 2022-12-14T06:50:09
| 2022-12-14T06:50:09
| 266,319,758
| 1,630
| 488
|
MIT
| 2021-12-03T04:09:33
| 2020-05-23T11:11:34
|
Python
|
UTF-8
|
Python
| false
| false
| 11,756
|
py
|
eval_wrapper.py
|
from data.dataloader import get_test_loader
from evaluation.tusimple.lane import LaneEval
from utils.dist_utils import is_main_process, dist_print, get_rank, get_world_size, dist_tqdm, synchronize
import os, json, torch, scipy
import numpy as np
import platform
def generate_lines(out, shape, names, output_path, griding_num, localization_type='abs', flip_updown=False):
col_sample = np.linspace(0, shape[1] - 1, griding_num)
col_sample_w = col_sample[1] - col_sample[0]
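    # Horizontal spacing between adjacent grid anchors over the network input width.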
for j in range(out.shape[0]):
out_j = out[j].data.cpu().numpy()
if flip_updown:
out_j = out_j[:, ::-1, :]
if localization_type == 'abs':
out_j = np.argmax(out_j, axis=0)
out_j[out_j == griding_num] = -1
out_j = out_j + 1
elif localization_type == 'rel':
prob = scipy.special.softmax(out_j[:-1, :, :], axis=0)
idx = np.arange(griding_num) + 1
idx = idx.reshape(-1, 1, 1)
loc = np.sum(prob * idx, axis=0)
out_j = np.argmax(out_j, axis=0)
loc[out_j == griding_num] = 0
out_j = loc
else:
raise NotImplementedError
name = names[j]
line_save_path = os.path.join(output_path, name[:-3] + 'lines.txt')
save_dir, _ = os.path.split(line_save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(line_save_path, 'w') as fp:
for i in range(out_j.shape[1]):
if np.sum(out_j[:, i] != 0) > 2:
for k in range(out_j.shape[0]):
if out_j[k, i] > 0:
fp.write(
'%d %d ' % (int(out_j[k, i] * col_sample_w * 1640 / 800) - 1, int(590 - k * 20) - 1))
fp.write('\n')
def run_test(net, data_root, exp_name, work_dir, griding_num, use_aux,distributed, batch_size=8):
# torch.backends.cudnn.benchmark = True
output_path = os.path.join(work_dir, exp_name)
if not os.path.exists(output_path) and is_main_process():
os.mkdir(output_path)
synchronize()
loader = get_test_loader(batch_size, data_root, 'CULane', distributed)
# import pdb;pdb.set_trace()
for i, data in enumerate(dist_tqdm(loader)):
imgs, names = data
imgs = imgs.cuda()
with torch.no_grad():
out = net(imgs)
if len(out) == 2 and use_aux:
out, seg_out = out
generate_lines(out,imgs[0,0].shape,names,output_path,griding_num,localization_type = 'rel',flip_updown = True)
def generate_tusimple_lines(out,shape,griding_num,localization_type='rel'):
out = out.data.cpu().numpy()
out_loc = np.argmax(out,axis=0)
if localization_type == 'rel':
prob = scipy.special.softmax(out[:-1, :, :], axis=0)
idx = np.arange(griding_num)
idx = idx.reshape(-1, 1, 1)
loc = np.sum(prob * idx, axis=0)
loc[out_loc == griding_num] = griding_num
out_loc = loc
lanes = []
for i in range(out_loc.shape[1]):
out_i = out_loc[:,i]
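        # Map each grid index to an x pixel in the 1280-wide TuSimple frame;
        # griding_num encodes "no lane point" and is emitted as -2 per the
        # TuSimple submission format.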
lane = [int(round((loc + 0.5) * 1280.0 / (griding_num - 1))) if loc != griding_num else -2 for loc in out_i]
lanes.append(lane)
return lanes
def run_test_tusimple(net,data_root,work_dir,exp_name,griding_num,use_aux, distributed,batch_size = 8):
output_path = os.path.join(work_dir,exp_name+'.%d.txt'% get_rank())
fp = open(output_path,'w')
loader = get_test_loader(batch_size,data_root,'Tusimple', distributed)
for i,data in enumerate(dist_tqdm(loader)):
imgs,names = data
imgs = imgs.cuda()
with torch.no_grad():
out = net(imgs)
if len(out) == 2 and use_aux:
out = out[0]
for i,name in enumerate(names):
tmp_dict = {}
tmp_dict['lanes'] = generate_tusimple_lines(out[i],imgs[0,0].shape,griding_num)
tmp_dict['h_samples'] = [160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260,
270, 280, 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 420,
430, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580,
590, 600, 610, 620, 630, 640, 650, 660, 670, 680, 690, 700, 710]
tmp_dict['raw_file'] = name
tmp_dict['run_time'] = 10
json_str = json.dumps(tmp_dict)
fp.write(json_str+'\n')
fp.close()
def combine_tusimple_test(work_dir,exp_name):
size = get_world_size()
all_res = []
for i in range(size):
output_path = os.path.join(work_dir,exp_name+'.%d.txt'% i)
with open(output_path, 'r') as fp:
res = fp.readlines()
all_res.extend(res)
names = set()
all_res_no_dup = []
for i, res in enumerate(all_res):
pos = res.find('clips')
name = res[pos:].split('\"')[0]
if name not in names:
names.add(name)
all_res_no_dup.append(res)
output_path = os.path.join(work_dir,exp_name+'.txt')
with open(output_path, 'w') as fp:
fp.writelines(all_res_no_dup)
def eval_lane(net, dataset, data_root, work_dir, griding_num, use_aux, distributed):
net.eval()
if dataset == 'CULane':
run_test(net,data_root, 'culane_eval_tmp', work_dir, griding_num, use_aux, distributed)
synchronize() # wait for all results
if is_main_process():
res = call_culane_eval(data_root, 'culane_eval_tmp', work_dir)
TP,FP,FN = 0,0,0
for k, v in res.items():
val = float(v['Fmeasure']) if 'nan' not in v['Fmeasure'] else 0
val_tp,val_fp,val_fn = int(v['tp']),int(v['fp']),int(v['fn'])
TP += val_tp
FP += val_fp
FN += val_fn
dist_print(k,val)
P = TP * 1.0/(TP + FP)
R = TP * 1.0/(TP + FN)
F = 2*P*R/(P + R)
dist_print(F)
synchronize()
elif dataset == 'Tusimple':
exp_name = 'tusimple_eval_tmp'
run_test_tusimple(net, data_root, work_dir, exp_name, griding_num, use_aux, distributed)
synchronize() # wait for all results
if is_main_process():
combine_tusimple_test(work_dir,exp_name)
res = LaneEval.bench_one_submit(os.path.join(work_dir,exp_name + '.txt'),os.path.join(data_root,'test_label.json'))
res = json.loads(res)
for r in res:
dist_print(r['name'], r['value'])
synchronize()
def read_helper(path):
lines = open(path, 'r').readlines()[1:]
lines = ' '.join(lines)
values = lines.split(' ')[1::2]
keys = lines.split(' ')[0::2]
keys = [key[:-1] for key in keys]
res = {k : v for k,v in zip(keys,values)}
return res
def call_culane_eval(data_dir, exp_name,output_path):
if data_dir[-1] != '/':
data_dir = data_dir + '/'
detect_dir=os.path.join(output_path,exp_name)+'/'
w_lane=30
    iou = 0.5  # set the IoU threshold to 0.3 or 0.5
    im_w = 1640
    im_h = 590
    frame = 1
    splits = ['normal', 'crowd', 'hlight', 'shadow', 'noline', 'arrow',
              'curve', 'cross', 'night']
    if not os.path.exists(os.path.join(output_path, 'txt')):
        os.mkdir(os.path.join(output_path, 'txt'))
    eval_cmd = './evaluation/culane/evaluate'
    if platform.system() == 'Windows':
        eval_cmd = eval_cmd.replace('/', os.sep)
    res_all = {}
    for idx, split in enumerate(splits):
        list_file = os.path.join(data_dir, 'list/test_split/test%d_%s.txt' % (idx, split))
        out_file = os.path.join(output_path, 'txt', 'out%d_%s.txt' % (idx, split))
        # run the official CULane evaluator on this split
        os.system('%s -a %s -d %s -i %s -l %s -w %s -t %s -c %s -r %s -f %s -o %s' % (
            eval_cmd, data_dir, detect_dir, data_dir, list_file,
            w_lane, iou, im_w, im_h, frame, out_file))
        res_all['res_' + split] = read_helper(out_file)
    return res_all
|
31363f6e735e40ed141cddf990557422888283bb
|
4e8eead628904a586fed97b7840266498f7db8fc
|
/vagrant/start_provider.py
|
5611c5e6c63216d7fabed55820ec79b7db3a6631
|
[
"MIT"
] |
permissive
|
wndhydrnt/python-oauth2
|
5d730806e9632009dee9e92df512eeb52d69d6af
|
d1f75e321bac049291925b9ee345bf4218f5b7a9
|
refs/heads/v1
| 2020-12-24T16:15:33.238125
| 2019-06-28T17:53:25
| 2019-06-28T17:53:25
| 13,421,726
| 121
| 52
|
MIT
| 2018-11-09T19:59:37
| 2013-10-08T18:21:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,669
|
py
|
start_provider.py
|
import argparse
import mysql.connector
from pymongo import MongoClient
from wsgiref.simple_server import make_server
from oauth2 import Provider
from oauth2.store.dbapi.mysql import MysqlAccessTokenStore, MysqlAuthCodeStore, \
MysqlClientStore
from oauth2.store.mongodb import AccessTokenStore, AuthCodeStore, ClientStore
from oauth2.tokengenerator import Uuid4
from oauth2.web import SiteAdapter, Wsgi
from oauth2.grant import AuthorizationCodeGrant, ImplicitGrant, ResourceOwnerGrant,\
RefreshToken, ClientCredentialsGrant
class TestSiteAdapter(SiteAdapter):
def authenticate(self, request, environ, response):
return {}, 123
def user_has_denied_access(self, request):
return False
def main():
parser = argparse.ArgumentParser(description="python-oauth2 test provider")
parser.add_argument("--store", dest="store", type=str, default="mongodb",
help="The store adapter to use. Can one of 'mongodb'"\
"(default), 'mysql'")
args = parser.parse_args()
if args.store == "mongodb":
print("Using mongodb stores...")
client = MongoClient()
db = client.testdb
access_token_store = AccessTokenStore(collection=db["access_tokens"])
auth_code_store = AuthCodeStore(collection=db["auth_codes"])
client_store = ClientStore(collection=db["clients"])
elif args.store == "mysql":
print("Using mysql stores...")
connection = mysql.connector.connect(host="127.0.0.1", user="root",
passwd="", db="testdb")
access_token_store = MysqlAccessTokenStore(connection=connection)
auth_code_store = MysqlAuthCodeStore(connection=connection)
client_store = MysqlClientStore(connection=connection)
else:
raise Exception("Unknown store")
provider = Provider(access_token_store=access_token_store,
auth_code_store=auth_code_store,
client_store=client_store,
site_adapter=TestSiteAdapter(),
token_generator=Uuid4())
provider.add_grant(AuthorizationCodeGrant(expires_in=120))
provider.add_grant(ImplicitGrant())
provider.add_grant(ResourceOwnerGrant())
provider.add_grant(ClientCredentialsGrant())
provider.add_grant(RefreshToken(expires_in=60))
app = Wsgi(server=provider)
    httpd = make_server('', 8888, app)
    print("Starting test auth server on port 8888...")
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.server_close()
if __name__ == "__main__":
main()
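A quick smoke test for the running provider, assuming python-oauth2's default
token path '/token' and a client record already present in the chosen store
(the client id and secret below are placeholders):

import requests

resp = requests.post("http://localhost:8888/token", data={
    "grant_type": "client_credentials",
    "client_id": "abc",      # hypothetical client
    "client_secret": "xyz",  # hypothetical secret
})
print(resp.status_code, resp.text)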
|
b4515638a3dee04ef2dd49fdd98ee02485ebeab7
|
29bd0e9b5bdef7e2b4ff79edda3be258d5bde63c
|
/xonsh/completers/man.py
|
a2e56f13d7e2359de2bca50590292b484ab705ba
|
[
"BSD-2-Clause"
] |
permissive
|
xonsh/xonsh
|
4dec5e4c14a4a82f81277a89d8ab6091869fc29e
|
60f0145ed893cb73bbfcf336c448238981010d41
|
refs/heads/main
| 2023-08-31T03:37:57.786839
| 2023-08-23T15:30:20
| 2023-08-23T15:30:20
| 29,620,400
| 6,374
| 684
|
NOASSERTION
| 2023-09-11T02:52:37
| 2015-01-21T22:05:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,448
|
py
|
man.py
|
import functools
import json
import re
import shutil
import subprocess
import textwrap
from pathlib import Path
from xonsh.built_ins import XSH
from xonsh.completers.tools import RichCompletion, contextual_command_completer
from xonsh.parsers.completion_context import CommandContext
@functools.cache
def get_man_completions_path() -> Path:
env = XSH.env or {}
datadir = Path(env["XONSH_DATA_DIR"]) / "generated_completions" / "man"
if datadir.exists() and (not datadir.is_dir()):
shutil.move(datadir, datadir.with_suffix(".bkp"))
if not datadir.exists():
datadir.mkdir(exist_ok=True, parents=True)
return datadir
def _get_man_page(cmd: str):
"""without control characters"""
env = XSH.env.detype()
manpage = subprocess.Popen(
["man", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, env=env
)
# This is a trick to get rid of reverse line feeds
return subprocess.check_output(["col", "-b"], stdin=manpage.stdout, env=env)
@functools.cache
def _man_option_string_regex():
return re.compile(
r"(?:(,\s?)|^|(\sor\s))(?P<option>-[\w]|--[\w-]+)(?=\[?(\s|,|=\w+|$))"
)
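# For example, on "-a, --all  do not ignore entries starting with ." the
# regex above yields the option strings ['-a', '--all'].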
def generate_options_of(cmd: str):
out = _get_man_page(cmd)
if not out:
return
def get_headers(text: str):
"""split as header-body based on indent"""
if not text:
return
header = ""
body = []
for line in textwrap.dedent(text.replace("\n\t", "\n ")).splitlines():
if not line.strip():
continue
if line.startswith((" ", "\t")):
body.append(line)
else:
if header or body:
yield header, body
# found new section
header = line.strip()
body = []
if header or body:
yield header, body
def split_options_string(text: str):
text = text.strip()
regex = _man_option_string_regex()
options = []
for match in regex.finditer(text):
option = match.groupdict().pop("option", None)
if option:
options.append(option)
text = text[match.end() :]
return options, text.strip()
def get_option_section():
option_sect = dict(get_headers(out.decode()))
small_names = {k.lower(): k for k in option_sect}
for head in (
"options",
"command options",
"description",
): # prefer sections in this order
if head in small_names:
title = small_names[head]
return "\n".join(option_sect[title])
def get_options(text):
"""finally get the options"""
        # Headers that start with '-' are option specifications; other
        # headers may nest options one level deeper (handled below).
for opt, lines in get_headers(text):
# todo: some have [+-] or such vague notations
if opt.startswith("-"):
# sometime a single line will have both desc and options
option_strings, rest = split_options_string(opt)
descs = []
if rest:
descs.append(rest)
if lines:
descs.append(textwrap.dedent("\n".join(lines)))
if option_strings:
yield ". ".join(descs), tuple(option_strings)
elif lines:
# sometimes the options are nested inside subheaders
yield from get_options("\n".join(lines))
yield from get_options(get_option_section())
@functools.lru_cache(maxsize=10)
def _parse_man_page_options(cmd: str) -> "dict[str, tuple[str, ...]]":
path = get_man_completions_path() / f"{cmd}.json"
if path.exists():
return json.loads(path.read_text())
options = dict(generate_options_of(cmd))
path.write_text(json.dumps(options))
return options
@contextual_command_completer
def complete_from_man(context: CommandContext):
"""
Completes an option name, based on the contents of the associated man
page.
"""
if context.arg_index == 0 or not context.prefix.startswith("-"):
return
cmd = context.args[0].value
def completions():
for desc, opts in _parse_man_page_options(cmd).items():
yield RichCompletion(
value=opts[-1], display=", ".join(opts), description=desc
)
return completions(), False
|
29ef022bfae6ef4cbe5ef632574ac34157e41a8c
|
7c857119fe1505b1d80d6e62969661c06dc1a2f4
|
/BaseTools/Source/Python/AutoGen/IdfClassObject.py
|
a6b8123c2539a48d28e7c0a82dbe0dee6b57c89a
|
[
"BSD-2-Clause"
] |
permissive
|
CloverHackyColor/CloverBootloader
|
7042ca7dd6b513d22be591a295e49071ae1482ee
|
2711170df4f60b2ae5aa20add3e00f35cf57b7e5
|
refs/heads/master
| 2023-08-30T22:14:34.590134
| 2023-08-27T19:14:02
| 2023-08-27T19:14:02
| 205,810,121
| 4,734
| 770
|
BSD-2-Clause
| 2023-09-03T12:41:33
| 2019-09-02T08:22:14
|
C
|
UTF-8
|
Python
| false
| false
| 5,665
|
py
|
IdfClassObject.py
|
## @file
# This file is used to collect all defined strings in Image Definition files
#
# Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
# Import Modules
#
from __future__ import absolute_import
import Common.EdkLogger as EdkLogger
from Common.BuildToolError import *
from Common.StringUtils import GetLineNo
from Common.Misc import PathClass
from Common.LongFilePathSupport import LongFilePath
import re
import os
from Common.GlobalData import gIdentifierPattern
from .UniClassObject import StripComments
IMAGE_TOKEN = re.compile(r'IMAGE_TOKEN *\(([A-Z0-9_]+) *\)', re.MULTILINE | re.UNICODE)
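# Matches references such as IMAGE_TOKEN (IMG_LOGO) in source files and
# captures the image identifier.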
#
# Value of different image information block types
#
EFI_HII_IIBT_END = 0x00
EFI_HII_IIBT_IMAGE_1BIT = 0x10
EFI_HII_IIBT_IMAGE_1BIT_TRANS = 0x11
EFI_HII_IIBT_IMAGE_4BIT = 0x12
EFI_HII_IIBT_IMAGE_4BIT_TRANS = 0x13
EFI_HII_IIBT_IMAGE_8BIT = 0x14
EFI_HII_IIBT_IMAGE_8BIT_TRANS = 0x15
EFI_HII_IIBT_IMAGE_24BIT = 0x16
EFI_HII_IIBT_IMAGE_24BIT_TRANS = 0x17
EFI_HII_IIBT_IMAGE_JPEG = 0x18
EFI_HII_IIBT_IMAGE_PNG = 0x19
EFI_HII_IIBT_DUPLICATE = 0x20
EFI_HII_IIBT_SKIP2 = 0x21
EFI_HII_IIBT_SKIP1 = 0x22
EFI_HII_IIBT_EXT1 = 0x30
EFI_HII_IIBT_EXT2 = 0x31
EFI_HII_IIBT_EXT4 = 0x32
#
# Value of HII package type
#
EFI_HII_PACKAGE_TYPE_ALL = 0x00
EFI_HII_PACKAGE_TYPE_GUID = 0x01
EFI_HII_PACKAGE_FORMS = 0x02
EFI_HII_PACKAGE_STRINGS = 0x04
EFI_HII_PACKAGE_FONTS = 0x05
EFI_HII_PACKAGE_IMAGES = 0x06
EFI_HII_PACKAGE_SIMPLE_FONTS = 0x07
EFI_HII_PACKAGE_DEVICE_PATH = 0x08
EFI_HII_PACKAGE_KEYBOARD_LAYOUT = 0x09
EFI_HII_PACKAGE_ANIMATIONS = 0x0A
EFI_HII_PACKAGE_END = 0xDF
EFI_HII_PACKAGE_TYPE_SYSTEM_BEGIN = 0xE0
EFI_HII_PACKAGE_TYPE_SYSTEM_END = 0xFF
class IdfFileClassObject(object):
    def __init__(self, FileList=None):
        self.ImageFilesDict = {}
        self.ImageIDList = []
        for File in (FileList or []):
if File is None:
EdkLogger.error("Image Definition File Parser", PARSER_ERROR, 'No Image definition file is given.')
try:
IdfFile = open(LongFilePath(File.Path), mode='r')
FileIn = IdfFile.read()
IdfFile.close()
            except Exception:
EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=File)
ImageFileList = []
for Line in FileIn.splitlines():
Line = Line.strip()
Line = StripComments(Line)
if len(Line) == 0:
continue
LineNo = GetLineNo(FileIn, Line, False)
if not Line.startswith('#image '):
EdkLogger.error("Image Definition File Parser", PARSER_ERROR, 'The %s in Line %s of File %s is invalid.' % (Line, LineNo, File.Path))
if Line.find('#image ') >= 0:
LineDetails = Line.split()
Len = len(LineDetails)
if Len != 3 and Len != 4:
EdkLogger.error("Image Definition File Parser", PARSER_ERROR, 'The format is not match #image IMAGE_ID [TRANSPARENT] ImageFileName in Line %s of File %s.' % (LineNo, File.Path))
if Len == 4 and LineDetails[2] != 'TRANSPARENT':
EdkLogger.error("Image Definition File Parser", PARSER_ERROR, 'Please use the keyword "TRANSPARENT" to describe the transparency setting in Line %s of File %s.' % (LineNo, File.Path))
MatchString = gIdentifierPattern.match(LineDetails[1])
if MatchString is None:
                    EdkLogger.error('Image Definition File Parser', FORMAT_INVALID, 'The Image token name %s defined in Idf file %s contains an invalid character.' % (LineDetails[1], File.Path))
if LineDetails[1] not in self.ImageIDList:
self.ImageIDList.append(LineDetails[1])
else:
EdkLogger.error("Image Definition File Parser", PARSER_ERROR, 'The %s in Line %s of File %s is already defined.' % (LineDetails[1], LineNo, File.Path))
if Len == 4:
ImageFile = ImageFileObject(LineDetails[Len-1], LineDetails[1], True)
else:
ImageFile = ImageFileObject(LineDetails[Len-1], LineDetails[1], False)
ImageFileList.append(ImageFile)
if ImageFileList:
self.ImageFilesDict[File] = ImageFileList
def SearchImageID(ImageFileObject, FileList):
if FileList == []:
return ImageFileObject
for File in FileList:
        if os.path.isfile(File):
            with open(File, 'r') as Lines:
                for Line in Lines:
                    ImageIdList = IMAGE_TOKEN.findall(Line)
                    for ID in ImageIdList:
                        EdkLogger.debug(EdkLogger.DEBUG_5, "Found ImageID identifier: " + ID)
                        ImageFileObject.SetImageIDReferenced(ID)
class ImageFileObject(object):
def __init__(self, FileName, ImageID, TransParent = False):
self.FileName = FileName
self.File = ''
self.ImageID = ImageID
self.TransParent = TransParent
self.Referenced = False
def SetImageIDReferenced(self, ImageID):
if ImageID == self.ImageID:
self.Referenced = True
|
264936f288daa5cb31561f88d5effafbff27a530
|
01857ef455ea60eccaf03b5a9059ec83e9803c2e
|
/nicegui/elements/query.py
|
84cd8af71bbfdc499140199daa8d65e7e2ee3868
|
[
"MIT"
] |
permissive
|
zauberzeug/nicegui
|
f08312cc1f393deca79e0e84a2506d3a35efff16
|
c61b1315f29d51e26cc1168207f5616b302f8df0
|
refs/heads/main
| 2023-08-18T18:09:30.937322
| 2023-08-18T15:04:00
| 2023-08-18T15:04:00
| 365,250,183
| 5,128
| 271
|
MIT
| 2023-09-14T01:50:56
| 2021-05-07T13:55:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,756
|
py
|
query.py
|
from typing import Optional
from typing_extensions import Self
from ..element import Element
from ..globals import get_client
class Query(Element, component='query.js'):
def __init__(self, selector: str) -> None:
super().__init__()
self._props['selector'] = selector
self._props['classes'] = []
self._props['style'] = {}
self._props['props'] = {}
def classes(self, add: Optional[str] = None, *, remove: Optional[str] = None, replace: Optional[str] = None) \
-> Self:
classes = self._update_classes_list(self._props['classes'], add, remove, replace)
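        # Diff against the classes already applied so only the delta is
        # pushed to the client.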
new_classes = [c for c in classes if c not in self._props['classes']]
old_classes = [c for c in self._props['classes'] if c not in classes]
if new_classes:
self.run_method('add_classes', new_classes)
if old_classes:
self.run_method('remove_classes', old_classes)
self._props['classes'] = classes
return self
def style(self, add: Optional[str] = None, *, remove: Optional[str] = None, replace: Optional[str] = None) \
-> Self:
old_style = Element._parse_style(remove)
for key in old_style:
self._props['style'].pop(key, None)
if old_style:
self.run_method('remove_style', list(old_style))
self._props['style'].update(Element._parse_style(add))
self._props['style'].update(Element._parse_style(replace))
if self._props['style']:
self.run_method('add_style', self._props['style'])
return self
def props(self, add: Optional[str] = None, *, remove: Optional[str] = None) -> Self:
old_props = self._parse_props(remove)
for key in old_props:
self._props['props'].pop(key, None)
if old_props:
self.run_method('remove_props', list(old_props))
new_props = self._parse_props(add)
self._props['props'].update(new_props)
if self._props['props']:
self.run_method('add_props', self._props['props'])
return self
def query(selector: str) -> Query:
"""Query Selector
To manipulate elements like the document body, you can use the `ui.query` function.
With the query result you can add classes, styles, and attributes like with every other UI element.
This can be useful for example to change the background color of the page (e.g. `ui.query('body').classes('bg-green')`).
:param selector: the CSS selector (e.g. "body", "#my-id", ".my-class", "div > p")
"""
for element in get_client().elements.values():
if isinstance(element, Query) and element._props['selector'] == selector:
return element
return Query(selector)
|
88d616be1d5cc306702629b902bbf748041712c2
|
20c5cd805f105a70815aca39c5c7711612ea4388
|
/camera2car/auto_calib/test_img_calib.py
|
f11e114563b01784cf93f722303ace177f89a404
|
[] |
no_license
|
OpenCalib/SensorX2car
|
ff7f5d64dae8cc5e8d783185d42ef481e7f508ea
|
29e6816d6d0691a9ebfe0544ad0db236e3be5551
|
refs/heads/main
| 2023-04-28T04:52:20.355395
| 2023-04-13T08:46:02
| 2023-04-13T08:46:02
| 577,998,537
| 114
| 20
| null | 2023-01-31T10:51:26
| 2022-12-14T02:28:28
|
C++
|
UTF-8
|
Python
| false
| false
| 5,261
|
py
|
test_img_calib.py
|
'''
usage:
python test_img_calib.py --config-file config-files/ctrlc.yaml --opts MODE test DATASET_DIR ./pic/
'''
import os
import os.path as osp
import argparse
from datetime import date
import json
import random
import time
from pathlib import Path
import numpy as np
import numpy.linalg as LA
from tqdm import tqdm
import matplotlib as mpl
import matplotlib.pyplot as plt
import cv2
import csv
import math
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import util.misc as utils
from datasets import build_image_dataset
from models import build_model
from config import cfg
cmap = plt.get_cmap("jet")
norm = mpl.colors.Normalize(vmin=0.0, vmax=1.0)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
def c(x):
return sm.to_rgba(x)
def get_args_parser():
parser = argparse.ArgumentParser('Set gptran', add_help=False)
parser.add_argument('--config-file',
metavar="FILE",
help="path to config file",
type=str,
default='config-files/ctrlc.yaml')
parser.add_argument("--opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER
)
return parser
def compute_horizon(angle, vp, img_sz):
hl_left = vp[1] + (vp[0] + img_sz[1] / 2) * math.tan(angle)
hl_right = vp[1] + (vp[0] - img_sz[1] / 2) * math.tan(angle)
return hl_left, hl_right
def to_device(data, device):
if type(data) == dict:
return {k: v.to(device) for k, v in data.items()}
return [{k: v.to(device) if isinstance(v, torch.Tensor) else v
for k, v in t.items()} for t in data]
def main(cfg):
device = torch.device(cfg.DEVICE)
model, _ = build_model(cfg)
model.to(device)
dataset_test = build_image_dataset(cfg)
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
data_loader_test = DataLoader(dataset_test, 1, sampler=sampler_test,
drop_last=False,
collate_fn=utils.collate_fn,
num_workers=1)
    checkpoint = torch.load(cfg.LOAD, map_location='cpu')  # load the trained model weights
model.load_state_dict(checkpoint['model'])
model = model.eval()
start = time.time()
image_num = 0
angleset = []
for i, (samples, extra_samples, targets) in enumerate(tqdm(data_loader_test)):
with torch.no_grad():
samples = samples.to(device)
extra_samples = to_device(extra_samples, device)
outputs = model(samples, extra_samples)
filename = targets[0]['filename']
filename = osp.splitext(filename)[0]
pred_vp = outputs['pred_vp'].to('cpu')[0].numpy()
pred_hl = outputs['pred_hl'].to('cpu')[0].numpy()
input_sz = targets[0]['input_sz']
rho = 2.0/np.minimum(input_sz[0],input_sz[1])
img = targets[0]['org_img']
origin_sz = (img.shape[1], img.shape[0])
pred_vp /= pred_vp[2]
crop_vp = np.array((pred_vp[0] / rho, pred_vp[1] / rho))
# crop_left, crop_right = compute_horizon(pred_hl, crop_vp, input_sz)
vp = np.ones(3)
vp[0] = crop_vp[0] / input_sz[0] * max(origin_sz[0], origin_sz[1]) + origin_sz[0] / 2
vp[1] = crop_vp[1] / input_sz[0] * max(origin_sz[0], origin_sz[1]) + origin_sz[1] / 2
# hl_left = crop_left / input_sz[0] * max(origin_sz[0], origin_sz[1]) + origin_sz[1] / 2
# hl_right = crop_right / input_sz[0] * max(origin_sz[0], origin_sz[1]) + origin_sz[1] / 2
# ----calibrate----
# restore to the origin image size in KITTI
vp[0] = vp[0] * 2 - 4
vp[1] = vp[1] * 2 - 2
# KITTI intrinsic
K = np.array([[7.070912e+02, 0.000000e+00, 6.018873e+02],[0.000000e+00, 7.070912e+02, 1.831104e+02],[0, 0, 1]])
vp = vp.reshape(3, 1)
K_ = np.matrix(K)
K_inv = K_.I
r3 = (K_inv * vp)/np.linalg.norm(K_inv * vp)
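        # r3 is the unit ray toward the vanishing point, i.e. the camera
        # forward axis expressed in camera coordinates, so pitch and yaw
        # follow directly from its components.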
pitch = math.asin(r3[1])
yaw = math.atan(r3[0]/r3[2])
roll = pred_hl[0]
angle = [roll, pitch, yaw]
angleset.append(angle)
image_num += 1
end = time.time()
average_angle = np.mean(np.array(angleset), axis=0) * 180 / np.pi
print("roll angle(degree):", average_angle[0])
print("pitch angle(degree):", average_angle[1])
print("yaw angle(degree):", average_angle[2])
print("total inferece time:", end - start)
print("FPS:", image_num / (end - start))
if __name__ == '__main__':
parser = argparse.ArgumentParser('GPANet training and evaluation script',
parents=[get_args_parser()])
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
if cfg.OUTPUT_DIR:
Path(cfg.OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
main(cfg)
|