| column | dtype | range / classes |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 2–1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–245 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–245 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–245 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 2–1.02M |
| avg_line_length | float64 | 1–417k |
| max_line_length | int64 | 1–987k |
| alphanum_fraction | float64 | 0–1 |
| content_no_comment | string | lengths 0–1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |

(⌀ marks a column that contains nulls.)
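A minimal sketch of loading this dump, assuming the rows are exported as JSON
Lines with exactly the columns above (the file name "rows.jsonl" is
hypothetical):

import pandas as pd

# Each line of the export is one row with the columns listed above.
df = pd.read_json("rows.jsonl", lines=True)
# Example filter: keep Python files with a reasonable alphanumeric fraction.
py = df[(df["lang"] == "Python") & (df["alphanum_fraction"] > 0.5)]
print(py[["hexsha", "size", "max_stars_repo_name"]].head())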
Row 1
- hexsha: 1c3f4ce11391295ce698b0c6e4455c5e0de17368 | size: 11,558 | ext: py | lang: Python
- repo: aimakerspace/synergos_director | path: config.py | head hexsha: c4b10502d7ffa6da4fc29fe675a5042590657996 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
- max_stars_count: null | stars events: null – null
- max_issues_count: null | issues events: null – null
- max_forks_count: 2 | forks events: 2022-01-21T00:57:00.000Z – 2022-01-26T01:11:12.000Z
- content:
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic
import json
import logging
import os
import random
import subprocess
from collections import defaultdict, OrderedDict
from glob import glob
from pathlib import Path
from string import Template
# Libs
import numpy as np
import psutil
import torch as th
# Custom
from synlogger.general import DirectorLogger, SysmetricLogger
##################
# Configurations #
##################
SRC_DIR = Path(__file__).parent.absolute()
API_VERSION = "0.1.0"
infinite_nested_dict = lambda: defaultdict(infinite_nested_dict)
####################
# Helper Functions #
####################
def seed_everything(seed=42):
""" Convenience function to set a constant random seed for model consistency
Args:
seed (int): Seed for RNG
Returns:
True if operation is successful
False otherwise
"""
try:
random.seed(seed)
th.manual_seed(seed)
th.cuda.manual_seed_all(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
return True
    except Exception:
return False
def count_available_cpus(safe_mode: bool = False, r_count: int = 1) -> int:
""" Counts no. of detected CPUs in the current system. By default, all
CPU cores detected are returned. However, if safe mode is toggled, then
a specified number of cores are reserved.
Args:
safe_mode (bool): Toggles if cores are reserved
r_count (int): No. of cores to reserve
Return:
No. of usable cores (int)
"""
total_cores_available = psutil.cpu_count(logical=True)
reserved_cores = safe_mode * r_count
return total_cores_available - reserved_cores
def count_available_gpus() -> int:
""" Counts no. of attached GPUs devices in the current system. As GPU
support is supplimentary, if any exceptions are caught here, system
defaults back to CPU-driven processes (i.e. gpu count is 0)
Returns:
gpu_count (int)
"""
try:
process = subprocess.run(
['lspci'],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
all_detected_devices = process.stdout.split('\n')
gpus = [
device
for device in all_detected_devices
if (('VGA' in device) or ('Display' in device)) and
'Integrated Graphics' not in device # exclude integrated graphics
]
logging.debug(f"Detected GPUs: {gpus}")
return len(gpus)
except subprocess.CalledProcessError as cpe:
logging.warning(f"Could not detect GPUs! Error: {cpe}")
logging.warning(f"Defaulting to CPU processing instead...")
return 0
def detect_configurations(dirname):
""" Automates loading of configuration files in specified directory
Args:
dirname (str): Target directory to load configurations from
Returns:
Params (dict)
"""
def parse_filename(filepath):
""" Extracts filename from a specified filepath
            Assumption: There are no '.' characters in the filename
Args:
filepath (str): Path of file to parse
Returns:
filename (str)
"""
return os.path.basename(filepath).split('.')[0]
# Load in parameters for participating servers
config_globstring = os.path.join(SRC_DIR, dirname, "**/*.json")
config_paths = glob(config_globstring)
return {parse_filename(c_path): c_path for c_path in config_paths}
def capture_system_snapshot() -> dict:
""" Takes a snapshot of parameters used in system-wide operations
Returns:
System snapshot (dict)
"""
return {
'IS_CLUSTER': IS_CLUSTER,
'IS_MASTER': IS_MASTER,
'GRID': GRID,
'IN_DIR': IN_DIR,
'OUT_DIR': OUT_DIR,
'DATA_DIR': DATA_DIR,
'MLFLOW_DIR': MLFLOW_DIR,
'TEST_DIR': TEST_DIR,
'CORES_USED': CORES_USED,
'GPU_COUNT': GPU_COUNT,
'GPUS': GPUS,
'USE_GPU': USE_GPU,
'DEVICE': DEVICE,
'DB_PATH': DB_PATH,
'SCHEMAS': SCHEMAS,
'RETRY_INTERVAL': RETRY_INTERVAL
}
def configure_grid(grid: int) -> int:
""" Binds the server to a specific grid referenced by its index. This is
important when running the SynCluster configuration of Synergos.
Args:
        grid (int): Grid to be bound to
    Returns:
        Bound grid (int)
    """
    global GRID
    GRID = grid
    return GRID
def configure_cpu_allocation(**res_kwargs) -> int:
""" Configures no. of CPU cores available to the system. By default, all
CPU cores will be allocated.
Args:
res_kwargs: Any custom resource allocations declared by user
Returns:
CPU cores used (int)
"""
global CORES_USED
cpu_count = res_kwargs.get('cpus')
CORES_USED = min(cpu_count, CORES_USED) if cpu_count else CORES_USED
return CORES_USED
def configure_gpu_allocation(**res_kwargs):
""" Configures no. of GPU cores available to the system.
Args:
res_kwargs: Any custom resource allocations declared by user
Returns:
GPU cores used (int)
"""
global GPU_COUNT
gpu_count = res_kwargs.get('gpus')
GPU_COUNT = min(gpu_count, GPU_COUNT) if gpu_count else GPU_COUNT
return GPU_COUNT
def configure_node_logger(**logger_kwargs) -> DirectorLogger:
""" Initialises the synergos logger corresponding to the current node type.
        In this case, a DirectorLogger is initialised.
Args:
logger_kwargs: Any parameters required for node logger configuration
Returns:
        Node logger (DirectorLogger)
"""
global NODE_LOGGER
NODE_LOGGER = DirectorLogger(**logger_kwargs)
NODE_LOGGER.initialise()
return NODE_LOGGER
def configure_sysmetric_logger(**logger_kwargs) -> SysmetricLogger:
""" Initialises the sysmetric logger to facillitate polling for hardware
statistics.
Args:
logger_kwargs: Any parameters required for node logger configuration
Returns:
Sysmetric logger (SysmetricLogger)
"""
global SYSMETRIC_LOGGER
SYSMETRIC_LOGGER = SysmetricLogger(**logger_kwargs)
return SYSMETRIC_LOGGER
########################################################
# Synergos Orchestrator Container Local Configurations #
########################################################
"""
General parameters required for processing inputs & outputs
"""
# Define deployment configuration
IS_CLUSTER = True # director only exists in cluster mode
# Define server's role: Master or slave
IS_MASTER = True # director is always an orchestrator
# State grid server is bounded to
GRID = None # director does not orchestrate grids directly
# State input directory
IN_DIR = os.path.join(SRC_DIR, "inputs")
# State output directory
OUT_DIR = os.path.join(SRC_DIR, "outputs")
# State data directory
DATA_DIR = os.path.join(SRC_DIR, "data")
# State test directory
TEST_DIR = os.path.join(SRC_DIR, "tests")
# State MLFlow local directory
MLFLOW_DIR = "/mlflow"
# Initialise Cache
CACHE = infinite_nested_dict()
# Allocate no. of cores for processes
CORES_USED = count_available_cpus(safe_mode=True)
# Detect no. of GPUs attached to server
GPU_COUNT = count_available_gpus()
GPUS = [g_idx for g_idx in range(GPU_COUNT)]
USE_GPU = GPU_COUNT > 0 and th.cuda.is_available()
DEVICE = th.device('cuda' if USE_GPU else 'cpu')
# Retry interval for contacting idle workers
RETRY_INTERVAL = 5 # in seconds
logging.debug(f"Grid linked: {GRID}")
logging.debug(f"Is master node? {IS_MASTER}")
logging.debug(f"Input directory detected: {IN_DIR}")
logging.debug(f"Output directory detected: {OUT_DIR}")
logging.debug(f"Data directory detected: {DATA_DIR}")
logging.debug(f"Test directory detected: {TEST_DIR}")
logging.debug(f"MLFlow directory detected: {MLFLOW_DIR}")
logging.debug(f"Cache initialised: {CACHE}")
logging.debug(f"No. of available CPU Cores: {CORES_USED}")
logging.debug(f"No. of available GPUs: {GPU_COUNT}")
logging.debug(f"Are GPUs active: {USE_GPU}")
logging.debug(f"Final device used: {DEVICE}")
logging.debug(f"Retry Interval: {RETRY_INTERVAL} seconds")
#############################################
# Synergos Metadata Database Configurations #
#############################################
"""
In PySyft TTP, each registered project is factored into many tables, namely
Project, Experiment, Run, Participant, Registration, Tag, Alignment & Model, all
related hierarchially. All interactions must conform to specified relation &
association rules. Refer to the Record classes in all `rest_rpc/*/core/utils.py`
for more detailed descriptions of said relations/associations.
Also, all archived payloads must conform to specified template schemas. Refer
to the `templates` directory for the actual schemas.
"""
DB_PATH = os.path.join(SRC_DIR, "data", "database.json")
logging.debug(f"Database path detected: {DB_PATH}")
#########################################
# Synergos Marshalling Template Schemas #
#########################################
"""
For the REST service to be stable, schemas must be enforced to ensure that any
erroneous queries will not affect the functioning of the system.
"""
template_paths = detect_configurations("templates")
SCHEMAS = {}
for name, s_path in template_paths.items():
with open(s_path, 'r') as schema:
SCHEMAS[name] = json.load(schema, object_pairs_hook=OrderedDict)
logging.debug(f"Schemas loaded: {list(SCHEMAS.keys())}")
########################################
# Synergos REST Payload Configurations #
########################################
"""
Responses for REST-RPC have a specific format to allow compatibility between TTP
& Worker Flask Interfaces. Remember to modify rest_rpc.connection.core.utils.Payload
upon modifying this template!
"""
PAYLOAD_TEMPLATE = {
'apiVersion': API_VERSION,
'success': 0,
'status': None,
'method': "",
'params': {},
'data': {}
}
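# Illustrative sketch (not part of the original file): handlers would deep-copy
# PAYLOAD_TEMPLATE before filling it in, so the shared template dict is never
# mutated. Field values here are hypothetical.
#
#     from copy import deepcopy
#     payload = deepcopy(PAYLOAD_TEMPLATE)
#     payload['success'] = 1
#     payload['status'] = 200
#     payload['method'] = "projects.get"
#     payload['data'] = {"project_id": "test_project"}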
##########################################
# Synergos Worker Logging Configurations #
##########################################
"""
Synergos has certain optional components, such as a centralised logging
server, as well as a metadata catalogue. This section governs configuration of
the orchestrator node to facilitate such integrations, where applicable. This
portion gets configured during runtime. By default, unconfigured node &
sysmetric loggers are loaded.
"""
NODE_LOGGER = configure_node_logger(logger_name="director")
SYSMETRIC_LOGGER = configure_sysmetric_logger(logger_name="director")
###################################
# Synergos REST-RPC Worker Routes #
###################################
"""
In a Synergos REST-RPC Worker Node, there are a few Flask routes that serve as
interfacing services to initialise the WSSW PySyft worker.
"""
WORKER_ROUTE_TEMPLATES = {
'poll': Template('/worker/poll/$collab_id/$project_id'),
'align': Template('/worker/align/$collab_id/$project_id'),
'initialise': Template('/worker/initialise/$collab_id/$project_id/$expt_id/$run_id'),
'terminate': Template('/worker/terminate/$collab_id/$project_id/$expt_id/$run_id'),
'predict': Template('/worker/predict/$collab_id/$project_id/$expt_id/$run_id')
}
NODE_ID_TEMPLATE = Template("$participant") #Template("$participant-[$node]")
NODE_PID_REGEX = r"^(.*)(?=-\[node_\d*\])"
NODE_NID_REGEX = r"(?:(?!\[)(node_\d*)(?=\]$))"
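# Illustrative sketch (not part of the original file): with the composite
# template variant commented out above (i.e. "$participant-[$node]"), the two
# regexes recover the parts of a node ID. The sample ID is hypothetical.
#
#     import re
#     node_id = "participant_1-[node_0]"
#     re.search(NODE_PID_REGEX, node_id).group(0)  # -> "participant_1"
#     re.search(NODE_NID_REGEX, node_id).group(1)  # -> "node_0"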
- avg_line_length: 31.069892 | max_line_length: 89 | alphanum_fraction: 0.655304
- content_no_comment: duplicate of the content above with comments stripped (truncated in this dump)
- is_comment_constant_removed: true | is_sharp_comment_removed: true
Row 2
- hexsha: 1c3f4e2283a2ca09974785f1d9bc5acbdf5ae2f9 | size: 2,448 | ext: py | lang: Python
- repo: Jackson-Kang/VQVC-Pytorch | path: config.py | head hexsha: d2267b5c52253b6ae11a5767963a65320ae335c2 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
- max_stars_count: 13 | stars events: 2021-02-11T17:48:40.000Z – 2022-02-08T06:37:12.000Z
- max_issues_count: 1 | issues events: 2022-01-17T17:07:22.000Z – 2022-01-18T06:51:21.000Z
- max_forks_count: 3 | forks events: 2021-03-10T08:40:00.000Z – 2022-01-17T17:08:48.000Z
- content:
import torch
from utils.path import get_path
class Arguments:
"""
path configurations
"""
dataset_name = "VCTK-Corpus"
dataset_path = get_path("/home/minsu/dataset/VCTK/", dataset_name)
converted_sample_dir = "results"
prepro_dir = "preprocessed"
model_log_dir = "logs"
model_checkpoint_dir = "ckpts"
# path for loading audio(wav) samples to be preprocessed
wav_dir = get_path(dataset_path, "wavs")
# by default, preprocessed samples and metadata are stored in "prepro_path"
prepro_path = get_path(prepro_dir, dataset_name)
prepro_mel_dir = get_path(prepro_path, "mels")
prepro_meta_dir = get_path(prepro_path, "metas")
prepro_meta_train = get_path(prepro_meta_dir, "meta_train.csv")
prepro_meta_eval = get_path(prepro_meta_dir, "meta_eval.csv")
prepro_meta_unseen = get_path(prepro_meta_dir, "meta_unseen.csv")
mel_stat_path = get_path(prepro_path, "mel_stats.npy")
model_log_path = get_path(model_log_dir, dataset_name)
model_checkpoint_path = get_path(model_checkpoint_dir, dataset_name)
"""
preprocessing hyperparams
"""
max_frame_length = 40 # window size of random resampling
	sr = 22050 # 22,050 Hz (22.05 kHz) sampling rate
n_mels = 80
filter_length = 1024
hop_length = 256
win_length = 1024
	max_wav_value = 32768.0 # full-scale magnitude of 16-bit PCM audio (2**15)
mel_fmin = 0
mel_fmax = 8000
trim_silence = True
top_db = 15 # threshold for trimming silence
"""
VQVC hyperparameters
"""
	n_embeddings = 256 # number of codes in the VQ codebook
	z_dim = 32 # bottleneck dimension
	commitment_cost = 0.01
	norm_epsilon = 1e-4
	speaker_emb_reduction = 1
warmup_steps = 1000
init_lr = 1e-3 # initial learning rate
max_lr = 4e-2 # maximum learning rate
gamma = 0.25
milestones = [20000]
"""
data & training setting
"""
	grad_clip_thresh = 3.0
seed = 999
n_workers = 10
#scheduler setting
use_cuda = True
mem_mode = True
data_split_ratio = [0.95, 0.05] # [train, evaluation] in 0 ~ 1 range
train_visible_devices = "7"
conversion_visible_devices = "7"
train_batch_size = 120
eval_batch_size = 100
eval_step = 1000
eval_path = "eval_results"
save_checkpoint_step = 5000
log_tensorboard = True
max_training_step = 60000
# vocoder setting
vocoder = "vocgan"
vocoder_pretrained_model_name = "vocgan_universal_pretrained_model_epoch_1280.pt"
vocoder_pretrained_model_path = get_path("./vocoder", "{}", "pretrained_models", vocoder_pretrained_model_name).format(vocoder)
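# Illustrative sketch (not part of the original file): a few quantities implied
# by the preprocessing hyperparameters above, useful as a sanity check.
#
#     frames_per_second = Arguments.sr / Arguments.hop_length  # 22050/256 ≈ 86.1
#     window_ms = 1000 * Arguments.win_length / Arguments.sr   # ≈ 46.4 ms
#     window_audio_s = Arguments.max_frame_length * Arguments.hop_length / Arguments.sr
#     # 40 * 256 / 22050 ≈ 0.46 s of audio per random-resampling window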
- avg_line_length: 23.76699 | max_line_length: 128 | alphanum_fraction: 0.747549
- content_no_comment: duplicate of the content above with comments stripped
- is_comment_constant_removed: true | is_sharp_comment_removed: true
Row 3
- hexsha: 1c3f4e978bc8466505b3f34142a058a11b694f7f | size: 56,611 | ext: py | lang: Python
- repo: canyon289/Theano-PyMC | path: theano/gof/op.py | head hexsha: 1a9b04bfe480b758ddfa54ba49c88bee3bec419c | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
- max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
- content:
"""
Defines base classes `Op`, `PureOp`, and `CLinkerOp`.
The `Op` class is the base interface for all operations
compatible with `gof`'s :doc:`graph` routines.
"""
import copy
import inspect
import logging
import os
import re
import sys
import warnings
import numpy as np
import theano
import theano.gof.cc
from theano import config
from theano.gof import graph
from theano.gof.cmodule import GCC_compiler
from theano.gof.fg import FunctionGraph
from theano.gof.utils import (
MethodNotDefined,
TestValueError,
add_tag_trace,
get_variable_trace_string,
object2,
)
__authors__ = "theano-dev"
__copyright__ = "(c) 2010, Universite de Montreal"
__license__ = "3-clause BSD License"
__contact__ = "theano-dev <theano-dev@googlegroups.com>"
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("theano.gof.op.Op")
def compute_test_value(node):
"""Computes the test value of a node.
Parameters
----------
node : Apply
The `Apply` node for which the test value is computed.
Returns
-------
None
The `tag.test_value`s are updated in each `Variable` in `node.outputs`.
"""
# Gather the test values for each input of the node
storage_map = {}
compute_map = {}
for i, ins in enumerate(node.inputs):
try:
storage_map[ins] = [ins.get_test_value()]
compute_map[ins] = [True]
except TestValueError:
# no test-value was specified, act accordingly
if config.compute_test_value == "warn":
warnings.warn(
"Warning, Cannot compute test value: input %i (%s) of Op %s missing default value"
% (i, ins, node),
stacklevel=2,
)
return
elif config.compute_test_value == "raise":
detailed_err_msg = get_variable_trace_string(ins)
raise ValueError(
"Cannot compute test value: input %i (%s) of Op %s missing default value. %s"
% (i, ins, node, detailed_err_msg)
)
elif config.compute_test_value == "ignore":
return
elif config.compute_test_value == "pdb":
import pdb
pdb.post_mortem(sys.exc_info()[2])
else:
raise ValueError(
"%s is invalid for option config.compute_test_value"
% config.compute_test_value
)
# All inputs have test-values; perform the `Op`'s computation
# The original values should not be destroyed, so we copy the values of the
# inputs in `destroy_map`
destroyed_inputs_idx = set()
if getattr(node.op, "destroy_map", None):
for i_pos_list in node.op.destroy_map.values():
destroyed_inputs_idx.update(i_pos_list)
for inp_idx in destroyed_inputs_idx:
inp = node.inputs[inp_idx]
storage_map[inp] = [copy.copy(storage_map[inp][0])]
# Prepare `storage_map` and `compute_map` for the outputs
for o in node.outputs:
storage_map[o] = [None]
compute_map[o] = [False]
# Create a thunk that performs the computation
thunk = node.op.make_thunk(node, storage_map, compute_map, no_recycling=[])
thunk.inputs = [storage_map[v] for v in node.inputs]
thunk.outputs = [storage_map[v] for v in node.outputs]
required = thunk()
assert not required # We provided all inputs
for output in node.outputs:
# Check that the output has been computed
assert compute_map[output][0], (output, storage_map[output][0])
# Add 'test_value' to output tag, so that downstream `Op`s can use
# these numerical values as test values
output.tag.test_value = storage_map[output][0]
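# Illustrative sketch (not part of this module): a caller opts into this
# machinery by setting the config flag and tagging inputs with test values.
# Variable names are hypothetical.
#
#     theano.config.compute_test_value = "warn"
#     x = theano.tensor.matrix("x")
#     x.tag.test_value = np.ones((2, 2))
#     y = x * 2  # y.owner is an Apply node, and y.tag.test_value is now set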
class CLinkerObject:
"""
Standard elements of an Op or Type used with the CLinker.
"""
def c_headers(self):
"""
Optional: Return a list of header files required by code returned by
this class.
Examples
--------
return ['<iostream>', '<math.h>', '/full/path/to/header.h']
These strings will be prefixed with "#include " and inserted at the
beginning of the c source code.
Strings in this list that start neither with '<' nor '"' will be
enclosed in double-quotes.
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined("c_headers", type(self), self.__class__.__name__)
def c_header_dirs(self):
"""
Optional: Return a list of header search paths required by code
returned by this class.
Examples
--------
return ['/usr/local/include', '/opt/weirdpath/src/include']
Provides search paths for headers, in addition to those in any relevant
environment variables.
Hint: for unix compilers, these are the things that get '-I' prefixed
in the compiler cmdline.
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined("c_header_dirs", type(self), self.__class__.__name__)
def c_libraries(self):
"""
Optional: Return a list of libraries required by code returned by
this class.
Examples
--------
return ['gsl', 'gslcblas', 'm', 'fftw3', 'g2c'].
The compiler will search the directories specified by the environment
variable LD_LIBRARY_PATH in addition to any returned by `c_lib_dirs`.
Hint: for unix compilers, these are the things that get '-l' prefixed
in the compiler cmdline.
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined("c_libraries", type(self), self.__class__.__name__)
def c_lib_dirs(self):
"""
Optional: Return a list of library search paths required by code
returned by this class.
Examples
--------
return ['/usr/local/lib', '/opt/weirdpath/build/libs'].
Provides search paths for libraries, in addition to those in any
relevant environment variables (e.g. LD_LIBRARY_PATH).
Hint: for unix compilers, these are the things that get '-L' prefixed
in the compiler cmdline.
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined("c_lib_dirs", type(self), self.__class__.__name__)
def c_support_code(self):
"""
Optional: Return utility code (a string, or a list of strings) for use by a `Variable` or `Op` to be
included at global scope prior to the rest of the code for this class.
QUESTION: How many times will this support code be emitted for a graph
with many instances of the same type?
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined("c_support_code", type(self), self.__class__.__name__)
def c_code_cache_version(self):
"""
Return a tuple of integers indicating the version of this Op.
An empty tuple indicates an 'unversioned' Op that will not be cached
between processes.
        The cache mechanism may erase cached modules that have been superseded
by newer versions. See `ModuleCache` for details.
See Also
--------
c_code_cache_version_apply()
"""
return ()
def c_compile_args(self):
"""
Optional: Return a list of compile args recommended to compile the
code returned by other methods in this class.
Examples
--------
return ['-ffast-math']
Compiler arguments related to headers, libraries and search paths should
be provided via the functions `c_headers`, `c_libraries`,
`c_header_dirs`, and `c_lib_dirs`.
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined("c_compile_args", type(self), self.__class__.__name__)
def c_no_compile_args(self):
"""
Optional: return a list of incompatible gcc compiler arguments.
We will remove those arguments from the command line of gcc. So if
another Op adds a compile arg in the graph that is incompatible
with this Op, the incompatible arg will not be used.
Useful for instance to remove -ffast-math.
EXAMPLE
WRITEME
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined("c_no_compile_args", type(self), self.__class__.__name__)
def c_init_code(self):
"""
Optional: return a list of code snippets to be inserted in module
initialization.
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined("c_init_code", type(self), self.__class__.__name__)
class CLinkerOp(CLinkerObject):
"""
Interface definition for `Op` subclasses compiled by `CLinker`.
A subclass should implement WRITEME.
WRITEME: structure of automatically generated C code.
Put this in doc/code_structure.txt
"""
def c_code(self, node, name, inputs, outputs, sub):
"""
Required: return the C implementation of an Op.
Returns C code that does the computation associated to this `Op`,
given names for the inputs and outputs.
Parameters
----------
node : Apply instance
The node for which we are compiling the current c_code.
The same Op may be used in more than one node.
name : str
A name that is automatically assigned and guaranteed to be
unique.
inputs : list of strings
There is a string for each input of the function, and the
string is the name of a C variable pointing to that input.
The type of the variable depends on the declared type of
the input. There is a corresponding python variable that
can be accessed by prepending "py_" to the name in the
list.
outputs : list of strings
Each string is the name of a C variable where the Op should
store its output. The type depends on the declared type of
the output. There is a corresponding python variable that
can be accessed by prepending "py_" to the name in the
list. In some cases the outputs will be preallocated and
the value of the variable may be pre-filled. The value for
an unallocated output is type-dependent.
sub : dict of strings
Extra symbols defined in `CLinker` sub symbols (such as 'fail').
WRITEME
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined("%s.c_code" % self.__class__.__name__)
def c_code_cache_version_apply(self, node):
"""
Return a tuple of integers indicating the version of this Op.
An empty tuple indicates an 'unversioned' Op that will not be
cached between processes.
The cache mechanism may erase cached modules that have been
        superseded by newer versions. See `ModuleCache` for details.
See Also
--------
c_code_cache_version()
Notes
-----
This function overrides `c_code_cache_version` unless it explicitly
calls `c_code_cache_version`. The default implementation simply
calls `c_code_cache_version` and ignores the `node` argument.
"""
return self.c_code_cache_version()
def c_code_cleanup(self, node, name, inputs, outputs, sub):
"""
Optional: return C code to run after c_code, whether it failed or not.
This is a convenient place to clean up things allocated by c_code().
Parameters
----------
node : Apply instance
WRITEME
name : str
A name that is automatically assigned and guaranteed to be
unique.
inputs : list of strings
There is a string for each input of the function, and the
string is the name of a C variable pointing to that input.
The type of the variable depends on the declared type of
the input. There is a corresponding python variable that
can be accessed by prepending "py_" to the name in the
list.
outputs : list of strings
            Each string is the name of a C variable corresponding to
one of the outputs of the Op. The type depends on the
declared type of the output. There is a corresponding
python variable that can be accessed by prepending "py_" to
the name in the list.
sub : dict of strings
extra symbols defined in `CLinker` sub symbols (such as 'fail').
WRITEME
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined("%s.c_code_cleanup" % self.__class__.__name__)
def c_support_code_apply(self, node, name):
"""
Optional: return utility code for use by an `Op` that will be
inserted at global scope, that can be specialized for the
support of a particular `Apply` node.
Parameters
----------
node: an Apply instance in the graph being compiled
name: str
A string or number that serves to uniquely identify this node.
Symbol names defined by this support code should include the name,
so that they can be called from the c_code, and so that they do not
cause name collisions.
Notes
-----
This function is called in addition to c_support_code and will
supplement whatever is returned from there.
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined(
"c_support_code_apply", type(self), self.__class__.__name__
)
def c_init_code_apply(self, node, name):
"""
Optional: return a code string specific to the apply
to be inserted in the module initialization code.
Parameters
----------
node : an Apply instance in the graph being compiled
name : str
A string or number that serves to uniquely identify this node.
Symbol names defined by this support code should include the name,
so that they can be called from the c_code, and so that they do not
cause name collisions.
Notes
-----
This function is called in addition to c_init_code and will supplement
whatever is returned from there.
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined("c_init_code_apply", type(self), self.__class__.__name__)
def c_init_code_struct(self, node, name, sub):
"""
Optional: return a code string specific to the apply
to be inserted in the struct initialization code.
Parameters
----------
node : an Apply instance in the graph being compiled
name : str
A unique name to distinguish variables from those of other nodes.
sub
A dictionary of values to substitute in the code.
Most notably it contains a 'fail' entry that you should place in
your code after setting a python exception to indicate an error.
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined(
"c_init_code_struct", type(self), self.__class__.__name__
)
def c_support_code_struct(self, node, name):
"""
Optional: return utility code for use by an `Op` that will be
inserted at struct scope, that can be specialized for the
support of a particular `Apply` node.
Parameters
----------
node : an Apply instance in the graph being compiled
name : str
            A unique name to distinguish your variables from those of other
nodes.
Raises
------
MethodNotDefined
Subclass does not implement this method.
"""
raise MethodNotDefined(
"c_support_code_struct", type(self), self.__class__.__name__
)
def c_cleanup_code_struct(self, node, name):
"""
Optional: return a code string specific to the apply to be
inserted in the struct cleanup code.
Parameters
----------
node : an Apply instance in the graph being compiled
name : str
A unique name to distinguish variables from those of other nodes.
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined(
"c_cleanup_code_struct", type(self), self.__class__.__name__
)
class PureOp:
"""A class that models and constructs operations in a graph.
A `PureOp` instance has several responsibilities:
- construct `Apply` nodes via `PureOp.make_node` method,
- perform the numeric calculation of the modeled operation via
the `PureOp.perform` method,
- and (optionally) build the gradient-calculating sub-graphs via the
`PureOp.grad` method.
To see how `PureOp`, `Type`, `Variable`, and `Apply` fit together see the
page on :doc:`graph`.
For more details regarding how these methods should behave: see the `Op
Contract` in the sphinx docs (advanced tutorial on `Op`-making).
"""
default_output = None
"""
An `int` that specifies which output `PureOp.__call__` should return. If
`None`, then all outputs are returned.
A subclass should not change this class variable, but instead override it
with a subclass variable or an instance variable.
"""
def make_node(self, *inputs):
"""Construct an `Apply` node that represent the application of this operation to the given inputs.
This must be implemented by sub-classes.
Returns
-------
node: Apply
The constructed `Apply` node.
"""
raise MethodNotDefined("make_node", type(self), self.__class__.__name__)
def __call__(self, *inputs, **kwargs):
"""Construct an `Apply` node using `self.make_node` and return its outputs.
This method is just a wrapper around `PureOp.make_node`.
It is called by code such as:
.. python::
x = tensor.matrix()
y = tensor.exp(x)
`tensor.exp` is an Op instance, so `tensor.exp(x)` calls
`tensor.exp.__call__` (i.e. this method) and returns its single output
`Variable`, `y`. The `Apply` node constructed by `self.make_node`
behind the scenes is available via `y.owner`.
`PureOp` authors are able to determine which output is returned by this method
        via the `PureOp.default_output` property, but subclasses are free to override this
function and ignore `default_output`.
Parameters
----------
inputs : tuple of Variable
The `PureOp`'s inputs.
kwargs
Additional keyword arguments to be forwarded to
`make_node()` *except* for optional argument `return_list` (which
defaults to `False`). If `return_list` is `True`, then the returned
value is always a `list`. Otherwise it is either a single `Variable`
when the output of `make_node()` contains a single element, or this
output (unchanged) when it contains multiple elements.
Returns
-------
outputs : list of Variable or Variable
Either a list of output `Variable`s, or a single `Variable`.
This is determined by the number of outputs produced by the
`PureOp`, the value of the keyword `return_list`, and the value of
the `PureOp.default_output` property.
"""
return_list = kwargs.pop("return_list", False)
node = self.make_node(*inputs, **kwargs)
if config.compute_test_value != "off":
compute_test_value(node)
if self.default_output is not None:
rval = node.outputs[self.default_output]
if return_list:
rval = [rval]
return rval
else:
if return_list:
return list(node.outputs)
elif len(node.outputs) == 1:
return node.outputs[0]
else:
return node.outputs
def __ne__(self, other):
return not (self == other)
# Convenience so that subclass implementers don't have to import utils
# just to self.add_tag_trace
add_tag_trace = staticmethod(add_tag_trace)
def grad(self, inputs, output_grads):
"""Construct a graph for the gradient with respect to each input variable.
Each returned `Variable` represents the gradient with respect to that
input computed based on the symbolic gradients with respect to each
output. If the output is not differentiable with respect to an input,
then this method should return an instance of type `NullType` for that
input.
Parameters
----------
inputs : list of Variable
The input variables.
output_grads : list of Variable
The gradients of the output variables.
Returns
-------
grads : list of Variable
The gradients with respect to each `Variable` in `inputs`.
"""
raise NotImplementedError()
def L_op(self, inputs, outputs, output_grads):
r"""Construct a graph for the L-operator.
This method is primarily used by `tensor.Lop` and dispatches to
`PureOp.grad` by default.
The *L-operator* computes a *row* vector times the Jacobian. The
mathematical relationship is
:math:`v \frac{\partial f(x)}{\partial x}`.
The *L-operator* is also supported for generic tensors (not only for
vectors).
Parameters
----------
inputs : list of Variable
outputs : list of Variable
output_grads : list of Variable
"""
return self.grad(inputs, output_grads)
def R_op(self, inputs, eval_points):
"""Construct a graph for the R-operator.
This method is primarily used by tensor.Rop
Suppose the op outputs
[ f_1(inputs), ..., f_n(inputs) ]
Parameters
----------
inputs : a Variable or list of Variables
eval_points
A Variable or list of Variables with the same length as inputs.
Each element of eval_points specifies the value of the corresponding
input at the point where the R op is to be evaluated.
Returns
-------
list of n elements
rval[i] should be Rop(f=f_i(inputs),
wrt=inputs,
eval_points=eval_points)
"""
raise NotImplementedError()
def perform(self, node, inputs, output_storage, params=None):
"""
Required: Calculate the function on the inputs and put the variables in
the output storage. Return None.
Parameters
----------
node : Apply
The symbolic `Apply` node that represents this computation.
inputs : Sequence
Immutable sequence of non-symbolic/numeric inputs. These
are the values of each `Variable` in `node.inputs`.
output_storage : list of list
List of mutable single-element lists (do not change the length of
these lists). Each sub-list corresponds to value of each
`Variable` in `node.outputs`. The primary purpose of this method
is to set the values of these sub-lists.
params : tuple
A tuple containing the values of each entry in `__props__`.
Notes
-----
The `output_storage` list might contain data. If an element of
output_storage is not `None`, it has to be of the right type, for
instance, for a `TensorVariable`, it has to be a NumPy `ndarray`
with the right number of dimensions and the correct dtype.
Its shape and stride pattern can be arbitrary. It is not
guaranteed that such pre-set values were produced by a previous call to
this `PureOp.perform`; they could've been allocated by another
`PureOp`'s `perform` method.
A `PureOp` is free to reuse `output_storage` as it sees fit, or to
discard it and allocate new memory.
Raises
------
MethodNotDefined
The subclass does not override this method.
"""
raise MethodNotDefined(
"perform",
type(self),
self.__class__.__name__,
"Did you used Theano flags mode=FAST_COMPILE?"
" You can use optimizer=fast_compile instead.",
)
def do_constant_folding(self, node):
"""Determine whether or not constant folding should be performed for the given node.
This allows each `PureOp` to determine if it wants to be constant
folded when all its inputs are constant. This allows it to choose where
it puts its memory/speed trade-off. Also, it could make things faster
as constants can't be used for in-place operations (see
`*IncSubtensor`).
Parameters
----------
node : Apply
The node for which the constant folding determination is made.
Returns
-------
res : bool
"""
return True
class Op(object2, PureOp, CLinkerOp):
"""
Convenience class to bundle `PureOp` and `CLinkerOp`.
"""
# We add a default get_params() implementation which will try to detect params from the op
# if params_type is set to a ParamsType. If not, we raise a MethodNotDefined exception.
def get_params(self, node):
if hasattr(self, "params_type") and isinstance(
self.params_type, theano.gof.ParamsType
):
wrapper = self.params_type
if not all(hasattr(self, field) for field in wrapper.fields):
# Let's print missing attributes for debugging.
not_found = tuple(
field for field in wrapper.fields if not hasattr(self, field)
)
raise AttributeError(
"%s: missing attributes %s for ParamsType."
% (type(self).__name__, not_found)
)
# ParamsType.get_params() will apply filtering to attributes.
return self.params_type.get_params(self)
raise MethodNotDefined("get_params")
def prepare_node(self, node, storage_map, compute_map, impl):
"""
Make any special modifications that the Op needs before doing
make_thunk().
This can modify the node inplace and should return nothing.
        It can be called multiple times with different impl values. It is the
        op's responsibility not to re-prepare the node when it isn't
        appropriate to do so.
"""
def make_c_thunk(self, node, storage_map, compute_map, no_recycling):
"""Like make_thunk, but will only try to make a C thunk."""
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
e = FunctionGraph(node.inputs, node.outputs)
e_no_recycling = [
new_o
for (new_o, old_o) in zip(e.outputs, node.outputs)
if old_o in no_recycling
]
cl = theano.gof.cc.CLinker().accept(e, no_recycling=e_no_recycling)
# float16 gets special treatment since running
# unprepared C code will get bad results.
if not getattr(self, "_f16_ok", False):
def is_f16(t):
return getattr(t, "dtype", "") == "float16"
if any(is_f16(i.type) for i in node.inputs) or any(
is_f16(o.type) for o in node.outputs
):
# get_dynamic_module is a subset of make_thunk that is reused.
# This just try to build the c code
# It will raise an error for ops
# that don't implement c code. In those cases, we
# don't want to print a warning.
cl.get_dynamic_module()
print("Disabling C code for %s due to unsupported " "float16" % (self,))
raise NotImplementedError("float16")
_logger.debug("Trying CLinker.make_thunk")
outputs = cl.make_thunk(
input_storage=node_input_storage, output_storage=node_output_storage
)
thunk, node_input_filters, node_output_filters = outputs
def rval():
thunk()
for o in node.outputs:
compute_map[o][0] = True
rval.thunk = thunk
rval.cthunk = thunk.cthunk
rval.inputs = node_input_storage
rval.outputs = node_output_storage
rval.lazy = False
return rval
def make_py_thunk(self, node, storage_map, compute_map, no_recycling, debug=False):
"""
Like make_thunk() but only makes python thunks.
"""
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
if debug:
p = node.op.debug_perform
else:
p = node.op.perform
params = node.run_params()
if params is graph.NoParams:
# default arguments are stored in the closure of `rval`
def rval(p=p, i=node_input_storage, o=node_output_storage, n=node):
r = p(n, [x[0] for x in i], o)
for o in node.outputs:
compute_map[o][0] = True
return r
else:
params_val = node.params_type.filter(params)
def rval(
p=p,
i=node_input_storage,
o=node_output_storage,
n=node,
params=params_val,
):
r = p(n, [x[0] for x in i], o, params)
for o in node.outputs:
compute_map[o][0] = True
return r
rval.inputs = node_input_storage
rval.outputs = node_output_storage
rval.perform = p
rval.lazy = False
return rval
def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):
"""
This function must return a thunk, that is a zero-arguments
function that encapsulates the computation to be performed
by this op on the arguments of the node.
Parameters
----------
node
Something previously returned by self.make_node.
storage_map
dict variable -> one-element-list where a computed
value for this variable may be found.
compute_map
dict variable -> one-element-list where a boolean
value will be found. The boolean indicates whether the
variable's storage_map container contains a valid value (True)
or if it has not been computed yet (False).
no_recycling
List of variables for which it is forbidden to reuse memory
allocated by a previous call.
impl
Currently, None, 'c' or 'py'. If 'c' or 'py' we will only try
that version of the code.
Notes
-----
If the thunk consults the storage_map on every call, it is safe
for it to ignore the no_recycling argument, because elements of the
no_recycling list will have a value of None in the storage map. If
the thunk can potentially cache return values (like CLinker does),
then it must not do so for variables in the no_recycling list.
        self.prepare_node(node, ...) is always called. If we try 'c' and it
        fails and we then try 'py', prepare_node will be called twice.
"""
if (impl is None and theano.config.cxx) or impl == "c":
self.prepare_node(
node, storage_map=storage_map, compute_map=compute_map, impl="c"
)
try:
return self.make_c_thunk(node, storage_map, compute_map, no_recycling)
except (NotImplementedError, MethodNotDefined):
# We requested the c code, so don't catch the error.
if impl == "c":
raise
_logger.debug("Falling back on perform")
# condition: either there was no c_code, or it failed or
# python code was requested.
self.prepare_node(
node, storage_map=storage_map, compute_map=compute_map, impl="py"
)
return self.make_py_thunk(node, storage_map, compute_map, no_recycling)
def make_node(self, *inputs):
"""
Create a "apply" nodes for the inputs in that order.
"""
if not hasattr(self, "itypes"):
            raise NotImplementedError(
                "You can either define itypes and otypes, "
                "or implement make_node"
            )
        if not hasattr(self, "otypes"):
            raise NotImplementedError(
                "You can either define itypes and otypes, "
                "or implement make_node"
            )
if len(inputs) != len(self.itypes):
raise ValueError(
"We expected %d inputs but got %d." % (len(self.itypes), len(inputs))
)
if not all(inp.type == it for inp, it in zip(inputs, self.itypes)):
raise TypeError(
"We expected inputs of types '%s' but got types '%s' "
% (str(self.itypes), str([inp.type for inp in inputs]))
)
return theano.Apply(self, inputs, [o() for o in self.otypes])
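# Illustrative sketch (not part of this module): the default `make_node` above
# lets a simple Op be defined with only `itypes`/`otypes` and `perform`. The
# class and variable names below are hypothetical examples.
#
#     import theano.tensor as tt
#
#     class DoubleOp(Op):
#         itypes = [tt.dmatrix]
#         otypes = [tt.dmatrix]
#
#         def perform(self, node, inputs, output_storage, params=None):
#             (x,) = inputs
#             output_storage[0][0] = 2 * x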
def get_test_value(v):
"""Get the test value for `v`.
If input `v` is not already a variable, it is turned into one by calling
`as_tensor_variable(v)`.
Raises
------
AttributeError if no test value is set.
"""
if not isinstance(v, graph.Variable):
v = theano.tensor.as_tensor_variable(v)
return v.get_test_value()
def missing_test_message(msg):
"""
Displays msg, a message saying that some test_value is missing,
in the appropriate form based on config.compute_test_value:
off: The interactive debugger is off, so we do nothing.
ignore: The interactive debugger is set to ignore missing inputs,
so do nothing.
warn: Display msg as a warning.
Raises
------
AttributeError
With msg as the exception text.
"""
action = config.compute_test_value
if action == "raise":
raise TestValueError(msg)
elif action == "warn":
warnings.warn(msg, stacklevel=2)
else:
assert action in ["ignore", "off"]
def get_test_values(*args):
"""
Intended use:
    for val_1, ..., val_n in get_test_values(var_1, ..., var_n):
if some condition on val_1, ..., val_n is not met:
missing_test_message("condition was not met")
    Given a list of variables, get_test_values does one of three things:
1. If the interactive debugger is off, returns an empty list
2. If the interactive debugger is on, and all variables have
debug values, returns a list containing a single element.
This single element is either:
a) if there is only one variable, the element is its
value
b) otherwise, a tuple containing debug values of all
the variables.
3. If the interactive debugger is on, and some variable does
not have a debug value, issue a missing_test_message about
the variable, and, if still in control of execution, return
an empty list.
"""
if config.compute_test_value == "off":
return []
rval = []
for i, arg in enumerate(args):
try:
rval.append(get_test_value(arg))
except TestValueError:
if hasattr(arg, "name") and arg.name is not None:
missing_test_message(
"Argument {} ('{}') has no test value".format(i, arg.name)
)
else:
missing_test_message("Argument {} has no test value".format(i))
return []
if len(rval) == 1:
return rval
return [tuple(rval)]
ops_with_inner_function = {}
"""
Registry of Ops that have an inner compiled Theano function.
The keys are Op classes (not instances), and values are the name of the
attribute that contains the function. For instance, if the function is
self.fn, the value will be 'fn'.
We need that to be able not to run debug checks a number of times that is
exponential in the nesting level of those ops.
For instance, Scan will be registered here.
"""
class OpenMPOp(Op):
"""
    All ops using OpenMP code should inherit from this Op.
    This op will check that the compiler correctly supports OpenMP code.
    If not, it will print a warning and disable OpenMP for this Op, then
    generate the non-OpenMP code instead.
    This is needed because the EPD g++ on Windows reports in its version
    information that it supports OpenMP, but does not include the OpenMP files.
    We also add the correct compiler flags in c_compile_args.
"""
gxx_support_openmp = None
"""
True/False after we tested this.
"""
def __init__(self, openmp=None):
if openmp is None:
openmp = theano.config.openmp
self.openmp = openmp
def __setstate__(self, d):
self.__dict__.update(d)
# If we unpickle old op
if not hasattr(self, "openmp"):
self.openmp = False
def c_compile_args(self):
"""
Return the compilation arg "fopenmp" if openMP is supported
"""
self.update_self_openmp()
if self.openmp:
return ["-fopenmp"]
return []
def c_headers(self):
"""
Return the header file name "omp.h" if openMP is supported
"""
self.update_self_openmp()
if self.openmp:
return ["omp.h"]
return []
@staticmethod
def test_gxx_support():
"""
Check if openMP is supported
"""
code = """
#include <omp.h>
int main( int argc, const char* argv[] )
{
int res[10];
for(int i=0; i < 10; i++){
res[i] = i;
}
}
"""
default_openmp = GCC_compiler.try_compile_tmp(
src_code=code, tmp_prefix="test_omp_", flags=["-fopenmp"], try_run=False
)
return default_openmp
def update_self_openmp(self):
"""
Make sure self.openmp is not True if there is no support in gxx.
"""
if self.openmp:
if OpenMPOp.gxx_support_openmp is None:
OpenMPOp.gxx_support_openmp = OpenMPOp.test_gxx_support()
if not OpenMPOp.gxx_support_openmp:
# We want to warn only once.
warnings.warn(
"Your g++ compiler fails to compile OpenMP code. We"
" know this happen with some version of the EPD mingw"
" compiler and LLVM compiler on Mac OS X."
" We disable openmp everywhere in Theano."
" To remove this warning set the theano flags `openmp`"
" to False.",
stacklevel=3,
)
if OpenMPOp.gxx_support_openmp is False:
self.openmp = False
theano.config.openmp = False
def prepare_node(self, node, storage_map, compute_map, impl):
if impl == "c":
self.update_self_openmp()
def simple_meth(tag):
def f(self):
if tag in self.code_sections:
return self.code_sections[tag]
else:
raise MethodNotDefined("c_" + tag, type(self), type(self).__name__)
f.__name__ = "c_" + tag
return f
def apply_meth(tag):
def f(self, node, name):
if tag in self.code_sections:
code = self.code_sections[tag]
define_macros, undef_macros = self.get_c_macros(node, name)
return "\n".join(["", define_macros, code, undef_macros])
else:
raise MethodNotDefined("c_" + tag, type(self), type(self).__name__)
f.__name__ = "c_" + tag
return f
class COp(Op):
"""
Class to allow an op to have an external C implementation.
An op can use this class by inheriting from it and calling its
__init__() method, providing it with a path to an external file containing
the C implementation and the name of the function, in that file, to call
to perform the computations for the op.
"""
section_re = re.compile(r"^#section ([a-zA-Z0-9_]+)$", re.MULTILINE)
backward_re = re.compile(r"^THEANO_(APPLY|SUPPORT)_CODE_SECTION$", re.MULTILINE)
# This is the set of allowed markers
SECTIONS = {
"init_code",
"init_code_apply",
"init_code_struct",
"support_code",
"support_code_apply",
"support_code_struct",
"cleanup_code_struct",
"code",
"code_cleanup",
}
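    # Illustrative sketch (not part of this module): an external C file consumed
    # by a COp marks its parts with the `#section` markers matched by
    # `section_re` above; each marker must be one of the names in SECTIONS.
    # The file contents below are hypothetical.
    #
    #     #section support_code
    #     double times_two(double x) { return 2.0 * x; }
    #
    #     #section code
    #     /* main computation; `fail` comes from the `sub` dict */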
@classmethod
def get_path(cls, f):
"""
Convert a path relative to the location of the class file into
        an absolute path. Paths that are already absolute are passed
through unchanged.
"""
if not os.path.isabs(f):
class_file = inspect.getfile(cls)
class_dir = os.path.dirname(class_file)
f = os.path.realpath(os.path.join(class_dir, f))
return f
def __init__(self, func_files, func_name=None):
"""
Sections are loaded from files in order with sections in later
files overriding sections in previous files.
"""
if not isinstance(func_files, list):
func_files = [func_files]
self.func_name = func_name
# Keep the original name. If we reload old pickle, we want to
# find the new path and new version of the file in Theano.
self.func_files = func_files
self.load_c_code(func_files)
if len(self.code_sections) == 0:
raise ValueError("No sections where defined in C files")
if self.func_name is not None:
if "op_code" in self.code_sections:
# maybe a warning instead (and clearing the key)
raise ValueError(
'Cannot have an "op_code" section and ' "specify the func_name"
)
if "op_code_cleanup" in self.code_sections:
# maybe a warning instead (and clearing the key)
raise ValueError(
'Cannot have an "op_code_cleanup" section '
"and specify the func_name"
)
def load_c_code(self, func_files):
"""
Loads the c code to perform the Op
"""
func_files = [self.get_path(f) for f in func_files]
self.func_codes = []
for func_file in func_files:
# U (universal) will convert all new lines format to \n.
with open(func_file) as f:
self.func_codes.append(f.read())
# If both the old section markers and the new section markers are
# present, raise an error because we don't know which ones to follow.
old_markers_present = False
new_markers_present = False
for code in self.func_codes:
if self.backward_re.search(code):
old_markers_present = True
if self.section_re.search(code):
new_markers_present = True
if old_markers_present and new_markers_present:
raise ValueError(
"Both the new and the old syntax for "
"identifying code sections are present in the "
"provided C code. These two syntaxes should not "
"be used at the same time."
)
self.code_sections = dict()
for i, code in enumerate(self.func_codes):
if self.backward_re.search(code):
# This is backward compat code that will go away in a while
# Separate the code into the proper sections
split = self.backward_re.split(code)
n = 1
while n < len(split):
if split[n] == "APPLY":
self.code_sections["support_code_apply"] = split[n + 1]
elif split[n] == "SUPPORT":
self.code_sections["support_code"] = split[n + 1]
n += 2
continue
elif self.section_re.search(code):
# Check for code outside of the supported sections
split = self.section_re.split(code)
if split[0].strip() != "":
raise ValueError(
"Stray code before first #section "
"statement (in file %s): %s" % (func_files[i], split[0])
)
# Separate the code into the proper sections
n = 1
while n < len(split):
if split[n] not in self.SECTIONS:
raise ValueError(
"Unknown section type (in file %s): %s"
% (func_files[i], split[n])
)
if split[n] not in self.code_sections:
self.code_sections[split[n]] = ""
self.code_sections[split[n]] += split[n + 1]
n += 2
else:
raise ValueError(
"No valid section marker was found in file " "%s" % func_files[i]
)
def __get_op_params(self):
"""
Returns a list of (name, value) pairs that will be turned into
macros for use within the op code.
The names must be strings that are not a C keyword and the
values must be strings of literal C representations.
If op uses a :class:`theano.gof.params_type.ParamsType` as ``params_type``,
it returns:
- a default macro ``PARAMS_TYPE`` which defines the class name of the
corresponding C struct.
- a macro ``DTYPE_PARAM_key`` for every ``key`` in the ParamsType for which associated
type implements the method :func:`theano.gof.type.CLinkerType.c_element_type`.
``DTYPE_PARAM_key`` defines the primitive C type name of an item in a variable
associated to ``key``.
"""
if hasattr(self, "params_type") and isinstance(
self.params_type, theano.gof.ParamsType
):
wrapper = self.params_type
params = [("PARAMS_TYPE", wrapper.name)]
for i in range(wrapper.length):
try:
# NB (reminder): These macros are currently used only in ParamsType example test
# (`theano/gof/tests/test_quadratic_function.c`), to demonstrate how we can
# access params dtypes when dtypes may change (e.g. if based on theano.config.floatX).
# But in practice, params types generally have fixed types per op.
params.append(
(
"DTYPE_PARAM_" + wrapper.fields[i],
wrapper.types[i].c_element_type(),
)
)
except MethodNotDefined:
pass
return params
return []
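    # Sketch of what the (name, value) pairs returned above become once
    # turned into macros by get_c_macros, for a hypothetical ParamsType
    # named "MyParams" with a float64 field "slope":
    #
    #     #define PARAMS_TYPE MyParams
    #     #define DTYPE_PARAM_slope npy_float64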
def c_code_cache_version(self):
version = (hash(tuple(self.func_codes)),)
if hasattr(self, "params_type"):
version += (self.params_type.c_code_cache_version(),)
return version
def c_init_code(self):
"""
Get the code section for init_code
"""
if "init_code" in self.code_sections:
return [self.code_sections["init_code"]]
else:
raise MethodNotDefined("c_init_code", type(self), type(self).__name__)
c_init_code_apply = apply_meth("init_code_apply")
c_support_code = simple_meth("support_code")
c_support_code_apply = apply_meth("support_code_apply")
c_support_code_struct = apply_meth("support_code_struct")
c_cleanup_code_struct = apply_meth("cleanup_code_struct")
def format_c_function_args(self, inp, out):
        # Generate a string containing the arguments sent to the external C
        # function. The argstring will be of the format:
# "input0, input1, input2, &output0, &output1"
inp = list(inp)
numi = getattr(self, "_cop_num_inputs", len(inp))
while len(inp) < numi:
inp.append("NULL")
out = ["&%s" % o for o in out]
numo = getattr(self, "_cop_num_outputs", len(out))
while len(out) < numo:
out.append("NULL")
return ", ".join(inp + out)
def get_c_macros(self, node, name, check_input=None):
define_template = "#define %s %s"
undef_template = "#undef %s"
define_macros = []
undef_macros = []
if check_input is None:
check_input = getattr(self, "check_input", True)
if check_input:
# Extract the various properties of the input and output variables
variables = node.inputs + node.outputs
variable_names = ["INPUT_%i" % i for i in range(len(node.inputs))] + [
"OUTPUT_%i" % i for i in range(len(node.outputs))
]
# Generate dtype macros
for i, v in enumerate(variables):
if not hasattr(v, "dtype"):
continue
vname = variable_names[i]
macro_name = "DTYPE_" + vname
macro_value = "npy_" + v.dtype
define_macros.append(define_template % (macro_name, macro_value))
undef_macros.append(undef_template % macro_name)
d = np.dtype(v.dtype)
macro_name = "TYPENUM_" + vname
macro_value = d.num
define_macros.append(define_template % (macro_name, macro_value))
undef_macros.append(undef_template % macro_name)
macro_name = "ITEMSIZE_" + vname
macro_value = d.itemsize
define_macros.append(define_template % (macro_name, macro_value))
undef_macros.append(undef_template % macro_name)
# Generate a macro to mark code as being apply-specific
define_macros.append(
define_template % ("APPLY_SPECIFIC(str)", "str##_%s" % name)
)
undef_macros.append(undef_template % "APPLY_SPECIFIC")
for n, v in self.__get_op_params():
define_macros.append(define_template % (n, v))
undef_macros.append(undef_template % (n,))
return "\n".join(define_macros), "\n".join(undef_macros)
def _lquote_macro(self, txt):
res = []
spl = txt.split("\n")
for l in spl[:-1]:
res.append(l + " \\")
res.append(spl[-1])
return "\n".join(res)
def get_sub_macros(self, sub):
define_macros = []
undef_macros = []
define_macros.append("#define FAIL {}".format(self._lquote_macro(sub["fail"])))
undef_macros.append("#undef FAIL")
if "params" in sub:
define_macros.append("#define PARAMS {}".format(sub["params"]))
undef_macros.append("#undef PARAMS")
return "\n".join(define_macros), "\n".join(undef_macros)
def get_io_macros(self, inputs, outputs):
define_macros = []
undef_macros = []
for i, inp in enumerate(inputs):
define_macros.append("#define INPUT_%d %s" % (i, inp))
undef_macros.append("#undef INPUT_%d" % (i,))
for i, out in enumerate(outputs):
define_macros.append("#define OUTPUT_%d %s" % (i, inp))
undef_macros.append("#undef OUTPUT_%d" % (i,))
def c_init_code_struct(self, node, name, sub):
"""
Stitches all the macros and "init_code" together
"""
if "init_code_struct" in self.code_sections:
op_code = self.code_sections["init_code_struct"]
def_macros, undef_macros = self.get_c_macros(node, name)
def_sub, undef_sub = self.get_sub_macros(sub)
return "\n".join(
["", def_macros, def_sub, op_code, undef_sub, undef_macros]
)
else:
raise MethodNotDefined(
"c_init_code_struct", type(self), type(self).__name__
)
def c_code(self, node, name, inp, out, sub):
if self.func_name is not None:
assert "code" not in self.code_sections
define_macros, undef_macros = self.get_c_macros(
node, name, check_input=False
)
params = ""
if "params" in sub:
params = ", {}".format(sub["params"])
# Generate the C code
return """
%(define_macros)s
{
if (%(func_name)s(%(func_args)s%(params)s) != 0) {
%(fail)s
}
}
%(undef_macros)s
""" % dict(
func_name=self.func_name,
fail=sub["fail"],
params=params,
func_args=self.format_c_function_args(inp, out),
define_macros=define_macros,
undef_macros=undef_macros,
)
else:
if "code" in self.code_sections:
op_code = self.code_sections["code"]
def_macros, undef_macros = self.get_c_macros(node, name)
def_sub, undef_sub = self.get_sub_macros(sub)
def_io, undef_io = self.get_io_macros(inp, out)
return "\n".join(
[
def_macros,
def_sub,
def_io,
op_code,
undef_io,
undef_sub,
undef_macros,
]
)
else:
raise MethodNotDefined("c_code", type(self), type(self).__name__)
def c_code_cleanup(self, node, name, inputs, outputs, sub):
"""
Stitches all the macros and "code_cleanup" together
"""
if "code_cleanup" in self.code_sections:
op_code = self.code_sections["code_cleanup"]
def_macros, undef_macros = self.get_c_macros(node, name)
def_sub, undef_sub = self.get_sub_macros(sub)
def_io, undef_io = self.get_io_macros(inputs, outputs)
return "\n".join(
[
def_macros,
def_sub,
def_io,
op_code,
undef_io,
undef_sub,
undef_macros,
]
)
else:
raise MethodNotDefined("c_code_cleanup", type(self), type(self).__name__)
| 34.309697
| 108
| 0.584639
|
import copy
import inspect
import logging
import os
import re
import sys
import warnings
import numpy as np
import theano
import theano.gof.cc
from theano import config
from theano.gof import graph
from theano.gof.cmodule import GCC_compiler
from theano.gof.fg import FunctionGraph
from theano.gof.utils import (
MethodNotDefined,
TestValueError,
add_tag_trace,
get_variable_trace_string,
object2,
)
__authors__ = "theano-dev"
__copyright__ = "(c) 2010, Universite de Montreal"
__license__ = "3-clause BSD License"
__contact__ = "theano-dev <theano-dev@googlegroups.com>"
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("theano.gof.op.Op")
def compute_test_value(node):
storage_map = {}
compute_map = {}
for i, ins in enumerate(node.inputs):
try:
storage_map[ins] = [ins.get_test_value()]
compute_map[ins] = [True]
except TestValueError:
if config.compute_test_value == "warn":
warnings.warn(
"Warning, Cannot compute test value: input %i (%s) of Op %s missing default value"
% (i, ins, node),
stacklevel=2,
)
return
elif config.compute_test_value == "raise":
detailed_err_msg = get_variable_trace_string(ins)
raise ValueError(
"Cannot compute test value: input %i (%s) of Op %s missing default value. %s"
% (i, ins, node, detailed_err_msg)
)
elif config.compute_test_value == "ignore":
return
elif config.compute_test_value == "pdb":
import pdb
pdb.post_mortem(sys.exc_info()[2])
else:
raise ValueError(
"%s is invalid for option config.compute_test_value"
% config.compute_test_value
)
# The original values should not be destroyed, so we copy the values of the
# inputs in `destroy_map`
destroyed_inputs_idx = set()
if getattr(node.op, "destroy_map", None):
for i_pos_list in node.op.destroy_map.values():
destroyed_inputs_idx.update(i_pos_list)
for inp_idx in destroyed_inputs_idx:
inp = node.inputs[inp_idx]
storage_map[inp] = [copy.copy(storage_map[inp][0])]
# Prepare `storage_map` and `compute_map` for the outputs
for o in node.outputs:
storage_map[o] = [None]
compute_map[o] = [False]
# Create a thunk that performs the computation
thunk = node.op.make_thunk(node, storage_map, compute_map, no_recycling=[])
thunk.inputs = [storage_map[v] for v in node.inputs]
thunk.outputs = [storage_map[v] for v in node.outputs]
required = thunk()
assert not required # We provided all inputs
for output in node.outputs:
# Check that the output has been computed
assert compute_map[output][0], (output, storage_map[output][0])
# Add 'test_value' to output tag, so that downstream `Op`s can use
# these numerical values as test values
output.tag.test_value = storage_map[output][0]
class CLinkerObject:
def c_headers(self):
raise MethodNotDefined("c_headers", type(self), self.__class__.__name__)
def c_header_dirs(self):
raise MethodNotDefined("c_header_dirs", type(self), self.__class__.__name__)
def c_libraries(self):
raise MethodNotDefined("c_libraries", type(self), self.__class__.__name__)
def c_lib_dirs(self):
raise MethodNotDefined("c_lib_dirs", type(self), self.__class__.__name__)
def c_support_code(self):
raise MethodNotDefined("c_support_code", type(self), self.__class__.__name__)
def c_code_cache_version(self):
return ()
def c_compile_args(self):
raise MethodNotDefined("c_compile_args", type(self), self.__class__.__name__)
def c_no_compile_args(self):
raise MethodNotDefined("c_no_compile_args", type(self), self.__class__.__name__)
def c_init_code(self):
raise MethodNotDefined("c_init_code", type(self), self.__class__.__name__)
class CLinkerOp(CLinkerObject):
def c_code(self, node, name, inputs, outputs, sub):
raise MethodNotDefined("%s.c_code" % self.__class__.__name__)
def c_code_cache_version_apply(self, node):
return self.c_code_cache_version()
def c_code_cleanup(self, node, name, inputs, outputs, sub):
raise MethodNotDefined("%s.c_code_cleanup" % self.__class__.__name__)
def c_support_code_apply(self, node, name):
raise MethodNotDefined(
"c_support_code_apply", type(self), self.__class__.__name__
)
def c_init_code_apply(self, node, name):
raise MethodNotDefined("c_init_code_apply", type(self), self.__class__.__name__)
def c_init_code_struct(self, node, name, sub):
raise MethodNotDefined(
"c_init_code_struct", type(self), self.__class__.__name__
)
def c_support_code_struct(self, node, name):
raise MethodNotDefined(
"c_support_code_struct", type(self), self.__class__.__name__
)
def c_cleanup_code_struct(self, node, name):
raise MethodNotDefined(
"c_cleanup_code_struct", type(self), self.__class__.__name__
)
class PureOp:
default_output = None
def make_node(self, *inputs):
raise MethodNotDefined("make_node", type(self), self.__class__.__name__)
def __call__(self, *inputs, **kwargs):
return_list = kwargs.pop("return_list", False)
node = self.make_node(*inputs, **kwargs)
if config.compute_test_value != "off":
compute_test_value(node)
if self.default_output is not None:
rval = node.outputs[self.default_output]
if return_list:
rval = [rval]
return rval
else:
if return_list:
return list(node.outputs)
elif len(node.outputs) == 1:
return node.outputs[0]
else:
return node.outputs
def __ne__(self, other):
return not (self == other)
# Convenience so that subclass implementers don't have to import utils
add_tag_trace = staticmethod(add_tag_trace)
def grad(self, inputs, output_grads):
raise NotImplementedError()
def L_op(self, inputs, outputs, output_grads):
return self.grad(inputs, output_grads)
def R_op(self, inputs, eval_points):
raise NotImplementedError()
def perform(self, node, inputs, output_storage, params=None):
raise MethodNotDefined(
"perform",
type(self),
self.__class__.__name__,
"Did you used Theano flags mode=FAST_COMPILE?"
" You can use optimizer=fast_compile instead.",
)
def do_constant_folding(self, node):
return True
class Op(object2, PureOp, CLinkerOp):
def get_params(self, node):
if hasattr(self, "params_type") and isinstance(
self.params_type, theano.gof.ParamsType
):
wrapper = self.params_type
if not all(hasattr(self, field) for field in wrapper.fields):
not_found = tuple(
field for field in wrapper.fields if not hasattr(self, field)
)
raise AttributeError(
"%s: missing attributes %s for ParamsType."
% (type(self).__name__, not_found)
)
# ParamsType.get_params() will apply filtering to attributes.
return self.params_type.get_params(self)
raise MethodNotDefined("get_params")
    def prepare_node(self, node, storage_map, compute_map, impl):
        pass
def make_c_thunk(self, node, storage_map, compute_map, no_recycling):
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
e = FunctionGraph(node.inputs, node.outputs)
e_no_recycling = [
new_o
for (new_o, old_o) in zip(e.outputs, node.outputs)
if old_o in no_recycling
]
cl = theano.gof.cc.CLinker().accept(e, no_recycling=e_no_recycling)
# float16 gets special treatment since running
# unprepared C code will get bad results.
if not getattr(self, "_f16_ok", False):
def is_f16(t):
return getattr(t, "dtype", "") == "float16"
if any(is_f16(i.type) for i in node.inputs) or any(
is_f16(o.type) for o in node.outputs
):
# get_dynamic_module is a subset of make_thunk that is reused.
                # This just tries to build the c code.
                # It will raise an error for ops that don't implement c code;
                # in those cases, we don't print the float16 warning below.
cl.get_dynamic_module()
print("Disabling C code for %s due to unsupported " "float16" % (self,))
raise NotImplementedError("float16")
_logger.debug("Trying CLinker.make_thunk")
outputs = cl.make_thunk(
input_storage=node_input_storage, output_storage=node_output_storage
)
thunk, node_input_filters, node_output_filters = outputs
def rval():
thunk()
for o in node.outputs:
compute_map[o][0] = True
rval.thunk = thunk
rval.cthunk = thunk.cthunk
rval.inputs = node_input_storage
rval.outputs = node_output_storage
rval.lazy = False
return rval
def make_py_thunk(self, node, storage_map, compute_map, no_recycling, debug=False):
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
if debug:
p = node.op.debug_perform
else:
p = node.op.perform
params = node.run_params()
if params is graph.NoParams:
# default arguments are stored in the closure of `rval`
def rval(p=p, i=node_input_storage, o=node_output_storage, n=node):
r = p(n, [x[0] for x in i], o)
for o in node.outputs:
compute_map[o][0] = True
return r
else:
params_val = node.params_type.filter(params)
def rval(
p=p,
i=node_input_storage,
o=node_output_storage,
n=node,
params=params_val,
):
r = p(n, [x[0] for x in i], o, params)
for o in node.outputs:
compute_map[o][0] = True
return r
rval.inputs = node_input_storage
rval.outputs = node_output_storage
rval.perform = p
rval.lazy = False
return rval
def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):
if (impl is None and theano.config.cxx) or impl == "c":
self.prepare_node(
node, storage_map=storage_map, compute_map=compute_map, impl="c"
)
try:
return self.make_c_thunk(node, storage_map, compute_map, no_recycling)
except (NotImplementedError, MethodNotDefined):
# We requested the c code, so don't catch the error.
if impl == "c":
raise
_logger.debug("Falling back on perform")
self.prepare_node(
node, storage_map=storage_map, compute_map=compute_map, impl="py"
)
return self.make_py_thunk(node, storage_map, compute_map, no_recycling)
def make_node(self, *inputs):
if not hasattr(self, "itypes"):
raise NotImplementedError(
"You can either define itypes and otypes,\
or implement make_node"
)
if not hasattr(self, "otypes"):
raise NotImplementedError(
"You can either define itypes and otypes,\
or implement make_node"
)
if len(inputs) != len(self.itypes):
raise ValueError(
"We expected %d inputs but got %d." % (len(self.itypes), len(inputs))
)
if not all(inp.type == it for inp, it in zip(inputs, self.itypes)):
raise TypeError(
"We expected inputs of types '%s' but got types '%s' "
% (str(self.itypes), str([inp.type for inp in inputs]))
)
return theano.Apply(self, inputs, [o() for o in self.otypes])
def get_test_value(v):
if not isinstance(v, graph.Variable):
v = theano.tensor.as_tensor_variable(v)
return v.get_test_value()
def missing_test_message(msg):
action = config.compute_test_value
if action == "raise":
raise TestValueError(msg)
elif action == "warn":
warnings.warn(msg, stacklevel=2)
else:
assert action in ["ignore", "off"]
def get_test_values(*args):
if config.compute_test_value == "off":
return []
rval = []
for i, arg in enumerate(args):
try:
rval.append(get_test_value(arg))
except TestValueError:
if hasattr(arg, "name") and arg.name is not None:
missing_test_message(
"Argument {} ('{}') has no test value".format(i, arg.name)
)
else:
missing_test_message("Argument {} has no test value".format(i))
return []
if len(rval) == 1:
return rval
return [tuple(rval)]
ops_with_inner_function = {}
class OpenMPOp(Op):
gxx_support_openmp = None
def __init__(self, openmp=None):
if openmp is None:
openmp = theano.config.openmp
self.openmp = openmp
def __setstate__(self, d):
self.__dict__.update(d)
if not hasattr(self, "openmp"):
self.openmp = False
def c_compile_args(self):
self.update_self_openmp()
if self.openmp:
return ["-fopenmp"]
return []
def c_headers(self):
self.update_self_openmp()
if self.openmp:
return ["omp.h"]
return []
@staticmethod
def test_gxx_support():
code = """
#include <omp.h>
int main( int argc, const char* argv[] )
{
int res[10];
for(int i=0; i < 10; i++){
res[i] = i;
}
}
"""
default_openmp = GCC_compiler.try_compile_tmp(
src_code=code, tmp_prefix="test_omp_", flags=["-fopenmp"], try_run=False
)
return default_openmp
def update_self_openmp(self):
if self.openmp:
if OpenMPOp.gxx_support_openmp is None:
OpenMPOp.gxx_support_openmp = OpenMPOp.test_gxx_support()
if not OpenMPOp.gxx_support_openmp:
warnings.warn(
"Your g++ compiler fails to compile OpenMP code. We"
" know this happen with some version of the EPD mingw"
" compiler and LLVM compiler on Mac OS X."
" We disable openmp everywhere in Theano."
" To remove this warning set the theano flags `openmp`"
" to False.",
stacklevel=3,
)
if OpenMPOp.gxx_support_openmp is False:
self.openmp = False
theano.config.openmp = False
def prepare_node(self, node, storage_map, compute_map, impl):
if impl == "c":
self.update_self_openmp()
def simple_meth(tag):
def f(self):
if tag in self.code_sections:
return self.code_sections[tag]
else:
raise MethodNotDefined("c_" + tag, type(self), type(self).__name__)
f.__name__ = "c_" + tag
return f
def apply_meth(tag):
def f(self, node, name):
if tag in self.code_sections:
code = self.code_sections[tag]
define_macros, undef_macros = self.get_c_macros(node, name)
return "\n".join(["", define_macros, code, undef_macros])
else:
raise MethodNotDefined("c_" + tag, type(self), type(self).__name__)
f.__name__ = "c_" + tag
return f
class COp(Op):
section_re = re.compile(r"^#section ([a-zA-Z0-9_]+)$", re.MULTILINE)
backward_re = re.compile(r"^THEANO_(APPLY|SUPPORT)_CODE_SECTION$", re.MULTILINE)
SECTIONS = {
"init_code",
"init_code_apply",
"init_code_struct",
"support_code",
"support_code_apply",
"support_code_struct",
"cleanup_code_struct",
"code",
"code_cleanup",
}
@classmethod
def get_path(cls, f):
if not os.path.isabs(f):
class_file = inspect.getfile(cls)
class_dir = os.path.dirname(class_file)
f = os.path.realpath(os.path.join(class_dir, f))
return f
def __init__(self, func_files, func_name=None):
if not isinstance(func_files, list):
func_files = [func_files]
self.func_name = func_name
self.func_files = func_files
self.load_c_code(func_files)
if len(self.code_sections) == 0:
raise ValueError("No sections where defined in C files")
if self.func_name is not None:
if "op_code" in self.code_sections:
raise ValueError(
'Cannot have an "op_code" section and ' "specify the func_name"
)
if "op_code_cleanup" in self.code_sections:
raise ValueError(
'Cannot have an "op_code_cleanup" section '
"and specify the func_name"
)
def load_c_code(self, func_files):
func_files = [self.get_path(f) for f in func_files]
self.func_codes = []
for func_file in func_files:
with open(func_file) as f:
self.func_codes.append(f.read())
old_markers_present = False
new_markers_present = False
for code in self.func_codes:
if self.backward_re.search(code):
old_markers_present = True
if self.section_re.search(code):
new_markers_present = True
if old_markers_present and new_markers_present:
raise ValueError(
"Both the new and the old syntax for "
"identifying code sections are present in the "
"provided C code. These two syntaxes should not "
"be used at the same time."
)
self.code_sections = dict()
for i, code in enumerate(self.func_codes):
if self.backward_re.search(code):
# This is backward compat code that will go away in a while
# Separate the code into the proper sections
split = self.backward_re.split(code)
n = 1
while n < len(split):
if split[n] == "APPLY":
self.code_sections["support_code_apply"] = split[n + 1]
elif split[n] == "SUPPORT":
self.code_sections["support_code"] = split[n + 1]
n += 2
continue
elif self.section_re.search(code):
# Check for code outside of the supported sections
split = self.section_re.split(code)
if split[0].strip() != "":
raise ValueError(
"Stray code before first #section "
"statement (in file %s): %s" % (func_files[i], split[0])
)
# Separate the code into the proper sections
n = 1
while n < len(split):
if split[n] not in self.SECTIONS:
raise ValueError(
"Unknown section type (in file %s): %s"
% (func_files[i], split[n])
)
if split[n] not in self.code_sections:
self.code_sections[split[n]] = ""
self.code_sections[split[n]] += split[n + 1]
n += 2
else:
raise ValueError(
"No valid section marker was found in file " "%s" % func_files[i]
)
def __get_op_params(self):
if hasattr(self, "params_type") and isinstance(
self.params_type, theano.gof.ParamsType
):
wrapper = self.params_type
params = [("PARAMS_TYPE", wrapper.name)]
for i in range(wrapper.length):
try:
# NB (reminder): These macros are currently used only in ParamsType example test
# (`theano/gof/tests/test_quadratic_function.c`), to demonstrate how we can
# access params dtypes when dtypes may change (e.g. if based on theano.config.floatX).
# But in practice, params types generally have fixed types per op.
params.append(
(
"DTYPE_PARAM_" + wrapper.fields[i],
wrapper.types[i].c_element_type(),
)
)
except MethodNotDefined:
pass
return params
return []
def c_code_cache_version(self):
version = (hash(tuple(self.func_codes)),)
if hasattr(self, "params_type"):
version += (self.params_type.c_code_cache_version(),)
return version
def c_init_code(self):
if "init_code" in self.code_sections:
return [self.code_sections["init_code"]]
else:
raise MethodNotDefined("c_init_code", type(self), type(self).__name__)
c_init_code_apply = apply_meth("init_code_apply")
c_support_code = simple_meth("support_code")
c_support_code_apply = apply_meth("support_code_apply")
c_support_code_struct = apply_meth("support_code_struct")
c_cleanup_code_struct = apply_meth("cleanup_code_struct")
def format_c_function_args(self, inp, out):
        # Generate a string containing the arguments sent to the external C
        # function. The argstring will be of the format:
# "input0, input1, input2, &output0, &output1"
inp = list(inp)
numi = getattr(self, "_cop_num_inputs", len(inp))
while len(inp) < numi:
inp.append("NULL")
out = ["&%s" % o for o in out]
numo = getattr(self, "_cop_num_outputs", len(out))
while len(out) < numo:
out.append("NULL")
return ", ".join(inp + out)
def get_c_macros(self, node, name, check_input=None):
define_template = "#define %s %s"
undef_template = "#undef %s"
define_macros = []
undef_macros = []
if check_input is None:
check_input = getattr(self, "check_input", True)
if check_input:
# Extract the various properties of the input and output variables
variables = node.inputs + node.outputs
variable_names = ["INPUT_%i" % i for i in range(len(node.inputs))] + [
"OUTPUT_%i" % i for i in range(len(node.outputs))
]
# Generate dtype macros
for i, v in enumerate(variables):
if not hasattr(v, "dtype"):
continue
vname = variable_names[i]
macro_name = "DTYPE_" + vname
macro_value = "npy_" + v.dtype
define_macros.append(define_template % (macro_name, macro_value))
undef_macros.append(undef_template % macro_name)
d = np.dtype(v.dtype)
macro_name = "TYPENUM_" + vname
macro_value = d.num
define_macros.append(define_template % (macro_name, macro_value))
undef_macros.append(undef_template % macro_name)
macro_name = "ITEMSIZE_" + vname
macro_value = d.itemsize
define_macros.append(define_template % (macro_name, macro_value))
undef_macros.append(undef_template % macro_name)
# Generate a macro to mark code as being apply-specific
define_macros.append(
define_template % ("APPLY_SPECIFIC(str)", "str##_%s" % name)
)
undef_macros.append(undef_template % "APPLY_SPECIFIC")
for n, v in self.__get_op_params():
define_macros.append(define_template % (n, v))
undef_macros.append(undef_template % (n,))
return "\n".join(define_macros), "\n".join(undef_macros)
def _lquote_macro(self, txt):
res = []
spl = txt.split("\n")
for l in spl[:-1]:
res.append(l + " \\")
res.append(spl[-1])
return "\n".join(res)
def get_sub_macros(self, sub):
define_macros = []
undef_macros = []
define_macros.append("#define FAIL {}".format(self._lquote_macro(sub["fail"])))
undef_macros.append("#undef FAIL")
if "params" in sub:
define_macros.append("#define PARAMS {}".format(sub["params"]))
undef_macros.append("#undef PARAMS")
return "\n".join(define_macros), "\n".join(undef_macros)
def get_io_macros(self, inputs, outputs):
define_macros = []
undef_macros = []
for i, inp in enumerate(inputs):
define_macros.append("#define INPUT_%d %s" % (i, inp))
undef_macros.append("#undef INPUT_%d" % (i,))
for i, out in enumerate(outputs):
define_macros.append("#define OUTPUT_%d %s" % (i, inp))
undef_macros.append("#undef OUTPUT_%d" % (i,))
def c_init_code_struct(self, node, name, sub):
if "init_code_struct" in self.code_sections:
op_code = self.code_sections["init_code_struct"]
def_macros, undef_macros = self.get_c_macros(node, name)
def_sub, undef_sub = self.get_sub_macros(sub)
return "\n".join(
["", def_macros, def_sub, op_code, undef_sub, undef_macros]
)
else:
raise MethodNotDefined(
"c_init_code_struct", type(self), type(self).__name__
)
def c_code(self, node, name, inp, out, sub):
if self.func_name is not None:
assert "code" not in self.code_sections
define_macros, undef_macros = self.get_c_macros(
node, name, check_input=False
)
params = ""
if "params" in sub:
params = ", {}".format(sub["params"])
# Generate the C code
return """
%(define_macros)s
{
if (%(func_name)s(%(func_args)s%(params)s) != 0) {
%(fail)s
}
}
%(undef_macros)s
""" % dict(
func_name=self.func_name,
fail=sub["fail"],
params=params,
func_args=self.format_c_function_args(inp, out),
define_macros=define_macros,
undef_macros=undef_macros,
)
else:
if "code" in self.code_sections:
op_code = self.code_sections["code"]
def_macros, undef_macros = self.get_c_macros(node, name)
def_sub, undef_sub = self.get_sub_macros(sub)
def_io, undef_io = self.get_io_macros(inp, out)
return "\n".join(
[
def_macros,
def_sub,
def_io,
op_code,
undef_io,
undef_sub,
undef_macros,
]
)
else:
raise MethodNotDefined("c_code", type(self), type(self).__name__)
def c_code_cleanup(self, node, name, inputs, outputs, sub):
if "code_cleanup" in self.code_sections:
op_code = self.code_sections["code_cleanup"]
def_macros, undef_macros = self.get_c_macros(node, name)
def_sub, undef_sub = self.get_sub_macros(sub)
def_io, undef_io = self.get_io_macros(inputs, outputs)
return "\n".join(
[
def_macros,
def_sub,
def_io,
op_code,
undef_io,
undef_sub,
undef_macros,
]
)
else:
raise MethodNotDefined("c_code_cleanup", type(self), type(self).__name__)
| true
| true
|
1c3f4ea74f0df3431f7567da5cfc25b5846263c0
| 904
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projects-DS/Data-Structures/lru_cache/lru_cache.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 5
|
2021-06-02T23:44:25.000Z
|
2021-12-27T16:21:57.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projects-DS/Data-Structures/lru_cache/lru_cache.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 22
|
2021-05-31T01:33:25.000Z
|
2021-10-18T18:32:39.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projects-DS/Data-Structures/lru_cache/lru_cache.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 3
|
2021-06-19T03:37:47.000Z
|
2021-08-31T00:49:51.000Z
|
from collections import OrderedDict


class LRUCache:
    def __init__(self, limit=10):
        # A minimal implementation sketch backed by collections.OrderedDict,
        # which keeps keys in insertion order and reorders them in O(1).
        self.limit = limit
        self.storage = OrderedDict()

    def get(self, key):
        """
        Retrieves the value associated with the given key. Also
        needs to move the key-value pair to the top of the order
        such that the pair is considered most-recently used.
        Returns the value associated with the key or None if the
        key-value pair doesn't exist in the cache.
        """
        if key not in self.storage:
            return None
        # Mark the entry as most-recently used before returning it.
        self.storage.move_to_end(key)
        return self.storage[key]

    def set(self, key, value):
        """
        Adds the given key-value pair to the cache. The newly-
        added pair should be considered the most-recently used
        entry in the cache. If the cache is already at max capacity
        before this entry is added, then the oldest entry in the
        cache needs to be removed to make room. Additionally, in the
        case that the key already exists in the cache, we simply
        want to overwrite the old value associated with the key with
        the newly-specified value.
        """
        # Overwrite (or insert) the value and mark it as most-recently used.
        self.storage[key] = value
        self.storage.move_to_end(key)
        if len(self.storage) > self.limit:
            # Evict the least-recently used entry (front of the ordering).
            self.storage.popitem(last=False)
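# Quick usage sketch for the implementation above:
#     cache = LRUCache(limit=2)
#     cache.set("a", 1); cache.set("b", 2)
#     cache.get("a")      # "a" becomes most-recently used
#     cache.set("c", 3)   # evicts "b", the least-recently used entry
#     assert cache.get("b") is None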
| 31.172414
| 62
| 0.702434
|
from collections import OrderedDict

class LRUCache:
    def __init__(self, limit=10):
        self.limit = limit
        self.storage = OrderedDict()
    def get(self, key):
        if key not in self.storage:
            return None
        self.storage.move_to_end(key)
        return self.storage[key]
    def set(self, key, value):
        self.storage[key] = value
        self.storage.move_to_end(key)
        if len(self.storage) > self.limit:
            self.storage.popitem(last=False)
| true
| true
|
1c3f4f64643e9db00643465b6d7976dc2f3815f0
| 23,787
|
py
|
Python
|
code/run_fewrel.py
|
Riroaki/ERNIE
|
ad59b5e7cbad83247a123705ef2d64f65d0cfbf7
|
[
"MIT"
] | 1,382
|
2019-05-17T07:55:01.000Z
|
2022-03-30T02:47:06.000Z
|
all_exp/ernie/code/code/run_fewrel.py
|
yifan-h/GCS_KI
|
5d5c68832aa37cefb1d01723c35fc3d74482c8c2
|
[
"MIT"
] | 80
|
2019-05-20T01:45:37.000Z
|
2022-02-17T14:33:17.000Z
|
all_exp/ernie/code/code/run_fewrel.py
|
yifan-h/GCS_KI
|
5d5c68832aa37cefb1d01723c35fc3d74482c8c2
|
[
"MIT"
] | 257
|
2019-05-18T10:39:18.000Z
|
2022-03-21T15:23:41.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import simplejson as json
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from knowledge_bert.tokenization import BertTokenizer
from knowledge_bert.modeling import BertForSequenceClassification
from knowledge_bert.optimization import BertAdam
from knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
                Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, input_ent, ent_mask, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.input_ent = input_ent
self.ent_mask = ent_mask
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_json(cls, input_file):
with open(input_file, "r", encoding='utf-8') as f:
return json.loads(f.read())
class FewrelProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
examples = self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
labels = set([x.label for x in examples])
return examples, list(labels)
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_labels(self):
"""Useless"""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
for x in line['ents']:
if x[1] == 1:
x[1] = 0
text_a = (line['text'], line['ents'])
label = line['label']
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, threshold):
"""Loads a data file into a list of `InputBatch`s."""
label_list = sorted(label_list)
label_map = {label : i for i, label in enumerate(label_list)}
entity2id = {}
with open("kg_embed/entity2id.txt") as fin:
fin.readline()
for line in fin:
qid, eid = line.strip().split('\t')
entity2id[qid] = int(eid)
features = []
for (ex_index, example) in enumerate(examples):
ex_text_a = example.text_a[0]
h, t = example.text_a[1]
h_name = ex_text_a[h[1]:h[2]]
t_name = ex_text_a[t[1]:t[2]]
# Add [HD] and [TL], which are "#" and "$" respectively.
if h[1] < t[1]:
ex_text_a = ex_text_a[:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:]
else:
ex_text_a = ex_text_a[:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:]
if h[1] < t[1]:
h[1] += 2
h[2] += 2
t[1] += 6
t[2] += 6
else:
h[1] += 6
h[2] += 6
t[1] += 2
t[2] += 2
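        # Illustrative example of the marker insertion above: for the text
        # "Paris is in France" with head "Paris" and tail "France", ex_text_a
        # becomes "# Paris # is in $ France $"; the earlier mention shifts by
        # 2 (its two-character prefix) and the later one by 6 (the 4 marker
        # characters inserted around the first mention plus its own prefix).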
tokens_a, entities_a = tokenizer.tokenize(ex_text_a, [h, t])
if len([x for x in entities_a if x!="UNK"]) != 2:
            print(entities_a, len([x for x in entities_a if x != "UNK"]))
exit(1)
tokens_b = None
if example.text_b:
tokens_b, entities_b = tokenizer.tokenize(example.text_b[0], [x for x in example.text_b[1] if x[-1]>threshold])
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, entities_a, entities_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
entities_a = entities_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
ents = ["UNK"] + entities_a + ["UNK"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
ents += entities_b + ["UNK"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ent = []
ent_mask = []
for ent in ents:
if ent != "UNK" and ent in entity2id:
input_ent.append(entity2id[ent])
ent_mask.append(1)
else:
input_ent.append(-1)
ent_mask.append(0)
ent_mask[0] = 1
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
padding_ = [-1] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
input_ent += padding_
ent_mask += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(input_ent) == max_seq_length
assert len(ent_mask) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("ents: %s" % " ".join(
[str(x) for x in ents]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
input_ent=input_ent,
ent_mask=ent_mask,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, ents_a, ents_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
ents_a.pop()
else:
tokens_b.pop()
ents_b.pop()
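# e.g. with max_length=5, sequences of lengths 4 and 3 are trimmed from the
# longer side one token at a time, ending at lengths 3 and 2.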
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
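# e.g. with warmup=0.1, warmup_linear(0.05) == 0.5 (halfway through warmup)
# and warmup_linear(0.5) == 1.0 (the rate multiplier is constant afterwards).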
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--ernie_model", default=None, type=str, required=True,
help="Ernie pre-trained model")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--threshold', type=float, default=.3)
args = parser.parse_args()
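    # Example invocation (paths and model name are hypothetical):
    #     python run_fewrel.py --data_dir data/fewrel --ernie_model ernie_base \
    #         --output_dir out/fewrel --do_train --train_batch_size 32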
processors = FewrelProcessor
num_labels_task = 80
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
processor = processors()
num_labels = num_labels_task
label_list = None
tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_steps = None
train_examples, label_list = processor.get_train_examples(args.data_dir)
num_train_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model,
cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
num_labels = num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
global_step = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, args.threshold)
vecs = []
vecs.append([0]*100)
with open("kg_embed/entity2vec.vec", 'r') as fin:
for line in fin:
vec = line.strip().split('\t')
vec = [float(x) for x in vec]
vecs.append(vec)
embed = torch.FloatTensor(vecs)
embed = torch.nn.Embedding.from_pretrained(embed)
#embed = torch.nn.Embedding(5041175, 100)
logger.info("Shape of entity embedding: "+str(embed.weight.size()))
del vecs
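        # Row 0 of the embedding is the all-zero vector appended first above,
        # so the padding id -1 can be shifted to 0 via `embed(input_ent + 1)`
        # in the training loop below.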
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)
all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
output_loss_file = os.path.join(args.output_dir, "loss")
loss_fout = open(output_loss_file, 'w')
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))
input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch
input_ent = embed(input_ent+1).to(device) # -1 -> 0
loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
loss_fout.write("{}\n".format(loss.item()))
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
torch.save(model_to_save.state_dict(), output_model_file)
# Save a trained model
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if __name__ == "__main__":
main()
| 42.859459
| 130
| 0.59213
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import simplejson as json
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from knowledge_bert.tokenization import BertTokenizer
from knowledge_bert.modeling import BertForSequenceClassification
from knowledge_bert.optimization import BertAdam
from knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, input_ent, ent_mask, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.input_ent = input_ent
self.ent_mask = ent_mask
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_json(cls, input_file):
with open(input_file, "r", encoding='utf-8') as f:
return json.loads(f.read())
class FewrelProcessor(DataProcessor):
def get_train_examples(self, data_dir):
examples = self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
labels = set([x.label for x in examples])
return examples, list(labels)
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
for x in line['ents']:
if x[1] == 1:
x[1] = 0
text_a = (line['text'], line['ents'])
label = line['label']
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, threshold):
label_list = sorted(label_list)
label_map = {label : i for i, label in enumerate(label_list)}
entity2id = {}
with open("kg_embed/entity2id.txt") as fin:
fin.readline()
for line in fin:
qid, eid = line.strip().split('\t')
entity2id[qid] = int(eid)
features = []
for (ex_index, example) in enumerate(examples):
ex_text_a = example.text_a[0]
h, t = example.text_a[1]
h_name = ex_text_a[h[1]:h[2]]
t_name = ex_text_a[t[1]:t[2]]
if h[1] < t[1]:
ex_text_a = ex_text_a[:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:]
else:
ex_text_a = ex_text_a[:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:]
if h[1] < t[1]:
h[1] += 2
h[2] += 2
t[1] += 6
t[2] += 6
else:
h[1] += 6
h[2] += 6
t[1] += 2
t[2] += 2
tokens_a, entities_a = tokenizer.tokenize(ex_text_a, [h, t])
if len([x for x in entities_a if x!="UNK"]) != 2:
            print(entities_a, len([x for x in entities_a if x != "UNK"]))
exit(1)
tokens_b = None
if example.text_b:
tokens_b, entities_b = tokenizer.tokenize(example.text_b[0], [x for x in example.text_b[1] if x[-1]>threshold])
_truncate_seq_pair(tokens_a, tokens_b, entities_a, entities_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
entities_a = entities_a[:(max_seq_length - 2)]
"[CLS]"] + tokens_a + ["[SEP]"]
ents = ["UNK"] + entities_a + ["UNK"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
ents += entities_b + ["UNK"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ent = []
ent_mask = []
for ent in ents:
if ent != "UNK" and ent in entity2id:
input_ent.append(entity2id[ent])
ent_mask.append(1)
else:
input_ent.append(-1)
ent_mask.append(0)
ent_mask[0] = 1
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
padding_ = [-1] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
input_ent += padding_
ent_mask += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(input_ent) == max_seq_length
assert len(ent_mask) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("ents: %s" % " ".join(
[str(x) for x in ents]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
input_ent=input_ent,
ent_mask=ent_mask,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, ents_a, ents_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
ents_a.pop()
else:
tokens_b.pop()
ents_b.pop()
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--ernie_model", default=None, type=str, required=True,
help="Ernie pre-trained model")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--threshold', type=float, default=.3)
args = parser.parse_args()
processors = FewrelProcessor
num_labels_task = 80
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
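    # Each forward/backward pass uses a micro-batch; the effective batch size per
    # optimizer step stays at the value originally requested on the command line.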
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
    if not args.do_train:
        raise ValueError("`do_train` must be True: this script only performs training.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
processor = processors()
num_labels = num_labels_task
label_list = None
tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_steps = None
train_examples, label_list = processor.get_train_examples(args.data_dir)
num_train_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model,
cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
num_labels = num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
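    # Biases and LayerNorm parameters are conventionally exempt from weight decay when
    # fine-tuning BERT-style models; only the remaining weights get weight_decay=0.01 below.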
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
global_step = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, args.threshold)
vecs = []
vecs.append([0]*100)
with open("kg_embed/entity2vec.vec", 'r') as fin:
for line in fin:
vec = line.strip().split('\t')
vec = [float(x) for x in vec]
vecs.append(vec)
embed = torch.FloatTensor(vecs)
embed = torch.nn.Embedding.from_pretrained(embed)
#embed = torch.nn.Embedding(5041175, 100)
logger.info("Shape of entity embedding: "+str(embed.weight.size()))
del vecs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)
all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
output_loss_file = os.path.join(args.output_dir, "loss")
loss_fout = open(output_loss_file, 'w')
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))  # keep input_ent (index 3) on CPU for the embedding lookup below
input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch
                input_ent = embed(input_ent+1).to(device)  # shift ids by +1 so pad (-1) maps to the all-zero row 0
loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
loss_fout.write("{}\n".format(loss.item()))
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
torch.save(model_to_save.state_dict(), output_model_file)
# Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if __name__ == "__main__":
main()
# ---- 00-modules/builtin_modules/shutil_examples.py (repo: cccaaannn/useful_functions, license: MIT) ----
import shutil
# https://docs.python.org/3/library/shutil.html
src = ""
dst = ""
# move
shutil.move(src, dst)
# copy
shutil.copy(src, dst)
# copy2 also copies metadata
shutil.copy2(src, dst)
# copyfile is faster, but it only copies file contents (no metadata or permission bits)
shutil.copyfile(src, dst)
# copystat copies only the metadata (permission bits, timestamps) from src to dst
shutil.copystat(src, dst)
# copytree copies entire directories
# for ignoring patterns
ignore = shutil.ignore_patterns('*.pyc', 'tmp*')
shutil.copytree(src, dst, ignore=ignore)
# removes entire directories
shutil.rmtree(src)
# zipdir; make_archive appends the ".zip" extension itself, so pass the base name
shutil.make_archive(dst, 'zip', src)
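# unzip (complementary sketch, not in the original list); unpack_archive infers the
# archive format from the extension when none is given
shutil.unpack_archive(dst + ".zip", dst)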
1c3f51f3327cf445677987a7a1e883c763e01bb9
| 82,241
|
py
|
Python
|
mindspore/python/mindspore/train/serialization.py
|
glucklichste/mindspore
|
9df63697af663836fc18d03fef40715f093a3fa1
|
[
"Apache-2.0"
] | null | null | null |
mindspore/python/mindspore/train/serialization.py
|
glucklichste/mindspore
|
9df63697af663836fc18d03fef40715f093a3fa1
|
[
"Apache-2.0"
] | null | null | null |
mindspore/python/mindspore/train/serialization.py
|
glucklichste/mindspore
|
9df63697af663836fc18d03fef40715f093a3fa1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model and parameters serialization."""
import copy
import json
import os
import shutil
import stat
import threading
from threading import Thread, Lock
from collections import defaultdict
import math
import sys
import time
import numpy as np
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.mind_ir_pb2 import ModelProto as mindir_model
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap, ParallelLayouts, ParallelGroupMap
from mindspore.train.print_pb2 import Print
import mindspore
import mindspore.nn as nn
from mindspore import context
from mindspore import log as logger
from mindspore._checkparam import check_input_data, check_input_dataset, Validator
from mindspore.common import dtype as mstype
from mindspore.common.api import _cell_graph_executor as _executor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.communication.management import get_rank, get_group_size
from mindspore.compression.export import quant_export
from mindspore.parallel._cell_wrapper import get_allgather_cell
from mindspore.parallel._tensor import _load_tensor, _get_tensor_strategy, _get_tensor_slice_index
from mindspore.parallel._tensor import _reshape_param_data
from mindspore.parallel._tensor import _reshape_param_data_with_weight
from mindspore.parallel._utils import _infer_rank_list, _remove_repeated_slices
from .._c_expression import load_mindir, _encrypt, _decrypt, _is_cipher_file
tensor_to_ms_type = {"Int8": mstype.int8, "UInt8": mstype.uint8, "Int16": mstype.int16, "UInt16": mstype.uint16,
"Int32": mstype.int32, "UInt32": mstype.uint32, "Int64": mstype.int64, "UInt64": mstype.uint64,
"Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
"Bool": mstype.bool_}
tensor_to_np_type = {"Int8": np.int8, "UInt8": np.uint8, "Int16": np.int16, "UInt16": np.uint16,
"Int32": np.int32, "UInt32": np.uint32, "Int64": np.int64, "UInt64": np.uint64,
"Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}
_ckpt_mutex = Lock()
# Units of the size constants below are KB
SLICE_SIZE = 512 * 1024  # 512 MB: max size of a single serialized parameter slice
PROTO_LIMIT_SIZE = 1024 * 1024 * 2  # 2 GB: protobuf size limit checked before ONNX export
TOTAL_SAVE = 1024 * 1024  # 1 GB: above this, MindIR graph and parameters are saved separately
PARAMETER_SPLIT_SIZE = 1024 * 1024 * 1024  # 1 TB: max size of one external parameter data file
def _special_process_par(par, new_par):
"""
Processes the special condition.
    Like (12,2048,1,1)->(12,2048), this case is caused by GE's 4-dimensional tensors.
"""
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
if new_par_shape_len <= par_shape_len:
return False
for i in range(new_par_shape_len - par_shape_len):
if new_par.data.shape[par_shape_len + i] != 1:
return False
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape)
par.set_data(Tensor(new_val, par.data.dtype))
return True
def _update_param(param, new_param, strict_load):
"""Updates param's data from new_param's data."""
if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
if param.data.shape != new_param.data.shape:
if not _special_process_par(param, new_param):
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'net' should have the same shape "
f"as {param.name} in the argument 'parameter_dict'. But got its shape {param.data.shape} in"
f" the argument 'net' and shape {new_param.data.shape} in the argument 'parameter_dict'."
f"May you need to check whether the checkpoint you loaded is correct or the batch size and "
f"so on in the 'net' and 'parameter_dict' are same.")
raise RuntimeError(msg)
if param.data.dtype != new_param.data.dtype:
if _type_convert(param, new_param, strict_load):
new_tensor = Tensor(new_param.data.asnumpy(), param.data.dtype)
param.set_data(new_tensor)
return
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'net' should have the same type as "
f"{param.name} in the argument 'parameter_dict'. but got its type {param.data.dtype} in the "
f"argument 'net' and type {new_param.data.dtype} in the argument 'parameter_dict'."
f"May you need to check whether the checkpoint you loaded is correct.")
raise RuntimeError(msg)
param.set_data(new_param.data, param.sliced)
return
if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
if param.data.shape != (1,) and param.data.shape != ():
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'parameter_dict' is "
f"scalar, then the shape of {param.name} in the argument 'net' should be "
f"(1,) or (), but got shape {param.data.shape}."
f"May you need to check whether the checkpoint you loaded is correct.")
raise RuntimeError(msg)
param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))
elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'parameter_dict' is Tensor, "
f"then {param.name} in the argument 'net' also should be Tensor, but got {type(param.data)}."
f"May you need to check whether the checkpoint you loaded is correct.")
raise RuntimeError(msg)
else:
param.set_data(type(param.data)(new_param.data))
def _type_convert(param, new_param, strict_load):
"""Whether to convert parameter's type during load checkpoint into network."""
float_type = (mstype.float16, mstype.float32, mstype.float64)
int_type = (mstype.int8, mstype.int16, mstype.int32, mstype.int64)
if not strict_load and ({param.data.dtype, new_param.data.dtype}.issubset(float_type) or
{param.data.dtype, new_param.data.dtype}.issubset(int_type)):
logger.warning(f"The type of {new_param.name}:{new_param.data.dtype} in 'parameter_dict' is different from "
f"the type of it in 'net':{param.data.dtype}, then the type convert from "
f"{new_param.data.dtype} to {param.data.dtype} in the network.")
return True
return False
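# Illustration of the relaxed rule above: with strict_load=False, a float16 value from a
# checkpoint can be loaded into a float32 parameter (both are float types) with a warning,
# whereas a float32 -> int32 mismatch is still rejected.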
def _exec_save(ckpt_file_name, data_list, enc_key=None, enc_mode="AES-GCM"):
"""Execute the process of saving checkpoint into file."""
try:
with _ckpt_mutex:
if os.path.exists(ckpt_file_name):
os.chmod(ckpt_file_name, stat.S_IWUSR)
os.remove(ckpt_file_name)
with open(ckpt_file_name, "ab") as f:
if enc_key is not None:
plain_data = bytes(0)
cipher_data = bytes(0)
for name, value in data_list.items():
data_size = value[2].nbytes / 1024
if data_size > SLICE_SIZE:
slice_count = math.ceil(data_size / SLICE_SIZE)
param_slice_list = np.array_split(value[2], slice_count)
else:
param_slice_list = [value[2]]
for param_slice in param_slice_list:
checkpoint_list = Checkpoint()
param_value = checkpoint_list.value.add()
param_value.tag = name
param_tensor = param_value.tensor
param_tensor.dims.extend(value[0])
param_tensor.tensor_type = value[1]
param_tensor.tensor_content = param_slice.tobytes()
if enc_key is None:
f.write(checkpoint_list.SerializeToString())
else:
plain_data += checkpoint_list.SerializeToString()
max_block_size = SLICE_SIZE * 1024
while len(plain_data) >= max_block_size:
cipher_data += _encrypt(plain_data[0: max_block_size], max_block_size, enc_key,
len(enc_key), enc_mode)
plain_data = plain_data[max_block_size:]
if enc_key is not None:
if plain_data:
cipher_data += _encrypt(plain_data, len(plain_data), enc_key, len(enc_key), enc_mode)
f.write(cipher_data)
os.chmod(ckpt_file_name, stat.S_IRUSR)
except BaseException as e:
logger.critical("Failed to save the checkpoint file %s. Maybe don't have the permission to write files, "
"or the disk space is insufficient and so on.", ckpt_file_name)
raise e
def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True,
async_save=False, append_dict=None, enc_key=None, enc_mode="AES-GCM"):
"""
Save checkpoint to a specified file.
Args:
save_obj (Union[Cell, list]): The cell object or data list(each element is a dictionary, like
[{"name": param_name, "data": param_data},...], the type of
param_name would be string, and the type of param_data would
be parameter or Tensor).
ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
        integrated_save (bool): Whether to perform integrated save in the automatic model parallel scene. Default: True
async_save (bool): Whether to open an independent thread to save the checkpoint file. Default: False
append_dict (dict): Additional information that needs to be saved. The key of dict must be str,
                            the value of dict must be one of int, float and bool. Default: None
enc_key (Union[None, bytes]): Byte type key used for encryption. If the value is None, the encryption
is not required. Default: None.
enc_mode (str): This parameter is valid only when enc_key is not set to None. Specifies the encryption
mode, currently supports 'AES-GCM' and 'AES-CBC'. Default: 'AES-GCM'.
Raises:
        TypeError: If the parameter save_obj is not `nn.Cell` or list type. Or if the
            parameters `integrated_save` and `async_save` are not of bool type.
Examples:
>>> from mindspore import save_checkpoint
>>>
>>> net = Net()
>>> save_checkpoint(net, "lenet.ckpt")
"""
if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):
raise TypeError("For 'save_checkpoint', the argument 'save_obj' should be nn.Cell or list, "
"but got {}.".format(type(save_obj)))
integrated_save = Validator.check_bool(integrated_save)
async_save = Validator.check_bool(async_save)
append_dict = _check_append_dict(append_dict)
enc_key = Validator.check_isinstance('enc_key', enc_key, (type(None), bytes))
enc_mode = Validator.check_isinstance('enc_mode', enc_mode, str)
logger.info("Execute the process of saving checkpoint files.")
if isinstance(save_obj, nn.Cell):
save_obj.init_parameters_data()
param_dict = {}
for _, param in save_obj.parameters_and_names():
param_dict[param.name] = param
param_list = []
for (key, value) in param_dict.items():
each_param = {"name": key}
param_data = Tensor(value.data)
# in automatic model parallel scenario, some parameters were split to all the devices,
# which should be combined before saving
if key in save_obj.parameter_layout_dict:
param_data = _get_merged_param_data(save_obj, key, param_data, integrated_save)
each_param["data"] = param_data
param_list.append(each_param)
save_obj = param_list
if append_dict:
append_info_list = []
for k_name, value in append_dict.items():
append_info_list.append({"name": k_name, "data": Tensor(value)})
save_obj.extend(append_info_list)
data_list = {}
with _ckpt_mutex:
for param in save_obj:
key = param["name"]
data_list[key] = []
if isinstance(param["data"], Parameter):
param["data"].init_data()
dims = []
if param['data'].shape == ():
dims.append(0)
else:
for dim in param['data'].shape:
dims.append(dim)
data_list[key].append(dims)
tensor_type = str(param["data"].dtype)
data_list[key].append(tensor_type)
data = param["data"].asnumpy().reshape(-1)
data_list[key].append(data)
ckpt_file_name = os.path.realpath(ckpt_file_name)
if async_save:
data_copy = copy.deepcopy(data_list)
thr = Thread(target=_exec_save, args=(ckpt_file_name, data_copy, enc_key, enc_mode), name="asyn_save_ckpt")
thr.start()
else:
_exec_save(ckpt_file_name, data_list, enc_key, enc_mode)
logger.info("Saving checkpoint process is finished.")
def _check_param_prefix(filter_prefix, param_name):
"""Checks whether the prefix of parameter name matches the given filter_prefix."""
for prefix in filter_prefix:
if param_name.find(prefix) == 0 \
and (param_name == prefix or param_name[len(prefix)] == "." or (prefix and prefix[-1] == ".")):
return True
return False
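# Illustration of the matching rule above: filter_prefix "conv1" matches "conv1" and
# "conv1.weight" but not "conv10.weight"; a prefix ending in "." (e.g. "conv1.") matches
# any parameter name that starts with it.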
def _check_append_dict(append_dict):
"""Check the argument append_dict for save_checkpoint."""
if append_dict is None:
return append_dict
if not isinstance(append_dict, dict):
raise TypeError("For 'save_checkpoint', the argument 'append_dict' must be dict, but got "
"{}.".format(type(append_dict)))
for key, value in append_dict.items():
if not isinstance(key, str) or not isinstance(value, (int, float, bool)):
raise TypeError(f"For 'save_checkpoint', the type of dict 'append_info' must be key: string, "
f"value: int, float or bool, but got key: {type(key)}, value: {type(value)}")
return append_dict
def load(file_name, **kwargs):
"""
Load MindIR.
The returned object can be executed by a `GraphCell`, see class :class:`mindspore.nn.GraphCell` for more details.
Args:
file_name (str): MindIR file name.
kwargs (dict): Configuration options dictionary.
- dec_key (bytes): Byte type key used for decryption. The valid length is 16, 24, or 32.
- dec_mode (str): Specifies the decryption mode, to take effect when dec_key is set.
Option: 'AES-GCM' | 'AES-CBC'. Default: 'AES-GCM'.
Returns:
        Object, a compiled graph that can be executed by `GraphCell`.
Raises:
ValueError: MindIR file name is incorrect.
RuntimeError: Failed to parse MindIR file.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, export, load
>>>
>>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
>>> input_tensor = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
>>> export(net, input_tensor, file_name="net", file_format="MINDIR")
>>> graph = load("net.mindir")
>>> net = nn.GraphCell(graph)
>>> output = net(input_tensor)
>>> print(output)
[[[[4. 6. 4.]
[6. 9. 6.]
[4. 6. 4.]]]]
"""
if not isinstance(file_name, str):
raise ValueError("For 'load', the argument 'file_name' must be string, but "
"got {}.".format(type(file_name)))
if not file_name.endswith(".mindir"):
raise ValueError("For 'load', the argument 'file_name'(MindIR file) should end with '.mindir', "
"please input the correct 'file_name'.")
if not os.path.exists(file_name):
raise ValueError("For 'load', the argument 'file_name'(MindIR file) does not exist, "
"please check whether the 'file_name' is correct.")
file_name = os.path.realpath(file_name)
logger.info("Execute the process of loading mindir.")
if 'dec_key' in kwargs.keys():
dec_key = Validator.check_isinstance('dec_key', kwargs['dec_key'], bytes)
dec_mode = 'AES-GCM'
if 'dec_mode' in kwargs.keys():
dec_mode = Validator.check_isinstance('dec_mode', kwargs['dec_mode'], str)
graph = load_mindir(file_name, dec_key=dec_key, key_len=len(dec_key), dec_mode=dec_mode)
else:
graph = load_mindir(file_name)
if graph is None:
if _is_cipher_file(file_name):
raise RuntimeError("Load MindIR failed. The file may be encrypted and decrypt failed, you "
"can check whether the values of the arguments 'dec_key' and 'dec_mode'"
" are the same as when exported MindIR file.")
raise RuntimeError("Load MindIR failed.")
return graph
def load_checkpoint(ckpt_file_name, net=None, strict_load=False, filter_prefix=None, dec_key=None, dec_mode="AES-GCM"):
"""
Load checkpoint info from a specified file.
Args:
ckpt_file_name (str): Checkpoint file name.
net (Cell): The network where the parameters will be loaded. Default: None
        strict_load (bool): Whether to strictly load the parameters into net. If False, a parameter
                            is still loaded when its name suffix in the checkpoint file matches a
                            parameter in the network, and inconsistent types of the same category
                            are converted, such as float32 to float16. Default: False.
filter_prefix (Union[str, list[str], tuple[str]]): Parameters starting with the filter_prefix
will not be loaded. Default: None.
dec_key (Union[None, bytes]): Byte type key used for decryption. If the value is None, the decryption
is not required. Default: None.
dec_mode (str): This parameter is valid only when dec_key is not set to None. Specifies the decryption
mode, currently supports 'AES-GCM' and 'AES-CBC'. Default: 'AES-GCM'.
Returns:
Dict, key is parameter name, value is a Parameter.
Raises:
ValueError: Checkpoint file is incorrect.
Examples:
>>> from mindspore import load_checkpoint
>>>
>>> ckpt_file_name = "./checkpoint/LeNet5-1_32.ckpt"
>>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix="conv1")
>>> print(param_dict["conv2.weight"])
Parameter (name=conv2.weight, shape=(16, 6, 5, 5), dtype=Float32, requires_grad=True)
"""
ckpt_file_name, filter_prefix = _check_checkpoint_param(ckpt_file_name, filter_prefix)
dec_key = Validator.check_isinstance('dec_key', dec_key, (type(None), bytes))
dec_mode = Validator.check_isinstance('dec_mode', dec_mode, str)
logger.info("Execute the process of loading checkpoint files.")
checkpoint_list = Checkpoint()
try:
if dec_key is None:
with open(ckpt_file_name, "rb") as f:
pb_content = f.read()
else:
pb_content = _decrypt(ckpt_file_name, dec_key, len(dec_key), dec_mode)
if pb_content is None:
raise ValueError("For 'load_checkpoint', Failed to decrypt the checkpoint file.")
checkpoint_list.ParseFromString(pb_content)
except BaseException as e:
if _is_cipher_file(ckpt_file_name):
logger.critical("Failed to read the checkpoint file '%s'. The file may be encrypted, please pass in the "
"correct 'dec_key'.", ckpt_file_name)
else:
logger.critical("Failed to read the checkpoint file '%s' , may not have permission to read it, please "
"check the correct of the file.", ckpt_file_name)
raise ValueError(e.__str__() + "\nFor 'load_checkpoint', failed to read the checkpoint file {}, may not have "
"permission to read it.".format(ckpt_file_name))
parameter_dict = {}
try:
param_data_list = []
for element_id, element in enumerate(checkpoint_list.value):
if filter_prefix is not None and _check_param_prefix(filter_prefix, element.tag):
continue
data = element.tensor.tensor_content
data_type = element.tensor.tensor_type
np_type = tensor_to_np_type[data_type]
ms_type = tensor_to_ms_type[data_type]
element_data = np.frombuffer(data, np_type)
param_data_list.append(element_data)
if (element_id == len(checkpoint_list.value) - 1) or \
(element.tag != checkpoint_list.value[element_id + 1].tag):
param_data = np.concatenate((param_data_list), axis=0)
param_data_list.clear()
dims = element.tensor.dims
if dims == [0]:
if 'Float' in data_type:
param_data = float(param_data[0])
elif 'Int' in data_type:
param_data = int(param_data[0])
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
elif dims == [1]:
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
else:
param_dim = []
for dim in dims:
param_dim.append(dim)
param_value = param_data.reshape(param_dim)
parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)
logger.info("Loading checkpoint files process is finished.")
except BaseException as e:
logger.critical("Failed to load the checkpoint file '%s'.", ckpt_file_name)
raise ValueError(e.__str__() + "\nFailed to load the checkpoint file {}.".format(ckpt_file_name))
if not parameter_dict:
raise ValueError(f"The loaded parameter dict is empty after filtering, please check whether "
f"'filter_prefix' was set to filter out all parameters.")
if net is not None:
load_param_into_net(net, parameter_dict, strict_load)
return parameter_dict
def _check_checkpoint_param(ckpt_file_name, filter_prefix=None):
"""Check function load_checkpoint's parameter."""
if not isinstance(ckpt_file_name, str):
raise ValueError("For 'load_checkpoint', the argument 'ckpt_file_name' must be string, "
"but got {}.".format(type(ckpt_file_name)))
if not os.path.exists(ckpt_file_name):
raise ValueError("For 'load_checkpoint', the checkpoint file does not exist, please check "
"whether the 'ckpt_file_name' is correct.")
if ckpt_file_name[-5:] != ".ckpt":
raise ValueError("For 'load_checkpoint', the checkpoint file should end with '.ckpt', please "
"input the correct 'ckpt_file_name'.")
ckpt_file_name = os.path.realpath(ckpt_file_name)
if filter_prefix is not None:
if not isinstance(filter_prefix, (str, list, tuple)):
raise TypeError(f"For 'load_checkpoint', the type of 'filter_prefix' must be string, "
f"list[string] or tuple[string] when 'filter_prefix' is not None, but "
f"got {str(type(filter_prefix))}.")
if isinstance(filter_prefix, str):
filter_prefix = (filter_prefix,)
if not filter_prefix:
raise ValueError("For 'load_checkpoint', the 'filter_prefix' can't be empty when "
"'filter_prefix' is list or tuple.")
for index, prefix in enumerate(filter_prefix):
if not isinstance(prefix, str):
raise TypeError(f"For 'load_checkpoint', when 'filter_prefix' is list or tuple, "
f"the element in 'filter_prefix' must be string, but got "
f"{str(type(prefix))} at index {index}.")
return ckpt_file_name, filter_prefix
def load_param_into_net(net, parameter_dict, strict_load=False):
"""
Load parameters into network.
Args:
net (Cell): The network where the parameters will be loaded.
parameter_dict (dict): The dictionary generated by load checkpoint file.
        strict_load (bool): Whether to strictly load the parameters into net. If False, a parameter
                            is still loaded when its name suffix in the checkpoint file matches a
                            parameter in the network, and inconsistent types of the same category
                            are converted, such as float32 to float16. Default: False.
Returns:
        List, the parameter names that are not loaded into the network.
Raises:
TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.
Examples:
>>> from mindspore import load_checkpoint, load_param_into_net
>>>
>>> net = Net()
>>> ckpt_file_name = "./checkpoint/LeNet5-1_32.ckpt"
>>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix="conv1")
>>> param_not_load = load_param_into_net(net, param_dict)
>>> print(param_not_load)
['conv1.weight']
"""
if not isinstance(net, nn.Cell):
logger.critical("Failed to combine the net and the parameters.")
msg = ("For 'load_param_into_net', the argument 'net' should be a Cell, but got {}.".format(type(net)))
raise TypeError(msg)
if not isinstance(parameter_dict, dict):
logger.critical("Failed to combine the net and the parameters.")
msg = ("For 'load_param_into_net', the argument 'parameter_dict' should be a dict, "
"but got {}.".format(type(parameter_dict)))
raise TypeError(msg)
strict_load = Validator.check_bool(strict_load)
logger.info("Execute the process of loading parameters into net.")
net.init_parameters_data()
param_not_load = []
for _, param in net.parameters_and_names():
if param.name in parameter_dict:
new_param = copy.deepcopy(parameter_dict[param.name])
if not isinstance(new_param, Parameter):
logger.critical("Failed to combine the net and the parameters.")
msg = ("For 'load_param_into_net', the element in the argument 'parameter_dict' should be a "
"'Parameter', but got {}.".format(type(new_param)))
raise TypeError(msg)
_update_param(param, new_param, strict_load)
else:
param_not_load.append(param.name)
if param_not_load and not strict_load:
        _load_mismatch_prefix_params(net, parameter_dict, param_not_load, strict_load)
logger.debug("Params not matched(in net but not in parameter_dict):")
for param_name in param_not_load:
logger.debug("%s", param_name)
logger.info("Loading parameters into net is finished.")
if param_not_load:
logger.warning("{} parameters in the 'net' are not loaded, because they are not in the "
"'parameter_dict'.".format(len(param_not_load)))
for param_name in param_not_load:
logger.warning("{} is not loaded.".format(param_name))
return param_not_load
def _load_mismatch_prefix_params(net, parameter_dict, param_not_load, strict_load):
    """When some net parameters were not loaded, try to continue loading them by stripping a common prefix."""
prefix_name = ""
longest_name = param_not_load[0]
while prefix_name != longest_name and param_not_load:
logger.debug("Count: {} parameters has not been loaded, try to continue loading.".format(len(param_not_load)))
prefix_name = longest_name
for net_param_name in param_not_load:
for dict_name in parameter_dict:
if dict_name.endswith(net_param_name):
prefix_name = dict_name[:-len(net_param_name)]
break
if prefix_name != longest_name:
break
if prefix_name != longest_name:
logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
for _, param in net.parameters_and_names():
new_param_name = prefix_name + param.name
if param.name in param_not_load and new_param_name in parameter_dict:
new_param = parameter_dict[new_param_name]
_update_param(param, new_param, strict_load)
param_not_load.remove(param.name)
def _save_graph(network, file_name):
"""
Saves the graph of network to a file.
Args:
network (Cell): Obtain a pipeline through network for saving graph.
file_name (str): Graph file name into which the graph will be saved.
"""
logger.info("Execute the process of saving graph.")
file_name = os.path.realpath(file_name)
graph_pb = network.get_func_graph_proto()
if graph_pb:
with open(file_name, "wb") as f:
os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(graph_pb)
def _get_merged_param_data(net, param_name, param_data, integrated_save):
"""
Gets the merged data(tensor) from tensor slice, by device arrangement and tensor map.
Args:
net (Cell): MindSpore network.
param_name (str): The parameter name, which to be combined.
param_data (Tensor): The parameter data on the local device, which was a slice of the whole parameter data.
        integrated_save (bool): Whether to perform integrated save in the automatic model parallel scene.
Returns:
Tensor, the combined tensor which with the whole data value.
"""
layout = net.parameter_layout_dict[param_name]
if len(layout) < 6:
logger.info("The layout dict does not contain the key %s", param_name)
return param_data
dev_mat = layout[0]
tensor_map = layout[1]
uniform_split = layout[4]
opt_shard_group = layout[5]
allgather_net = None
mp_weight = False
for dim in tensor_map:
if dim != -1:
mp_weight = True
break
if param_name in net.parallel_parameter_merge_net_dict:
allgather_net = net.parallel_parameter_merge_net_dict[param_name]
else:
logger.info("Need to create allgather net for %s", param_name)
if integrated_save:
if context.get_auto_parallel_context("pipeline_stages") > 1:
raise RuntimeError("Pipeline Parallel don't support Integrated save checkpoint now.")
if uniform_split == 0:
raise RuntimeError("Integrated save checkpoint only support uniform split tensor now.")
# while any dim is not equal to -1, means param is split and needs to be merged
# pipeline parallel need to be supported here later
if mp_weight:
allgather_net = get_allgather_cell(opt_shard_group, bool(opt_shard_group))
elif opt_shard_group:
allgather_net = get_allgather_cell(opt_shard_group, False)
elif opt_shard_group and context.get_auto_parallel_context("optimizer_weight_shard_aggregated_save"):
allgather_net = get_allgather_cell(opt_shard_group, False)
net.parallel_parameter_merge_net_dict[param_name] = allgather_net
if allgather_net:
param_data = allgather_net(param_data)
if mp_weight and integrated_save:
param_data = _reshape_param_data(param_data, dev_mat, tensor_map)
return param_data
def _fill_param_into_net(net, parameter_list):
"""
Fills parameter_list into net.
Args:
net (Cell): train network.
parameter_list (list): parameters list from ge callback.
"""
parameter_dict = {}
for each_param in parameter_list:
param_name = each_param["name"]
if isinstance(each_param["data"], Parameter):
each_param["data"].init_data()
np_val = each_param["data"].asnumpy()
if np_val.shape == (1,):
parameter_dict[param_name] = Parameter(np_val, name=param_name)
elif np_val.shape == ():
parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
name=param_name)
else:
parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='AIR', **kwargs):
"""
Export the mindspore network into an offline model in the specified format.
Note:
1. When exporting AIR, ONNX format, the size of a single tensor can not exceed 2GB.
2. When file_name does not have a suffix, the system will automatically add one according to the file_format.
Args:
net (Cell): MindSpore network.
inputs (Union[Tensor, tuple(Tensor), Dataset]): While the input type is Tensor, it represents the inputs
of the `net`, if the network has multiple inputs, incoming tuple(Tensor). While its type is Dataset,
it represents the preprocess behavior of the `net`, data preprocess operations will be serialized.
            In the second situation, you should manually adjust the batch size of the dataset script, which
            will impact the batch size of the 'net' input. Only parsing the "image" column from the dataset
            is supported currently.
file_name (str): File name of the model to be exported.
file_format (str): MindSpore currently supports 'AIR', 'ONNX' and 'MINDIR' format for exported model.
Default: 'AIR'.
- AIR: Ascend Intermediate Representation. An intermediate representation format of Ascend model.
- ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
- MINDIR: MindSpore Native Intermediate Representation for Anf. An intermediate representation format
for MindSpore models.
kwargs (dict): Configuration options dictionary.
- quant_mode (str): If the network is a quantization aware training network, the quant_mode should
be set to "QUANT", else the quant_mode should be set to "NONQUANT".
- mean (float): The mean of input data after preprocessing, used for quantizing the first layer of network.
Default: 127.5.
- std_dev (float): The variance of input data after preprocessing,
used for quantizing the first layer of the network. Default: 127.5.
- enc_key (byte): Byte type key used for encryption. The valid length is 16, 24, or 32.
- enc_mode (str): Specifies the encryption mode, to take effect when enc_key is set.
Option: 'AES-GCM' | 'AES-CBC'. Default: 'AES-GCM'.
Examples:
>>> import numpy as np
>>> from mindspore import export, Tensor
>>>
>>> net = LeNet()
>>> input_tensor = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
>>> export(net, Tensor(input_tensor), file_name='lenet', file_format='MINDIR')
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
if check_input_dataset(*inputs, dataset_type=mindspore.dataset.Dataset):
if len(inputs) != 1:
raise RuntimeError(f"You can only serialize one dataset into MindIR, got " + str(len(inputs)) + " datasets")
shapes, types, columns = inputs[0].output_shapes(), inputs[0].output_types(), inputs[0].get_col_names()
kwargs['dataset'] = inputs[0]
only_support_col = "image"
inputs = list()
for c, s, t in zip(columns, shapes, types):
if only_support_col != c:
continue
inputs.append(Tensor(np.random.uniform(-1.0, 1.0, size=s).astype(t)))
if not inputs:
raise RuntimeError(f"Only supports parse \"image\" column from dataset now, given dataset has columns: "
+ str(columns))
inputs = tuple(inputs)
else:
check_input_data(*inputs, data_class=Tensor)
Validator.check_file_name_by_regular(file_name)
file_name = os.path.realpath(file_name)
net = _quant_export(net, *inputs, file_format=file_format, **kwargs)
if 'enc_key' in kwargs.keys():
if file_format != 'MINDIR':
raise ValueError(f"For 'export', 'enc_key' can be passed in only when 'file_format' == 'MINDIR',"
f" but got 'file_format' {file_format}.")
enc_key = Validator.check_isinstance('enc_key', kwargs['enc_key'], bytes)
enc_mode = 'AES-GCM'
if 'enc_mode' in kwargs.keys():
enc_mode = Validator.check_isinstance('enc_mode', kwargs['enc_mode'], str)
dataset = kwargs['dataset'] if 'dataset' in kwargs.keys() else None
_export(net, file_name, file_format, *inputs, enc_key=enc_key, enc_mode=enc_mode, dataset=dataset)
else:
_export(net, file_name, file_format, *inputs, **kwargs)
def _export(net, file_name, file_format, *inputs, **kwargs):
"""
It is an internal conversion function. Export the MindSpore prediction model to a file in the specified format.
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
if file_format == 'GEIR':
logger.warning(f"Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.")
file_format = 'AIR'
supported_formats = ['AIR', 'ONNX', 'MINDIR']
if file_format not in supported_formats:
raise ValueError(f"For 'export', 'file_format' must be one of {supported_formats}, but got {file_format}.")
    # When dumping an ONNX file, switch the network to inference mode if it is training (NOTE: ONNX is only designed for prediction)
is_dump_onnx_in_training = net.training and file_format == 'ONNX'
if is_dump_onnx_in_training:
net.set_train(mode=False)
if file_format == 'AIR':
phase_name = 'export.air'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)
if not file_name.endswith('.air'):
file_name += ".air"
if os.path.exists(file_name):
os.chmod(file_name, stat.S_IWUSR)
if "/" in file_name:
real_path = os.path.realpath(file_name[:file_name.rfind("/")])
os.makedirs(real_path, exist_ok=True)
_executor.export(file_name, graph_id)
os.chmod(file_name, stat.S_IRUSR)
elif file_format == 'ONNX':
total_size = _calculation_net_size(net)
if total_size > PROTO_LIMIT_SIZE:
raise RuntimeError('Export onnx model failed. Network size is: {}G, it exceeded the protobuf: {}G limit.'
.format(total_size / 1024 / 1024, PROTO_LIMIT_SIZE / 1024 / 1024))
phase_name = 'export.onnx'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(net, graph_id)
if not file_name.endswith('.onnx'):
file_name += ".onnx"
if os.path.exists(file_name):
os.chmod(file_name, stat.S_IWUSR)
with open(file_name, 'wb') as f:
f.write(onnx_stream)
os.chmod(file_name, stat.S_IRUSR)
elif file_format == 'MINDIR':
_save_mindir(net, file_name, *inputs, **kwargs)
if is_dump_onnx_in_training:
net.set_train(mode=True)
def _generate_front_info_for_param_data_file(is_encrypt, kwargs):
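    """Build the 64-byte front header: a 1-byte endianness flag (1 if little-endian) plus
    63 reserved bytes, encrypted when encryption is enabled."""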
front_info = bytes()
check_code = sys.byteorder == "little"
front_info += check_code.to_bytes(1, byteorder=sys.byteorder)
front_info += bytes(63)
if is_encrypt():
front_info = _encrypt(front_info, len(front_info), kwargs['enc_key'],
len(kwargs['enc_key']), kwargs['enc_mode'])
return front_info
def _change_file(f, dirname, external_local, is_encrypt, kwargs):
'''
Change to another file to write parameter data
'''
    # Finalize the current data file (header, permissions) before switching to a new one
front_info = _generate_front_info_for_param_data_file(is_encrypt, kwargs)
f.seek(0, 0)
f.write(front_info)
f.close()
ori_data_file_name = f.name
os.chmod(ori_data_file_name, stat.S_IRUSR)
if os.path.getsize(ori_data_file_name) == 64:
raise RuntimeError("The parameter size is exceed 1T,cannot export to the file")
data_file_name = os.path.join(dirname, external_local)
return _get_data_file(is_encrypt, kwargs, data_file_name)
def _get_data_file(is_encrypt, kwargs, data_file_name):
'''
Get Data File to write parameter data
'''
# Reserves 64 bytes as spare information such as check data
offset = 64
if os.path.exists(data_file_name):
os.chmod(data_file_name, stat.S_IWUSR)
f = open(data_file_name, "wb")
place_holder_data = bytes(offset)
if is_encrypt():
place_holder_data = _encrypt(place_holder_data, len(place_holder_data), kwargs["enc_key"],
len(kwargs["enc_key"]), kwargs["enc_mode"])
f.write(place_holder_data)
parameter_size = (offset / 1024)
return f, parameter_size, offset
def _split_save(net_dict, model, file_name, is_encrypt, **kwargs):
    '''
    Save the graph and the parameter data into separate files
    '''
    logger.warning("The total size of parameters in the net exceeds 1G; saving the MindIR model and parameters separately.")
# save parameter
file_prefix = file_name.split("/")[-1]
if file_prefix.endswith(".mindir"):
file_prefix = file_prefix[:-7]
current_path = os.path.abspath(file_name)
dirname = os.path.dirname(current_path)
data_path = os.path.join(dirname, file_prefix + "_variables")
if os.path.exists(data_path):
shutil.rmtree(data_path)
os.makedirs(data_path, exist_ok=True)
os.chmod(data_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
index = 0
external_local = os.path.join(file_prefix + "_variables", "data_" + str(index))
data_file_name = os.path.join(dirname, external_local)
f, parameter_size, offset = _get_data_file(is_encrypt, kwargs, data_file_name)
try:
for param_proto in model.graph.parameter:
name = param_proto.name[param_proto.name.find(":") + 1:]
param = net_dict[name]
raw_data = param.data.asnumpy().tobytes()
data_length = len(raw_data)
append_size = 0
if data_length % 64 != 0:
append_size = 64 - (data_length % 64)
parameter_size += ((append_size + data_length) / 1024)
if parameter_size > PARAMETER_SPLIT_SIZE:
index += 1
external_local = os.path.join(file_prefix + "_variables", "data_" + str(index))
f, parameter_size, offset = _change_file(f, dirname, external_local, is_encrypt, kwargs)
parameter_size += ((append_size + data_length) / 1024)
param_proto.external_data.location = external_local
param_proto.external_data.length = data_length
param_proto.external_data.offset = offset
write_data = raw_data + bytes(append_size)
offset += (data_length + append_size)
if is_encrypt():
write_data = _encrypt(write_data, len(write_data), kwargs['enc_key'],
len(kwargs['enc_key']), kwargs['enc_mode'])
f.write(write_data)
# save graph
graph_file_name = os.path.join(dirname, file_prefix + "_graph.mindir")
if os.path.exists(graph_file_name):
os.chmod(graph_file_name, stat.S_IWUSR)
with open(graph_file_name, 'wb') as model_file:
os.chmod(graph_file_name, stat.S_IRUSR | stat.S_IWUSR)
model_string = model.SerializeToString()
if is_encrypt():
model_string = _encrypt(model_string, len(model_string), kwargs['enc_key'],
len(kwargs['enc_key']),
kwargs['enc_mode'])
model_file.write(model_string)
os.chmod(graph_file_name, stat.S_IRUSR)
front_info = _generate_front_info_for_param_data_file(is_encrypt, kwargs)
f.seek(0, 0)
f.write(front_info)
finally:
f.close()
os.chmod(data_file_name, stat.S_IRUSR)
def _save_mindir(net, file_name, *inputs, **kwargs):
"""Save MindIR format file."""
if context._get_mode() == context.PYNATIVE_MODE:
raise RuntimeError("MindIR export is not support in the Pynative mode, please convert to the Graph Mode.")
model = mindir_model()
phase_name = "predict" if net._auto_parallel_mode else "export.mindir"
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name,
do_convert=False, auto_parallel_mode=net._auto_parallel_mode)
mindir_stream = _executor._get_func_graph_proto(net, graph_id, 'mind_ir')
net_dict = net.parameters_dict()
model.ParseFromString(mindir_stream)
if 'dataset' in kwargs.keys() and kwargs['dataset'] is not None:
check_input_data(kwargs['dataset'], data_class=mindspore.dataset.Dataset)
dataset = kwargs['dataset']
_save_dataset_to_mindir(model, dataset)
save_together = _save_together(net_dict, model)
is_encrypt = lambda: 'enc_key' in kwargs.keys() and 'enc_mode' in kwargs.keys()
if save_together:
_save_mindir_together(net_dict, model, file_name, is_encrypt, **kwargs)
else:
        _split_save(net_dict, model, file_name, is_encrypt, **kwargs)
def _save_mindir_together(net_dict, model, file_name, is_encrypt, **kwargs):
"""Save graph and parameter together."""
for param_proto in model.graph.parameter:
param_name = param_proto.name[param_proto.name.find(":") + 1:]
if param_name in net_dict.keys():
param_data = net_dict[param_name].data.asnumpy().tobytes()
param_proto.raw_data = param_data
else:
logger.critical("The parameter %s in the graph should also be defined in the network.", param_name)
raise ValueError("The parameter {} in the graph should also be defined in the "
"network.".format(param_name))
if not file_name.endswith('.mindir'):
file_name += ".mindir"
current_path = os.path.abspath(file_name)
dirname = os.path.dirname(current_path)
os.makedirs(dirname, exist_ok=True)
if os.path.exists(file_name):
os.chmod(file_name, stat.S_IWUSR)
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
model_string = model.SerializeToString()
if is_encrypt():
model_string = _encrypt(model_string, len(model_string), kwargs['enc_key'], len(kwargs['enc_key']),
kwargs['enc_mode'])
f.write(model_string)
os.chmod(file_name, stat.S_IRUSR)
def _save_together(net_dict, model):
"""Whether graph and parameter save together during save mindir model."""
data_total = 0
for param_proto in model.graph.parameter:
name = param_proto.name[param_proto.name.find(":") + 1:]
if name in net_dict.keys():
data_total += sys.getsizeof(net_dict[name].data.asnumpy().tobytes()) / 1024
else:
raise RuntimeError('Graph parameter: {} Undefined in network.'.format(param_proto.name))
if data_total > TOTAL_SAVE:
return False
return True
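# Rough arithmetic for the threshold above: data_total accumulates in KB, so TOTAL_SAVE
# (1024 * 1024 KB = 1 GB) means that, e.g., a ~300M-parameter float32 network (~1.2 GB of
# raw data) is saved with its graph and parameters split into separate files.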
def _save_dataset_to_mindir(model, dataset):
"""Save dataset preprocess operations into mindir model."""
dataset_json = dataset.to_json()
reverse_dataset = []
while dataset_json:
reverse_dataset = [dataset_json] + reverse_dataset
if len(dataset_json['children']) > 1:
logger.warning("Need to support dataset_node with more than one child, using child 0 as default.")
dataset_json = dataset_json['children'][0] if dataset_json['children'] else []
for op in reverse_dataset:
if op['op_type'] == 'Map':
model.preprocessor.op.add()
model.preprocessor.op[-1].input_columns = json.dumps(op['input_columns'])
model.preprocessor.op[-1].output_columns = json.dumps(op['output_columns'])
model.preprocessor.op[-1].project_columns = json.dumps(op['project_columns'])
model.preprocessor.op[-1].op_type = json.dumps(op['op_type'])
model.preprocessor.op[-1].operations = json.dumps(op['operations'])
model.preprocessor.op[-1].offload = op['offload'] if 'offload' in op.keys() else False
def quant_mode_manage(func):
"""
Inherit the quant_mode in old version.
"""
    def wrapper(network, *inputs, file_format, **kwargs):
if 'quant_mode' not in kwargs:
return network
quant_mode = kwargs['quant_mode']
if not isinstance(quant_mode, str):
raise TypeError("For 'export', the type of 'quant_mode' should be string, "
"but got {}.".format(type(quant_mode)))
if quant_mode in ('AUTO', 'MANUAL'):
kwargs['quant_mode'] = 'QUANT'
return func(network, *inputs, file_format=file_format, **kwargs)
    return wrapper
@quant_mode_manage
def _quant_export(network, *inputs, file_format, **kwargs):
"""
Exports MindSpore quantization predict model to deploy with AIR and MINDIR.
"""
supported_device = ["Ascend", "GPU"]
supported_formats = ['AIR', 'MINDIR']
quant_mode_formats = ['QUANT', 'NONQUANT']
quant_mode = kwargs['quant_mode']
if quant_mode not in quant_mode_formats:
raise KeyError(f"For 'export', the argument 'quant_mode' must be one of {quant_mode_formats}, "
f"but got {quant_mode}.")
if quant_mode == 'NONQUANT':
return network
quant_net = copy.deepcopy(network)
quant_net._create_time = int(time.time() * 1e9)
mean = 127.5 if kwargs.get('mean', None) is None else kwargs['mean']
std_dev = 127.5 if kwargs.get('std_dev', None) is None else kwargs['std_dev']
mean = Validator.check_value_type("mean", mean, (int, float))
std_dev = Validator.check_value_type("std_dev", std_dev, (int, float))
if context.get_context('device_target') not in supported_device:
raise KeyError(f"For 'export', quant export only support {supported_device} device target now, "
f"but got {context.get_context('device_target')}")
if file_format not in supported_formats:
raise ValueError(f"For 'export', quant export only support 'file_format' {supported_formats}, "
f"but got {file_format}.")
quant_net.set_train(False)
if file_format == "MINDIR":
exporter = quant_export.ExportToQuantInferNetwork(quant_net, mean, std_dev, *inputs, is_mindir=True)
else:
exporter = quant_export.ExportToQuantInferNetwork(quant_net, mean, std_dev, *inputs)
deploy_net = exporter.run()
return deploy_net
def parse_print(print_file_name):
"""
Parse saved data generated by mindspore.ops.Print. Print is used to print data to screen in graph mode.
    It can also be turned off by setting the parameter `print_file_path` in `context`, and the data will be saved
in a file specified by print_file_path. parse_print is used to parse the saved file. For more information
please refer to :func:`mindspore.context.set_context` and :class:`mindspore.ops.Print`.
Args:
print_file_name (str): The file name of saved print data.
Returns:
List, element of list is Tensor.
Raises:
        ValueError: The print file may be empty; please make sure you enter the correct file name.
Examples:
>>> import numpy as np
>>> import mindspore
>>> import mindspore.ops as ops
>>> from mindspore import nn
>>> from mindspore import Tensor, context
>>> context.set_context(mode=context.GRAPH_MODE, print_file_path='log.data')
>>> class PrintInputTensor(nn.Cell):
... def __init__(self):
... super().__init__()
... self.print = ops.Print()
...
... def construct(self, input_pra):
... self.print('print:', input_pra)
... return input_pra
>>> x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).astype(np.float32)
>>> input_pra = Tensor(x)
>>> net = PrintInputTensor()
>>> net(input_pra)
>>> import mindspore
>>> data = mindspore.parse_print('./log.data')
>>> print(data)
['print:', Tensor(shape=[2, 4], dtype=Float32, value=
[[ 1.00000000e+00, 2.00000000e+00, 3.00000000e+00, 4.00000000e+00],
[ 5.00000000e+00, 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]])]
"""
print_file_path = os.path.realpath(print_file_name)
if os.path.getsize(print_file_path) == 0:
raise ValueError("For 'parse_print', the print file may be empty, please make sure enter the correct "
"'print_file_name'.")
logger.info("Execute load print process.")
print_list = Print()
try:
with open(print_file_path, "rb") as f:
pb_content = f.read()
print_list.ParseFromString(pb_content)
except BaseException as e:
logger.critical("Failed to read the print file %s, please check whether the file is "
"correct.", print_file_name)
raise ValueError(e.__str__() + "\nFailed to read the print file {}, please check whether "
"the file is correct.".format(print_file_name))
tensor_list = []
try:
for print_ in print_list.value:
# String type
if print_.HasField("desc"):
tensor_list.append(print_.desc)
elif print_.HasField("tensor"):
dims = print_.tensor.dims
data_type = print_.tensor.tensor_type
data = print_.tensor.tensor_content
np_type = tensor_to_np_type[data_type]
                param_data = np.frombuffer(data, np_type)  # np.fromstring is deprecated for binary data
ms_type = tensor_to_ms_type[data_type]
if dims and dims != [0]:
param_value = param_data.reshape(dims)
tensor_list.append(Tensor(param_value, ms_type))
# Scalar type
else:
data_type_ = data_type.lower()
if 'float' in data_type_:
param_data = float(param_data[0])
elif 'int' in data_type_:
param_data = int(param_data[0])
elif 'bool' in data_type_:
param_data = bool(param_data[0])
tensor_list.append(Tensor(param_data, ms_type))
except BaseException as e:
logger.critical("Failed to load the print file %s.", print_list)
raise RuntimeError(e.__str__() + "\nFailed to load the print file {}.".format(print_list))
return tensor_list
def _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):
"""
Merge data slices to one tensor with whole data when strategy is not None.
Args:
sliced_data (list[numpy.ndarray]): Data slices in order of rank_id.
parameter_name (str): Name of parameter.
strategy (dict): Parameter slice strategy.
is_even (bool): Slice manner that True represents slicing evenly and False represents slicing unevenly.
Returns:
Tensor, the merged Tensor which has the whole data.
Raises:
ValueError: Failed to merge.
"""
layout = strategy.get(parameter_name)
try:
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
except BaseException as e:
raise ValueError(f"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.")
device_count = 1
for dim in dev_mat:
device_count *= dim
if len(sliced_data) != device_count:
raise ValueError(f"For 'merge_sliced_parameter', the length of 'sliced_parameters' should be equal to "
f"device_count. The length of 'sliced_parameters' is {len(sliced_data)}, but "
f"device_count is {device_count}.")
if not param_split_shape:
if not is_even:
raise ValueError("When the shape of every parameter in 'sliced_parameters' is same, "
"the 'is_even' should be True, but got {}.".format(is_even))
all_gather_tensor = Tensor(np.concatenate(sliced_data))
if field_size > 0:
merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, field_size)
else:
merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
else:
tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
slice_count = 1
for dim in tensor_strategy:
slice_count *= dim
if len(param_split_shape) != slice_count:
raise ValueError(f"The param_split_shape length in strategy should be {slice_count}, "
f"but got {len(param_split_shape)}.")
tensor_slices_new = list(range(slice_count))
tensor_slices = sliced_data
for i in range(device_count):
slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
if tensor_slices[i].shape[0] != param_split_shape[slice_index]:
raise ValueError(f"The slice {slice_index} should be {param_split_shape[slice_index]} in 0 axis, "
f"but got {tensor_slices[i].shape[0]}.")
tensor_slices_new[slice_index] = np.array(tensor_slices[i])
dim_len = len(tensor_strategy)
for i in range(dim_len):
ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])
tensor_slices_new_inner = []
for j in range(ele_count):
new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]
for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,
(j + 1) * tensor_strategy[dim_len - 1 - i]):
new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)
tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))
tensor_slices_new = tensor_slices_new_inner
merged_tensor = Tensor(tensor_slices_new[0])
return merged_tensor
def restore_group_info_list(group_info_file_name):
"""
    Build a rank list. The checkpoints of the ranks in the rank list have the same contents as that of the local
    rank which saves the group_info_file_name. To save the group info file, please export the GROUP_INFO_FILE
    environment variable, e.g. "export GROUP_INFO_FILE=/data/group_info.pb".
Args:
group_info_file_name (str): Name of group information file.
Returns:
List, the rank list.
Raises:
ValueError: group information file is incorrect.
TypeError: group_info_file_name is not str.
Examples:
>>> restore_list = restore_group_info_list("./group_info.pb")
"""
if not isinstance(group_info_file_name, str):
raise TypeError(f"The group_info_file_name should be str, but got {type(group_info_file_name)}.")
if not os.path.isfile(group_info_file_name):
raise ValueError(f"No such group info file: {group_info_file_name}.")
if os.path.getsize(group_info_file_name) == 0:
raise ValueError("The group info file should not be empty.")
parallel_group_map = ParallelGroupMap()
with open(group_info_file_name, 'rb') as f:
pb_content = f.read()
parallel_group_map.ParseFromString(pb_content)
restore_list = parallel_group_map.ckpt_restore_rank_list
if not restore_list:
raise ValueError("The group info file has no restore rank list.")
restore_rank_list = [rank for rank in restore_list.dim]
return restore_rank_list
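# Illustrative usage sketch (the "ckpt_rank_{N}.ckpt" naming is an assumption,
# not a fixed convention): every rank in the returned list saved a checkpoint
# with identical contents, so any one of them can be chosen for restoring.
def _pick_restore_checkpoint_example(group_info_file, ckpt_dir):
    rank_list = restore_group_info_list(group_info_file)
    return os.path.join(ckpt_dir, "ckpt_rank_{}.ckpt".format(rank_list[0]))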
def build_searched_strategy(strategy_filename):
"""
    Build the slice strategy of every parameter in the network. Used for distributed inference.
    For details, please check:
    `<https://www.mindspore.cn/docs/programming_guide/en/master/save_load_model_hybrid_parallel.html>`_.
Args:
strategy_filename (str): Name of strategy file.
Returns:
Dict, whose key is parameter name and value is slice strategy of this parameter.
Raises:
ValueError: Strategy file is incorrect.
TypeError: strategy_filename is not str.
Examples:
>>> strategy = build_searched_strategy("./strategy_train.ckpt")
"""
if not isinstance(strategy_filename, str):
raise TypeError(f"For 'build_searched_strategy', the 'strategy_filename' should be string, "
f"but got {type(strategy_filename)}.")
if not os.path.isfile(strategy_filename):
raise ValueError(f"For 'build_searched_strategy', no such strategy file: {strategy_filename}. "
f"Please check whether the 'strategy_filename' exists.")
if os.path.getsize(strategy_filename) == 0:
raise ValueError(f"For 'build_searched_strategy', the strategy file {strategy_filename} should not "
f"be empty. Please check whether the 'strategy_filename' is correct.")
parallel_strategy_map = ParallelStrategyMap()
with open(strategy_filename, 'rb') as f:
pb_content = f.read()
parallel_strategy_map.ParseFromString(pb_content)
layout_items = parallel_strategy_map.parallel_layout_item
if not layout_items:
raise ValueError(f"For 'build_searched_strategy', the strategy file {strategy_filename} has no sliced "
f"parameter, please check whether the 'strategy_filename' is correct.")
strategy = {}
for layout_item in layout_items:
parameter_name = layout_item.param_name
layout = layout_item.parallel_layouts
strategy[parameter_name] = layout
return strategy
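# Illustrative usage sketch (checkpoint paths are assumptions): a typical
# hybrid-parallel workflow builds the slice strategy from the training strategy
# file, loads the per-rank slices of one parameter, and merges them back into a
# whole parameter with merge_sliced_parameter below.
def _merge_one_parameter_example(strategy_file, rank_ckpt_files, param_name):
    strategy = build_searched_strategy(strategy_file)
    sliced = [load_checkpoint(ckpt)[param_name] for ckpt in rank_ckpt_files]
    return merge_sliced_parameter(sliced, strategy)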
def merge_sliced_parameter(sliced_parameters, strategy=None):
"""
    Merge parameter slices into one whole parameter. Used for distributed inference.
    For details, please check:
    `<https://www.mindspore.cn/docs/programming_guide/en/master/save_load_model_hybrid_parallel.html>`_.
Args:
sliced_parameters (list[Parameter]): Parameter slices in order of rank id.
        strategy (Optional[dict]): Parameter slice strategy, whose key is parameter name and
            value is the slice strategy of that parameter. If strategy is None, parameter
            slices are simply concatenated along axis 0 in order. Default: None.
Returns:
Parameter, the merged parameter which has the whole data.
Raises:
ValueError: Failed to merge.
TypeError: The sliced_parameters is incorrect or strategy is not dict.
KeyError: The parameter name is not in keys of strategy.
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, merge_sliced_parameter, Parameter
>>>
>>> sliced_parameters = [
... Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),
... "network.embedding_table"),
... Parameter(Tensor(np.array([0.00015815, 0.00015458, -0.00012125])),
... "network.embedding_table"),
... Parameter(Tensor(np.array([0.00042165, 0.00029692, -0.00007941])),
... "network.embedding_table"),
... Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),
... "network.embedding_table")]
>>> merged_parameter = merge_sliced_parameter(sliced_parameters)
>>> print(merged_parameter)
Parameter (name=network.embedding_table, shape=(12,), dtype=Float64, requires_grad=True)
"""
if not isinstance(sliced_parameters, list):
raise TypeError(f"For 'merge_sliced_parameter', the 'sliced_parameters' should be list, "
f"but got {type(sliced_parameters)}.")
if not sliced_parameters:
raise ValueError("For 'merge_sliced_parameter', the 'sliced_parameters' should not be empty.")
if strategy and not isinstance(strategy, dict):
raise TypeError(f"For 'merge_sliced_parameter', the 'strategy' should be dict, but got {type(strategy)}.")
try:
parameter_name = sliced_parameters[0].name
parameter_shape = sliced_parameters[0].data.shape
parameter_shape_length = len(parameter_shape)
except BaseException as e:
raise TypeError(e.__str__() + f" For 'merge_sliced_parameter', the element in 'sliced_parameters' should be "
f"'Parameter', but got {type(sliced_parameters[0])} at index 0.")
is_even = True
for index, parameter in enumerate(sliced_parameters):
if not isinstance(parameter, Parameter):
raise TypeError(f"For 'merge_sliced_parameter', the element in 'sliced_parameters' should be 'Parameter', "
f"but got {type(parameter)} at index {index}.")
if parameter.name != parameter_name \
or len(parameter.data.shape) != parameter_shape_length \
or parameter.data.shape[1:] != parameter_shape[1:]:
raise ValueError(f"For 'merge_sliced_parameter', please make sure that the elements in 'slice_parameters'"
f" have the same name, dimension length and shape except 0 axis. The name, dimension "
f"length, shape except 0 axis should be {parameter_name}, {parameter_shape_length}, "
f"{parameter_shape[1:]}, but got name: {parameter.name}, dimension length: "
f"{len(parameter.data.shape)}, shape except 0 axis: {parameter.data.shape[1:]} "
f"at index {index}.")
if parameter.data.shape != parameter_shape:
is_even = False
layerwise_parallel = sliced_parameters[0].layerwise_parallel
requires_grad = sliced_parameters[0].requires_grad
sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]
if not strategy:
merged_tensor = Tensor(np.concatenate(sliced_data))
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
else:
if parameter_name not in strategy.keys():
raise KeyError(f"For 'merge_sliced_parameter', the parameter name {parameter_name} should be a key in "
f"the 'strategy'. Please check 'sliced_parameter' and 'strategy'.")
merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
return merged_parameter
def load_distributed_checkpoint(network, checkpoint_filenames, predict_strategy=None,
train_strategy_filename=None, strict_load=False, dec_key=None, dec_mode='AES-GCM'):
"""
    Load a checkpoint into net for distributed prediction. Used for distributed inference.
    For details of distributed inference, please check:
    `<https://www.mindspore.cn/docs/programming_guide/en/master/distributed_inference.html>`_.
Args:
        network (Cell): Network for distributed prediction.
checkpoint_filenames (list[str]): The name of Checkpoint files in order of rank id.
        predict_strategy (dict): Strategy of the prediction process. If predict_strategy is None, a single
                                 device is used for prediction. Default: None.
train_strategy_filename (str): The filename of training strategy protocol buffer file.
When train_strategy_filename is None, the training strategy file will be
obtained from context.get_auto_parallel_context("strategy_ckpt_load_file").
Therefore, the training strategy file needs to be specified
in at least one of them. Default: None.
        strict_load (bool): Whether to load the parameters into the net strictly. If False, a parameter is
                            loaded when its name suffix in the checkpoint file matches a parameter in the
                            network, and inconsistent dtypes within the same type family are converted,
                            such as float32 to float16. Default: False.
dec_key (Union[None, bytes]): Byte type key used for decryption. If the value is None, the decryption
is not required. Default: None.
dec_mode (str): This parameter is valid only when dec_key is not set to None. Specifies the decryption
mode, currently supports 'AES-GCM' and 'AES-CBC'. Default: 'AES-GCM'.
Raises:
        TypeError: The types of the inputs do not match the requirements.
ValueError: Failed to load checkpoint into net.
"""
network = Validator.check_isinstance("network", network, nn.Cell)
_check_checkpoint_file(checkpoint_filenames)
_check_predict_strategy(predict_strategy)
dec_key = Validator.check_isinstance('dec_key', dec_key, (type(None), bytes))
dec_mode = Validator.check_isinstance('dec_mode', dec_mode, str)
if train_strategy_filename is None:
train_strategy_filename = context.get_auto_parallel_context("strategy_ckpt_load_file")
_train_strategy = build_searched_strategy(train_strategy_filename)
train_strategy = _convert_to_list(_train_strategy)
train_dev_count = 1
ckpt_file_len = len(checkpoint_filenames)
for dim in train_strategy[list(train_strategy.keys())[0]][0]:
train_dev_count *= dim
if train_dev_count != ckpt_file_len:
raise ValueError(f"For 'load_distributed_checkpoint', the argument 'predict_strategy' is dict, "
f"the key of it must be string, and the value of it must be list or tuple that "
f"the first four elements are dev_matrix (list[int]), tensor_map (list[int]), "
f"param_split_shape (list[int]) and field_size (int, which value is 0)."
f"Please check whether 'predict_strategy' is correct.")
rank_list = _infer_rank_list(train_strategy, predict_strategy)
param_total_dict = defaultdict(dict)
for file_index, file_name in enumerate(checkpoint_filenames):
ckpt_dict = load_checkpoint(file_name, dec_key=dec_key, dec_mode=dec_mode)
for param_name, param in ckpt_dict.items():
param_total_dict[param_name][file_index] = param
param_dict = {}
param_not_in_strategy = []
param_not_in_ckpt = []
for _, param in network.parameters_and_names():
sliced_params = []
if param.name not in rank_list.keys():
param_not_in_strategy.append(param.name)
continue
if param.name not in param_total_dict:
param_not_in_ckpt.append(param.name)
continue
param_rank = rank_list[param.name][0]
skip_merge_split = rank_list[param.name][1]
shard_stride = train_strategy[param.name][4]
if train_strategy[param.name][5]:
            shard_size = int(ckpt_file_len / shard_stride / train_strategy[param.name][5])
else:
shard_size = 0
for rank in param_rank:
param_total_list = list(range(0, ckpt_file_len))
if shard_size > 0:
shard_total_list = [param_total_list[i:i + shard_size] for i in
range(0, ckpt_file_len, shard_size)]
param_total_list = shard_total_list[rank // shard_size]
if shard_stride > 0:
param_stride = []
                # gather the ranks that hold slices of this parameter, at shard_stride intervals
param_index = param_total_list[0:param_total_list.index(rank) + 1][::-1][::shard_stride]
param_index.extend(param_total_list[param_total_list.index(rank):][::shard_stride])
param_index = list(set(param_index))
param_index.sort()
for rank_num in param_index:
param_stride.append(param_total_dict[param.name][rank_num].data.asnumpy())
sliced_param = Parameter(Tensor(np.concatenate(param_stride)), name=param.name)
else:
sliced_param = param_total_dict[param.name][rank]
sliced_params.append(sliced_param)
if skip_merge_split:
split_param = sliced_params[0]
else:
param_unique_strategy = _remove_repeated_slices(train_strategy[param.name])
_param_unique_strategy = _convert_to_layout(param.name, param_unique_strategy)
split_param = _merge_and_split(sliced_params, _param_unique_strategy, predict_strategy)
opt_shard_group = predict_strategy[param.name][5] if predict_strategy else None
if opt_shard_group:
data = split_param.data.asnumpy()
rank = get_rank(opt_shard_group)
size = get_group_size(opt_shard_group)
try:
data_slice = np.split(data, size)[rank]
except BaseException as e:
logger.critical("Failed to load opt shard slice in load distributed checkpoint for {}. Data shape is {}"
" and group is {}".format(param.name, split_param.data.shape, opt_shard_group))
raise RuntimeError(e.__str__() + f"\nFor 'load_distributed_checkpoint', failed to load opt shard slice"
f" in load distributed checkpoint for {param.name}. Data shape is "
f"{split_param.data.shape} and group is {opt_shard_group}.")
split_param = Parameter(Tensor(data_slice), param.name,
split_param.requires_grad, split_param.layerwise_parallel)
param_dict[param.name] = split_param
    if param_not_in_strategy:
        logger.warning("{} parameters in the network are not in the slice strategy.".format(param_not_in_strategy))
    if param_not_in_ckpt:
        logger.warning("{} parameters are in the slice strategy but not in the checkpoint "
                       "file.".format(param_not_in_ckpt))
load_param_into_net(network, param_dict, strict_load=strict_load)
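# Illustrative usage sketch (the glob pattern and rank-id suffix convention are
# assumptions): checkpoint files must be passed in rank-id order, after which a
# single device can run prediction with the merged parameters.
def _distributed_predict_example(net, ckpt_dir, train_strategy_file):
    import glob as _glob
    ckpt_files = sorted(
        _glob.glob(os.path.join(ckpt_dir, "*.ckpt")),
        key=lambda name: int(name.split("_")[-1].split(".")[0]))
    load_distributed_checkpoint(net, ckpt_files, predict_strategy=None,
                                train_strategy_filename=train_strategy_file)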
def async_ckpt_thread_status():
"""
    Get the status of the asynchronous save checkpoint thread.
    When saving a checkpoint asynchronously, you can query the thread state through this function
    to ensure that writing of the checkpoint file has completed.
    Returns:
        bool, True if the asynchronous save checkpoint thread is running, False otherwise.
"""
thr_list = threading.enumerate()
    return any(ele.name == "asyn_save_ckpt" for ele in thr_list)
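# Illustrative usage sketch: after save_checkpoint(..., async_save=True), poll
# this helper to make sure the checkpoint file is fully written before, e.g.,
# exiting the process. The 0.1s interval is an arbitrary choice for the sketch.
def _wait_for_async_ckpt_example(poll_interval=0.1):
    while async_ckpt_thread_status():
        time.sleep(poll_interval)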
def _check_predict_strategy(predict_strategy):
"""Check predict strategy."""
def _check_int_list(arg):
if not isinstance(arg, list):
return False
for item in arg:
if not isinstance(item, int):
return False
return True
if predict_strategy is None:
return
flag = True
predict_strategy = Validator.check_isinstance("predict_strategy", predict_strategy, dict)
    for key in predict_strategy.keys():
        if not isinstance(key, str) or not isinstance(predict_strategy[key], (list, tuple)) \
                or len(predict_strategy[key]) < 4:
            flag = False
            continue
        dev_matrix, tensor_map, param_split_shape, field_size = predict_strategy[key][:4]
        if not _check_int_list(dev_matrix) or not _check_int_list(tensor_map) or \
                not (_check_int_list(param_split_shape) or not param_split_shape) or \
                not (isinstance(field_size, int) and field_size == 0):
            flag = False
if not flag:
raise ValueError(f"Please make sure that the key of predict_strategy is str, "
f"and the value is a list or a tuple that the first four elements are "
f"dev_matrix (list[int]), tensor_map (list[int]), "
f"param_split_shape (list[int]) and field_size (zero).")
def _check_checkpoint_file(checkpoint_filenames):
"""Check checkpoint file name."""
for index, filename in enumerate(checkpoint_filenames):
if not isinstance(filename, str) or not os.path.exists(filename) \
or filename[-5:] != ".ckpt" or os.path.getsize(filename) == 0:
raise ValueError(f"For 'load_distributed_checkpoint', please check 'checkpoint_filenames', and "
f"make sure the {filename} at index {index} is a valid checkpoint file, it must "
f"be a string ending with '.ckpt', and the checkpoint file it represents must "
f"be exist and not empty.")
def _convert_to_list(strategy):
"""Convert ParallelLayouts object to specified list."""
train_map = {}
for param_name in strategy.keys():
try:
layout = strategy.get(param_name)
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
shard_stride = int(layout.opt_weight_shard_step)
shard_size = int(layout.opt_weight_shard_size)
train_map[param_name] = [dev_mat, tensor_map, param_split_shape, field_size, shard_stride, shard_size]
except BaseException as e:
raise ValueError(f"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.")
return train_map
def _convert_to_layout(param_name, tensor_layout):
"""Convert list to ParallelLayouts object."""
strategy = {}
try:
layout = ParallelLayouts()
layout.field = tensor_layout[3]
dev_matrix = layout.dev_matrix.add()
for item in tensor_layout[0]:
dev_matrix.dim.append(item)
tensor_map = layout.tensor_map.add()
for item in tensor_layout[1]:
tensor_map.dim.append(item)
param_split_shape = layout.param_split_shape.add()
for item in tensor_layout[2]:
param_split_shape.dim.append(item)
except BaseException as e:
raise ValueError("Convert failed. " + e.__str__())
strategy[param_name] = layout
return strategy
def _merge_and_split(sliced_params, train_strategy, predict_strategy):
"""Merge sliced parameter and split it according to the predict strategy."""
merged_param = merge_sliced_parameter(sliced_params, train_strategy)
if predict_strategy is None:
return merged_param
param_name = merged_param.name
tensor_layout = predict_strategy[param_name]
split_tensor = _load_tensor(merged_param.data, tensor_layout[0], tensor_layout[1])
requires_grad = merged_param.requires_grad
layerwise_parallel = merged_param.layerwise_parallel
split_param = Parameter(split_tensor, param_name, requires_grad, layerwise_parallel)
return split_param
def _calculation_net_size(net):
"""Calculate the size of parameters in the network."""
data_total = 0
net_dict = net.parameters_dict()
for name in net_dict:
data_total += sys.getsizeof(net_dict[name].data.asnumpy().tobytes()) / 1024
return data_total
import copy
import json
import os
import shutil
import stat
import threading
from threading import Thread, Lock
from collections import defaultdict
import math
import sys
import time
import numpy as np
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.mind_ir_pb2 import ModelProto as mindir_model
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap, ParallelLayouts, ParallelGroupMap
from mindspore.train.print_pb2 import Print
import mindspore
import mindspore.nn as nn
from mindspore import context
from mindspore import log as logger
from mindspore._checkparam import check_input_data, check_input_dataset, Validator
from mindspore.common import dtype as mstype
from mindspore.common.api import _cell_graph_executor as _executor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.communication.management import get_rank, get_group_size
from mindspore.compression.export import quant_export
from mindspore.parallel._cell_wrapper import get_allgather_cell
from mindspore.parallel._tensor import _load_tensor, _get_tensor_strategy, _get_tensor_slice_index
from mindspore.parallel._tensor import _reshape_param_data
from mindspore.parallel._tensor import _reshape_param_data_with_weight
from mindspore.parallel._utils import _infer_rank_list, _remove_repeated_slices
from .._c_expression import load_mindir, _encrypt, _decrypt, _is_cipher_file
tensor_to_ms_type = {"Int8": mstype.int8, "UInt8": mstype.uint8, "Int16": mstype.int16, "UInt16": mstype.uint16,
"Int32": mstype.int32, "UInt32": mstype.uint32, "Int64": mstype.int64, "UInt64": mstype.uint64,
"Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
"Bool": mstype.bool_}
tensor_to_np_type = {"Int8": np.int8, "UInt8": np.uint8, "Int16": np.int16, "UInt16": np.uint16,
"Int32": np.int32, "UInt32": np.uint32, "Int64": np.int64, "UInt64": np.uint64,
"Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}
_ckpt_mutex = Lock()
SLICE_SIZE = 512 * 1024
PROTO_LIMIT_SIZE = 1024 * 1024 * 2
TOTAL_SAVE = 1024 * 1024
PARAMETER_SPLIT_SIZE = 1024 * 1024 * 1024
def _special_process_par(par, new_par):
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
if new_par_shape_len <= par_shape_len:
return False
for i in range(new_par_shape_len - par_shape_len):
if new_par.data.shape[par_shape_len + i] != 1:
return False
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape)
par.set_data(Tensor(new_val, par.data.dtype))
return True
def _update_param(param, new_param, strict_load):
if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
if param.data.shape != new_param.data.shape:
if not _special_process_par(param, new_param):
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'net' should have the same shape "
f"as {param.name} in the argument 'parameter_dict'. But got its shape {param.data.shape} in"
f" the argument 'net' and shape {new_param.data.shape} in the argument 'parameter_dict'."
f"May you need to check whether the checkpoint you loaded is correct or the batch size and "
f"so on in the 'net' and 'parameter_dict' are same.")
raise RuntimeError(msg)
if param.data.dtype != new_param.data.dtype:
if _type_convert(param, new_param, strict_load):
new_tensor = Tensor(new_param.data.asnumpy(), param.data.dtype)
param.set_data(new_tensor)
return
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'net' should have the same type as "
f"{param.name} in the argument 'parameter_dict'. but got its type {param.data.dtype} in the "
f"argument 'net' and type {new_param.data.dtype} in the argument 'parameter_dict'."
f"May you need to check whether the checkpoint you loaded is correct.")
raise RuntimeError(msg)
param.set_data(new_param.data, param.sliced)
return
if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
if param.data.shape != (1,) and param.data.shape != ():
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'parameter_dict' is "
f"scalar, then the shape of {param.name} in the argument 'net' should be "
f"(1,) or (), but got shape {param.data.shape}."
f"May you need to check whether the checkpoint you loaded is correct.")
raise RuntimeError(msg)
param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))
elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
logger.critical("Failed to combine the net and the parameters for param %s.", param.name)
msg = (f"For 'load_param_into_net', {param.name} in the argument 'parameter_dict' is Tensor, "
f"then {param.name} in the argument 'net' also should be Tensor, but got {type(param.data)}."
f"May you need to check whether the checkpoint you loaded is correct.")
raise RuntimeError(msg)
else:
param.set_data(type(param.data)(new_param.data))
def _type_convert(param, new_param, strict_load):
float_type = (mstype.float16, mstype.float32, mstype.float64)
int_type = (mstype.int8, mstype.int16, mstype.int32, mstype.int64)
if not strict_load and ({param.data.dtype, new_param.data.dtype}.issubset(float_type) or
{param.data.dtype, new_param.data.dtype}.issubset(int_type)):
logger.warning(f"The type of {new_param.name}:{new_param.data.dtype} in 'parameter_dict' is different from "
f"the type of it in 'net':{param.data.dtype}, then the type convert from "
f"{new_param.data.dtype} to {param.data.dtype} in the network.")
return True
return False
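# Illustrative sketch of the relaxed type rule above: with strict_load=False,
# dtypes within the same family may be converted (float16 <-> float32 here),
# while cross-family mismatches make _update_param raise instead.
def _type_convert_example():
    a = Parameter(Tensor(np.zeros(2, np.float32)), "w")
    b = Parameter(Tensor(np.zeros(2, np.float16)), "w")
    return _type_convert(a, b, strict_load=False)  # True: both are float types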
def _exec_save(ckpt_file_name, data_list, enc_key=None, enc_mode="AES-GCM"):
try:
with _ckpt_mutex:
if os.path.exists(ckpt_file_name):
os.chmod(ckpt_file_name, stat.S_IWUSR)
os.remove(ckpt_file_name)
with open(ckpt_file_name, "ab") as f:
if enc_key is not None:
plain_data = bytes(0)
cipher_data = bytes(0)
for name, value in data_list.items():
data_size = value[2].nbytes / 1024
if data_size > SLICE_SIZE:
slice_count = math.ceil(data_size / SLICE_SIZE)
param_slice_list = np.array_split(value[2], slice_count)
else:
param_slice_list = [value[2]]
for param_slice in param_slice_list:
checkpoint_list = Checkpoint()
param_value = checkpoint_list.value.add()
param_value.tag = name
param_tensor = param_value.tensor
param_tensor.dims.extend(value[0])
param_tensor.tensor_type = value[1]
param_tensor.tensor_content = param_slice.tobytes()
if enc_key is None:
f.write(checkpoint_list.SerializeToString())
else:
plain_data += checkpoint_list.SerializeToString()
max_block_size = SLICE_SIZE * 1024
while len(plain_data) >= max_block_size:
cipher_data += _encrypt(plain_data[0: max_block_size], max_block_size, enc_key,
len(enc_key), enc_mode)
plain_data = plain_data[max_block_size:]
if enc_key is not None:
if plain_data:
cipher_data += _encrypt(plain_data, len(plain_data), enc_key, len(enc_key), enc_mode)
f.write(cipher_data)
os.chmod(ckpt_file_name, stat.S_IRUSR)
except BaseException as e:
logger.critical("Failed to save the checkpoint file %s. Maybe don't have the permission to write files, "
"or the disk space is insufficient and so on.", ckpt_file_name)
raise e
def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True,
                    async_save=False, append_dict=None, enc_key=None, enc_mode="AES-GCM"):
    """Save checkpoint to a specified file. `save_obj` may be an nn.Cell or a list of parameter dicts."""
if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):
raise TypeError("For 'save_checkpoint', the argument 'save_obj' should be nn.Cell or list, "
"but got {}.".format(type(save_obj)))
integrated_save = Validator.check_bool(integrated_save)
async_save = Validator.check_bool(async_save)
append_dict = _check_append_dict(append_dict)
enc_key = Validator.check_isinstance('enc_key', enc_key, (type(None), bytes))
enc_mode = Validator.check_isinstance('enc_mode', enc_mode, str)
logger.info("Execute the process of saving checkpoint files.")
if isinstance(save_obj, nn.Cell):
save_obj.init_parameters_data()
param_dict = {}
for _, param in save_obj.parameters_and_names():
param_dict[param.name] = param
param_list = []
for (key, value) in param_dict.items():
each_param = {"name": key}
param_data = Tensor(value.data)
# in automatic model parallel scenario, some parameters were split to all the devices,
# which should be combined before saving
if key in save_obj.parameter_layout_dict:
param_data = _get_merged_param_data(save_obj, key, param_data, integrated_save)
each_param["data"] = param_data
param_list.append(each_param)
save_obj = param_list
if append_dict:
append_info_list = []
for k_name, value in append_dict.items():
append_info_list.append({"name": k_name, "data": Tensor(value)})
save_obj.extend(append_info_list)
data_list = {}
with _ckpt_mutex:
for param in save_obj:
key = param["name"]
data_list[key] = []
if isinstance(param["data"], Parameter):
param["data"].init_data()
dims = []
if param['data'].shape == ():
dims.append(0)
else:
for dim in param['data'].shape:
dims.append(dim)
data_list[key].append(dims)
tensor_type = str(param["data"].dtype)
data_list[key].append(tensor_type)
data = param["data"].asnumpy().reshape(-1)
data_list[key].append(data)
ckpt_file_name = os.path.realpath(ckpt_file_name)
if async_save:
data_copy = copy.deepcopy(data_list)
thr = Thread(target=_exec_save, args=(ckpt_file_name, data_copy, enc_key, enc_mode), name="asyn_save_ckpt")
thr.start()
else:
_exec_save(ckpt_file_name, data_list, enc_key, enc_mode)
logger.info("Saving checkpoint process is finished.")
def _check_param_prefix(filter_prefix, param_name):
for prefix in filter_prefix:
if param_name.find(prefix) == 0 \
and (param_name == prefix or param_name[len(prefix)] == "." or (prefix and prefix[-1] == ".")):
return True
return False
def _check_append_dict(append_dict):
if append_dict is None:
return append_dict
if not isinstance(append_dict, dict):
raise TypeError("For 'save_checkpoint', the argument 'append_dict' must be dict, but got "
"{}.".format(type(append_dict)))
for key, value in append_dict.items():
if not isinstance(key, str) or not isinstance(value, (int, float, bool)):
raise TypeError(f"For 'save_checkpoint', the type of dict 'append_info' must be key: string, "
f"value: int, float or bool, but got key: {type(key)}, value: {type(value)}")
return append_dict
def load(file_name, **kwargs):
    """Load a MindIR file and return a runnable graph object."""
if not isinstance(file_name, str):
raise ValueError("For 'load', the argument 'file_name' must be string, but "
"got {}.".format(type(file_name)))
if not file_name.endswith(".mindir"):
raise ValueError("For 'load', the argument 'file_name'(MindIR file) should end with '.mindir', "
"please input the correct 'file_name'.")
if not os.path.exists(file_name):
raise ValueError("For 'load', the argument 'file_name'(MindIR file) does not exist, "
"please check whether the 'file_name' is correct.")
file_name = os.path.realpath(file_name)
logger.info("Execute the process of loading mindir.")
if 'dec_key' in kwargs.keys():
dec_key = Validator.check_isinstance('dec_key', kwargs['dec_key'], bytes)
dec_mode = 'AES-GCM'
if 'dec_mode' in kwargs.keys():
dec_mode = Validator.check_isinstance('dec_mode', kwargs['dec_mode'], str)
graph = load_mindir(file_name, dec_key=dec_key, key_len=len(dec_key), dec_mode=dec_mode)
else:
graph = load_mindir(file_name)
if graph is None:
if _is_cipher_file(file_name):
raise RuntimeError("Load MindIR failed. The file may be encrypted and decrypt failed, you "
"can check whether the values of the arguments 'dec_key' and 'dec_mode'"
" are the same as when exported MindIR file.")
raise RuntimeError("Load MindIR failed.")
return graph
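# Illustrative usage sketch ("net.mindir" and the input shape are assumptions):
# the graph object returned by load() can be wrapped in nn.GraphCell and
# invoked like an ordinary network.
def _load_mindir_example():
    graph = load("net.mindir")
    net = nn.GraphCell(graph)
    return net(Tensor(np.ones((1, 3, 224, 224), np.float32)))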
def load_checkpoint(ckpt_file_name, net=None, strict_load=False, filter_prefix=None, dec_key=None, dec_mode="AES-GCM"):
    """Load checkpoint info from the specified file and return a dict mapping parameter names to Parameters."""
ckpt_file_name, filter_prefix = _check_checkpoint_param(ckpt_file_name, filter_prefix)
dec_key = Validator.check_isinstance('dec_key', dec_key, (type(None), bytes))
dec_mode = Validator.check_isinstance('dec_mode', dec_mode, str)
logger.info("Execute the process of loading checkpoint files.")
checkpoint_list = Checkpoint()
try:
if dec_key is None:
with open(ckpt_file_name, "rb") as f:
pb_content = f.read()
else:
pb_content = _decrypt(ckpt_file_name, dec_key, len(dec_key), dec_mode)
if pb_content is None:
raise ValueError("For 'load_checkpoint', Failed to decrypt the checkpoint file.")
checkpoint_list.ParseFromString(pb_content)
except BaseException as e:
        if _is_cipher_file(ckpt_file_name):
            logger.critical("Failed to read the checkpoint file '%s'. The file may be encrypted, please pass in the "
                            "correct 'dec_key'.", ckpt_file_name)
        else:
            logger.critical("Failed to read the checkpoint file '%s', you may not have permission to read it, "
                            "please check whether the file is correct.", ckpt_file_name)
        raise ValueError(e.__str__() + "\nFor 'load_checkpoint', failed to read the checkpoint file {}, you may "
                         "not have permission to read it.".format(ckpt_file_name))
parameter_dict = {}
try:
param_data_list = []
for element_id, element in enumerate(checkpoint_list.value):
if filter_prefix is not None and _check_param_prefix(filter_prefix, element.tag):
continue
data = element.tensor.tensor_content
data_type = element.tensor.tensor_type
np_type = tensor_to_np_type[data_type]
ms_type = tensor_to_ms_type[data_type]
element_data = np.frombuffer(data, np_type)
param_data_list.append(element_data)
if (element_id == len(checkpoint_list.value) - 1) or \
(element.tag != checkpoint_list.value[element_id + 1].tag):
param_data = np.concatenate((param_data_list), axis=0)
param_data_list.clear()
dims = element.tensor.dims
if dims == [0]:
if 'Float' in data_type:
param_data = float(param_data[0])
elif 'Int' in data_type:
param_data = int(param_data[0])
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
elif dims == [1]:
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
else:
param_dim = []
for dim in dims:
param_dim.append(dim)
param_value = param_data.reshape(param_dim)
parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)
logger.info("Loading checkpoint files process is finished.")
except BaseException as e:
logger.critical("Failed to load the checkpoint file '%s'.", ckpt_file_name)
raise ValueError(e.__str__() + "\nFailed to load the checkpoint file {}.".format(ckpt_file_name))
if not parameter_dict:
raise ValueError(f"The loaded parameter dict is empty after filtering, please check whether "
f"'filter_prefix' was set to filter out all parameters.")
if net is not None:
load_param_into_net(net, parameter_dict, strict_load)
return parameter_dict
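# Illustrative usage sketch ("net.ckpt" and the "moments" prefix are
# assumptions): load a checkpoint standalone to get the parameter dict, using
# filter_prefix to drop e.g. optimizer state before loading with
# load_param_into_net below.
def _load_checkpoint_example(net):
    param_dict = load_checkpoint("net.ckpt", filter_prefix="moments")
    return load_param_into_net(net, param_dict)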
def _check_checkpoint_param(ckpt_file_name, filter_prefix=None):
if not isinstance(ckpt_file_name, str):
raise ValueError("For 'load_checkpoint', the argument 'ckpt_file_name' must be string, "
"but got {}.".format(type(ckpt_file_name)))
if not os.path.exists(ckpt_file_name):
raise ValueError("For 'load_checkpoint', the checkpoint file does not exist, please check "
"whether the 'ckpt_file_name' is correct.")
if ckpt_file_name[-5:] != ".ckpt":
raise ValueError("For 'load_checkpoint', the checkpoint file should end with '.ckpt', please "
"input the correct 'ckpt_file_name'.")
ckpt_file_name = os.path.realpath(ckpt_file_name)
if filter_prefix is not None:
if not isinstance(filter_prefix, (str, list, tuple)):
raise TypeError(f"For 'load_checkpoint', the type of 'filter_prefix' must be string, "
f"list[string] or tuple[string] when 'filter_prefix' is not None, but "
f"got {str(type(filter_prefix))}.")
if isinstance(filter_prefix, str):
filter_prefix = (filter_prefix,)
if not filter_prefix:
raise ValueError("For 'load_checkpoint', the 'filter_prefix' can't be empty when "
"'filter_prefix' is list or tuple.")
for index, prefix in enumerate(filter_prefix):
if not isinstance(prefix, str):
raise TypeError(f"For 'load_checkpoint', when 'filter_prefix' is list or tuple, "
f"the element in 'filter_prefix' must be string, but got "
f"{str(type(prefix))} at index {index}.")
return ckpt_file_name, filter_prefix
def load_param_into_net(net, parameter_dict, strict_load=False):
    """Load parameters into the network and return the list of parameter names that were not loaded."""
if not isinstance(net, nn.Cell):
logger.critical("Failed to combine the net and the parameters.")
msg = ("For 'load_param_into_net', the argument 'net' should be a Cell, but got {}.".format(type(net)))
raise TypeError(msg)
if not isinstance(parameter_dict, dict):
logger.critical("Failed to combine the net and the parameters.")
msg = ("For 'load_param_into_net', the argument 'parameter_dict' should be a dict, "
"but got {}.".format(type(parameter_dict)))
raise TypeError(msg)
strict_load = Validator.check_bool(strict_load)
logger.info("Execute the process of loading parameters into net.")
net.init_parameters_data()
param_not_load = []
for _, param in net.parameters_and_names():
if param.name in parameter_dict:
new_param = copy.deepcopy(parameter_dict[param.name])
if not isinstance(new_param, Parameter):
logger.critical("Failed to combine the net and the parameters.")
msg = ("For 'load_param_into_net', the element in the argument 'parameter_dict' should be a "
"'Parameter', but got {}.".format(type(new_param)))
raise TypeError(msg)
_update_param(param, new_param, strict_load)
else:
param_not_load.append(param.name)
if param_not_load and not strict_load:
        _load_mismatch_prefix_params(net, parameter_dict, param_not_load, strict_load)
logger.debug("Params not matched(in net but not in parameter_dict):")
for param_name in param_not_load:
logger.debug("%s", param_name)
logger.info("Loading parameters into net is finished.")
if param_not_load:
logger.warning("{} parameters in the 'net' are not loaded, because they are not in the "
"'parameter_dict'.".format(len(param_not_load)))
for param_name in param_not_load:
logger.warning("{} is not loaded.".format(param_name))
return param_not_load
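# Illustrative usage sketch: the returned list names the parameters that were
# left untouched, letting a caller decide whether a partial load is acceptable.
def _check_full_load_example(net, param_dict):
    param_not_load = load_param_into_net(net, param_dict)
    if param_not_load:
        raise RuntimeError("Parameters missing from the checkpoint: {}".format(param_not_load))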
def _load_mismatch_prefix_params(net, parameter_dict, param_not_load, strict_load):
prefix_name = ""
longest_name = param_not_load[0]
while prefix_name != longest_name and param_not_load:
logger.debug("Count: {} parameters has not been loaded, try to continue loading.".format(len(param_not_load)))
prefix_name = longest_name
for net_param_name in param_not_load:
for dict_name in parameter_dict:
if dict_name.endswith(net_param_name):
prefix_name = dict_name[:-len(net_param_name)]
break
if prefix_name != longest_name:
break
if prefix_name != longest_name:
logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
for _, param in net.parameters_and_names():
new_param_name = prefix_name + param.name
if param.name in param_not_load and new_param_name in parameter_dict:
new_param = parameter_dict[new_param_name]
_update_param(param, new_param, strict_load)
param_not_load.remove(param.name)
def _save_graph(network, file_name):
logger.info("Execute the process of saving graph.")
file_name = os.path.realpath(file_name)
graph_pb = network.get_func_graph_proto()
if graph_pb:
with open(file_name, "wb") as f:
os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
f.write(graph_pb)
def _get_merged_param_data(net, param_name, param_data, integrated_save):
layout = net.parameter_layout_dict[param_name]
if len(layout) < 6:
logger.info("The layout dict does not contain the key %s", param_name)
return param_data
dev_mat = layout[0]
tensor_map = layout[1]
uniform_split = layout[4]
opt_shard_group = layout[5]
allgather_net = None
mp_weight = False
for dim in tensor_map:
if dim != -1:
mp_weight = True
break
if param_name in net.parallel_parameter_merge_net_dict:
allgather_net = net.parallel_parameter_merge_net_dict[param_name]
else:
logger.info("Need to create allgather net for %s", param_name)
if integrated_save:
if context.get_auto_parallel_context("pipeline_stages") > 1:
raise RuntimeError("Pipeline Parallel don't support Integrated save checkpoint now.")
if uniform_split == 0:
raise RuntimeError("Integrated save checkpoint only support uniform split tensor now.")
            # if any dim is not equal to -1, the param is split and needs to be merged
            # pipeline parallel needs to be supported here later
if mp_weight:
allgather_net = get_allgather_cell(opt_shard_group, bool(opt_shard_group))
elif opt_shard_group:
allgather_net = get_allgather_cell(opt_shard_group, False)
elif opt_shard_group and context.get_auto_parallel_context("optimizer_weight_shard_aggregated_save"):
allgather_net = get_allgather_cell(opt_shard_group, False)
net.parallel_parameter_merge_net_dict[param_name] = allgather_net
if allgather_net:
param_data = allgather_net(param_data)
if mp_weight and integrated_save:
param_data = _reshape_param_data(param_data, dev_mat, tensor_map)
return param_data
def _fill_param_into_net(net, parameter_list):
parameter_dict = {}
for each_param in parameter_list:
param_name = each_param["name"]
if isinstance(each_param["data"], Parameter):
each_param["data"].init_data()
np_val = each_param["data"].asnumpy()
if np_val.shape == (1,):
parameter_dict[param_name] = Parameter(np_val, name=param_name)
elif np_val.shape == ():
parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
name=param_name)
else:
parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='AIR', **kwargs):
    """Export the MindSpore network into the specified format ('AIR', 'ONNX' or 'MINDIR')."""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
if check_input_dataset(*inputs, dataset_type=mindspore.dataset.Dataset):
if len(inputs) != 1:
raise RuntimeError(f"You can only serialize one dataset into MindIR, got " + str(len(inputs)) + " datasets")
shapes, types, columns = inputs[0].output_shapes(), inputs[0].output_types(), inputs[0].get_col_names()
kwargs['dataset'] = inputs[0]
only_support_col = "image"
inputs = list()
for c, s, t in zip(columns, shapes, types):
if only_support_col != c:
continue
inputs.append(Tensor(np.random.uniform(-1.0, 1.0, size=s).astype(t)))
if not inputs:
raise RuntimeError(f"Only supports parse \"image\" column from dataset now, given dataset has columns: "
+ str(columns))
inputs = tuple(inputs)
else:
check_input_data(*inputs, data_class=Tensor)
Validator.check_file_name_by_regular(file_name)
file_name = os.path.realpath(file_name)
net = _quant_export(net, *inputs, file_format=file_format, **kwargs)
if 'enc_key' in kwargs.keys():
if file_format != 'MINDIR':
raise ValueError(f"For 'export', 'enc_key' can be passed in only when 'file_format' == 'MINDIR',"
f" but got 'file_format' {file_format}.")
enc_key = Validator.check_isinstance('enc_key', kwargs['enc_key'], bytes)
enc_mode = 'AES-GCM'
if 'enc_mode' in kwargs.keys():
enc_mode = Validator.check_isinstance('enc_mode', kwargs['enc_mode'], str)
dataset = kwargs['dataset'] if 'dataset' in kwargs.keys() else None
_export(net, file_name, file_format, *inputs, enc_key=enc_key, enc_mode=enc_mode, dataset=dataset)
else:
_export(net, file_name, file_format, *inputs, **kwargs)
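# Illustrative usage sketch (the input shape and file name are assumptions):
# exporting requires a representative input Tensor so the graph can be
# compiled with concrete shapes before serialization.
def _export_mindir_example(net):
    dummy_input = Tensor(np.ones((1, 3, 224, 224), np.float32))
    export(net, dummy_input, file_name="net", file_format="MINDIR")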
def _export(net, file_name, file_format, *inputs, **kwargs):
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
if file_format == 'GEIR':
logger.warning(f"Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.")
file_format = 'AIR'
supported_formats = ['AIR', 'ONNX', 'MINDIR']
if file_format not in supported_formats:
raise ValueError(f"For 'export', 'file_format' must be one of {supported_formats}, but got {file_format}.")
    # When dumping an ONNX file, switch the network to inference mode if it is training (NOTE: ONNX is only designed for prediction)
is_dump_onnx_in_training = net.training and file_format == 'ONNX'
if is_dump_onnx_in_training:
net.set_train(mode=False)
if file_format == 'AIR':
phase_name = 'export.air'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)
if not file_name.endswith('.air'):
file_name += ".air"
if os.path.exists(file_name):
os.chmod(file_name, stat.S_IWUSR)
if "/" in file_name:
real_path = os.path.realpath(file_name[:file_name.rfind("/")])
os.makedirs(real_path, exist_ok=True)
_executor.export(file_name, graph_id)
os.chmod(file_name, stat.S_IRUSR)
elif file_format == 'ONNX':
total_size = _calculation_net_size(net)
if total_size > PROTO_LIMIT_SIZE:
            raise RuntimeError('Export ONNX model failed. Network size is {}G, which exceeds the protobuf limit of '
                               '{}G.'.format(total_size / 1024 / 1024, PROTO_LIMIT_SIZE / 1024 / 1024))
phase_name = 'export.onnx'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(net, graph_id)
if not file_name.endswith('.onnx'):
file_name += ".onnx"
if os.path.exists(file_name):
os.chmod(file_name, stat.S_IWUSR)
with open(file_name, 'wb') as f:
f.write(onnx_stream)
os.chmod(file_name, stat.S_IRUSR)
elif file_format == 'MINDIR':
_save_mindir(net, file_name, *inputs, **kwargs)
if is_dump_onnx_in_training:
net.set_train(mode=True)
def _generate_front_info_for_param_data_file(is_encrypt, kwargs):
front_info = bytes()
check_code = sys.byteorder == "little"
front_info += check_code.to_bytes(1, byteorder=sys.byteorder)
front_info += bytes(63)
if is_encrypt():
front_info = _encrypt(front_info, len(front_info), kwargs['enc_key'],
len(kwargs['enc_key']), kwargs['enc_mode'])
return front_info
def _change_file(f, dirname, external_local, is_encrypt, kwargs):
    # The parameter data has not yet been written to the file
front_info = _generate_front_info_for_param_data_file(is_encrypt, kwargs)
f.seek(0, 0)
f.write(front_info)
f.close()
ori_data_file_name = f.name
os.chmod(ori_data_file_name, stat.S_IRUSR)
if os.path.getsize(ori_data_file_name) == 64:
raise RuntimeError("The parameter size is exceed 1T,cannot export to the file")
data_file_name = os.path.join(dirname, external_local)
return _get_data_file(is_encrypt, kwargs, data_file_name)
def _get_data_file(is_encrypt, kwargs, data_file_name):
    # Reserve 64 bytes for bookkeeping information such as check data
offset = 64
if os.path.exists(data_file_name):
os.chmod(data_file_name, stat.S_IWUSR)
f = open(data_file_name, "wb")
place_holder_data = bytes(offset)
if is_encrypt():
place_holder_data = _encrypt(place_holder_data, len(place_holder_data), kwargs["enc_key"],
len(kwargs["enc_key"]), kwargs["enc_mode"])
f.write(place_holder_data)
parameter_size = (offset / 1024)
return f, parameter_size, offset
def _split_save(net_dict, model, file_name, is_encrypt, **kwargs):
    """Save the MindIR graph and the parameter data in separate files when the parameters exceed the size limit."""
logger.warning("Parameters in the net capacity exceeds 1G, save MindIR model and parameters separately.")
# save parameter
file_prefix = file_name.split("/")[-1]
if file_prefix.endswith(".mindir"):
file_prefix = file_prefix[:-7]
current_path = os.path.abspath(file_name)
dirname = os.path.dirname(current_path)
data_path = os.path.join(dirname, file_prefix + "_variables")
if os.path.exists(data_path):
shutil.rmtree(data_path)
os.makedirs(data_path, exist_ok=True)
os.chmod(data_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
index = 0
external_local = os.path.join(file_prefix + "_variables", "data_" + str(index))
data_file_name = os.path.join(dirname, external_local)
f, parameter_size, offset = _get_data_file(is_encrypt, kwargs, data_file_name)
try:
for param_proto in model.graph.parameter:
name = param_proto.name[param_proto.name.find(":") + 1:]
param = net_dict[name]
raw_data = param.data.asnumpy().tobytes()
data_length = len(raw_data)
append_size = 0
if data_length % 64 != 0:
append_size = 64 - (data_length % 64)
parameter_size += ((append_size + data_length) / 1024)
if parameter_size > PARAMETER_SPLIT_SIZE:
index += 1
external_local = os.path.join(file_prefix + "_variables", "data_" + str(index))
f, parameter_size, offset = _change_file(f, dirname, external_local, is_encrypt, kwargs)
parameter_size += ((append_size + data_length) / 1024)
param_proto.external_data.location = external_local
param_proto.external_data.length = data_length
param_proto.external_data.offset = offset
write_data = raw_data + bytes(append_size)
offset += (data_length + append_size)
if is_encrypt():
write_data = _encrypt(write_data, len(write_data), kwargs['enc_key'],
len(kwargs['enc_key']), kwargs['enc_mode'])
f.write(write_data)
# save graph
graph_file_name = os.path.join(dirname, file_prefix + "_graph.mindir")
if os.path.exists(graph_file_name):
os.chmod(graph_file_name, stat.S_IWUSR)
with open(graph_file_name, 'wb') as model_file:
os.chmod(graph_file_name, stat.S_IRUSR | stat.S_IWUSR)
model_string = model.SerializeToString()
if is_encrypt():
model_string = _encrypt(model_string, len(model_string), kwargs['enc_key'],
len(kwargs['enc_key']),
kwargs['enc_mode'])
model_file.write(model_string)
os.chmod(graph_file_name, stat.S_IRUSR)
front_info = _generate_front_info_for_param_data_file(is_encrypt, kwargs)
f.seek(0, 0)
f.write(front_info)
finally:
f.close()
os.chmod(data_file_name, stat.S_IRUSR)
def _save_mindir(net, file_name, *inputs, **kwargs):
if context._get_mode() == context.PYNATIVE_MODE:
raise RuntimeError("MindIR export is not support in the Pynative mode, please convert to the Graph Mode.")
model = mindir_model()
phase_name = "predict" if net._auto_parallel_mode else "export.mindir"
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name,
do_convert=False, auto_parallel_mode=net._auto_parallel_mode)
mindir_stream = _executor._get_func_graph_proto(net, graph_id, 'mind_ir')
net_dict = net.parameters_dict()
model.ParseFromString(mindir_stream)
if 'dataset' in kwargs.keys() and kwargs['dataset'] is not None:
check_input_data(kwargs['dataset'], data_class=mindspore.dataset.Dataset)
dataset = kwargs['dataset']
_save_dataset_to_mindir(model, dataset)
save_together = _save_together(net_dict, model)
is_encrypt = lambda: 'enc_key' in kwargs.keys() and 'enc_mode' in kwargs.keys()
if save_together:
_save_mindir_together(net_dict, model, file_name, is_encrypt, **kwargs)
else:
        _split_save(net_dict, model, file_name, is_encrypt, **kwargs)
def _save_mindir_together(net_dict, model, file_name, is_encrypt, **kwargs):
for param_proto in model.graph.parameter:
param_name = param_proto.name[param_proto.name.find(":") + 1:]
if param_name in net_dict.keys():
param_data = net_dict[param_name].data.asnumpy().tobytes()
param_proto.raw_data = param_data
else:
logger.critical("The parameter %s in the graph should also be defined in the network.", param_name)
raise ValueError("The parameter {} in the graph should also be defined in the "
"network.".format(param_name))
if not file_name.endswith('.mindir'):
file_name += ".mindir"
current_path = os.path.abspath(file_name)
dirname = os.path.dirname(current_path)
os.makedirs(dirname, exist_ok=True)
if os.path.exists(file_name):
os.chmod(file_name, stat.S_IWUSR)
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IRUSR | stat.S_IWUSR)
model_string = model.SerializeToString()
if is_encrypt():
model_string = _encrypt(model_string, len(model_string), kwargs['enc_key'], len(kwargs['enc_key']),
kwargs['enc_mode'])
f.write(model_string)
os.chmod(file_name, stat.S_IRUSR)
def _save_together(net_dict, model):
data_total = 0
for param_proto in model.graph.parameter:
name = param_proto.name[param_proto.name.find(":") + 1:]
if name in net_dict.keys():
data_total += sys.getsizeof(net_dict[name].data.asnumpy().tobytes()) / 1024
else:
            raise RuntimeError('The graph parameter {} is not defined in the network.'.format(param_proto.name))
if data_total > TOTAL_SAVE:
return False
return True
def _save_dataset_to_mindir(model, dataset):
dataset_json = dataset.to_json()
reverse_dataset = []
while dataset_json:
reverse_dataset = [dataset_json] + reverse_dataset
if len(dataset_json['children']) > 1:
logger.warning("Need to support dataset_node with more than one child, using child 0 as default.")
dataset_json = dataset_json['children'][0] if dataset_json['children'] else []
for op in reverse_dataset:
if op['op_type'] == 'Map':
model.preprocessor.op.add()
model.preprocessor.op[-1].input_columns = json.dumps(op['input_columns'])
model.preprocessor.op[-1].output_columns = json.dumps(op['output_columns'])
model.preprocessor.op[-1].project_columns = json.dumps(op['project_columns'])
model.preprocessor.op[-1].op_type = json.dumps(op['op_type'])
model.preprocessor.op[-1].operations = json.dumps(op['operations'])
model.preprocessor.op[-1].offload = op['offload'] if 'offload' in op.keys() else False
def quant_mode_manage(func):
    def wrapper(network, *inputs, file_format, **kwargs):
if 'quant_mode' not in kwargs:
return network
quant_mode = kwargs['quant_mode']
if not isinstance(quant_mode, str):
raise TypeError("For 'export', the type of 'quant_mode' should be string, "
"but got {}.".format(type(quant_mode)))
if quant_mode in ('AUTO', 'MANUAL'):
kwargs['quant_mode'] = 'QUANT'
return func(network, *inputs, file_format=file_format, **kwargs)
    return wrapper
@quant_mode_manage
def _quant_export(network, *inputs, file_format, **kwargs):
supported_device = ["Ascend", "GPU"]
supported_formats = ['AIR', 'MINDIR']
quant_mode_formats = ['QUANT', 'NONQUANT']
quant_mode = kwargs['quant_mode']
if quant_mode not in quant_mode_formats:
raise KeyError(f"For 'export', the argument 'quant_mode' must be one of {quant_mode_formats}, "
f"but got {quant_mode}.")
if quant_mode == 'NONQUANT':
return network
quant_net = copy.deepcopy(network)
quant_net._create_time = int(time.time() * 1e9)
mean = 127.5 if kwargs.get('mean', None) is None else kwargs['mean']
std_dev = 127.5 if kwargs.get('std_dev', None) is None else kwargs['std_dev']
mean = Validator.check_value_type("mean", mean, (int, float))
std_dev = Validator.check_value_type("std_dev", std_dev, (int, float))
if context.get_context('device_target') not in supported_device:
raise KeyError(f"For 'export', quant export only support {supported_device} device target now, "
f"but got {context.get_context('device_target')}")
if file_format not in supported_formats:
raise ValueError(f"For 'export', quant export only support 'file_format' {supported_formats}, "
f"but got {file_format}.")
quant_net.set_train(False)
if file_format == "MINDIR":
exporter = quant_export.ExportToQuantInferNetwork(quant_net, mean, std_dev, *inputs, is_mindir=True)
else:
exporter = quant_export.ExportToQuantInferNetwork(quant_net, mean, std_dev, *inputs)
deploy_net = exporter.run()
return deploy_net
def parse_print(print_file_name):
print_file_path = os.path.realpath(print_file_name)
if os.path.getsize(print_file_path) == 0:
raise ValueError("For 'parse_print', the print file may be empty, please make sure enter the correct "
"'print_file_name'.")
logger.info("Execute load print process.")
print_list = Print()
try:
with open(print_file_path, "rb") as f:
pb_content = f.read()
print_list.ParseFromString(pb_content)
except BaseException as e:
logger.critical("Failed to read the print file %s, please check whether the file is "
"correct.", print_file_name)
raise ValueError(e.__str__() + "\nFailed to read the print file {}, please check whether "
"the file is correct.".format(print_file_name))
tensor_list = []
try:
for print_ in print_list.value:
# String type
if print_.HasField("desc"):
tensor_list.append(print_.desc)
elif print_.HasField("tensor"):
dims = print_.tensor.dims
data_type = print_.tensor.tensor_type
data = print_.tensor.tensor_content
np_type = tensor_to_np_type[data_type]
                param_data = np.frombuffer(data, np_type)
ms_type = tensor_to_ms_type[data_type]
if dims and dims != [0]:
param_value = param_data.reshape(dims)
tensor_list.append(Tensor(param_value, ms_type))
# Scalar type
else:
data_type_ = data_type.lower()
if 'float' in data_type_:
param_data = float(param_data[0])
elif 'int' in data_type_:
param_data = int(param_data[0])
elif 'bool' in data_type_:
param_data = bool(param_data[0])
tensor_list.append(Tensor(param_data, ms_type))
except BaseException as e:
logger.critical("Failed to load the print file %s.", print_list)
raise RuntimeError(e.__str__() + "\nFailed to load the print file {}.".format(print_list))
return tensor_list
def _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):
layout = strategy.get(parameter_name)
try:
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
except BaseException as e:
raise ValueError(f"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.")
device_count = 1
for dim in dev_mat:
device_count *= dim
if len(sliced_data) != device_count:
raise ValueError(f"For 'merge_sliced_parameter', the length of 'sliced_parameters' should be equal to "
f"device_count. The length of 'sliced_parameters' is {len(sliced_data)}, but "
f"device_count is {device_count}.")
if not param_split_shape:
if not is_even:
raise ValueError("When the shape of every parameter in 'sliced_parameters' is same, "
"the 'is_even' should be True, but got {}.".format(is_even))
all_gather_tensor = Tensor(np.concatenate(sliced_data))
if field_size > 0:
merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, field_size)
else:
merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
else:
tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
slice_count = 1
for dim in tensor_strategy:
slice_count *= dim
if len(param_split_shape) != slice_count:
raise ValueError(f"The param_split_shape length in strategy should be {slice_count}, "
f"but got {len(param_split_shape)}.")
tensor_slices_new = list(range(slice_count))
tensor_slices = sliced_data
for i in range(device_count):
slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
if tensor_slices[i].shape[0] != param_split_shape[slice_index]:
raise ValueError(f"The slice {slice_index} should be {param_split_shape[slice_index]} in 0 axis, "
f"but got {tensor_slices[i].shape[0]}.")
tensor_slices_new[slice_index] = np.array(tensor_slices[i])
dim_len = len(tensor_strategy)
for i in range(dim_len):
ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])
tensor_slices_new_inner = []
for j in range(ele_count):
new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]
for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,
(j + 1) * tensor_strategy[dim_len - 1 - i]):
new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)
tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))
tensor_slices_new = tensor_slices_new_inner
merged_tensor = Tensor(tensor_slices_new[0])
return merged_tensor
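# Simplified illustration of the even-split branch above: with an empty
# 'param_split_shape', the slices are concatenated along axis 0 before being
# rearranged via the device matrix / tensor map. A NumPy-only toy with invented
# shapes; _reshape_param_data would then permute the result per tensor_map.
def _demo_even_merge():
    slices = [np.arange(4).reshape(2, 2), np.arange(4, 8).reshape(2, 2)]
    return np.concatenate(slices)   # shape (4, 2)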
def restore_group_info_list(group_info_file_name):
if not isinstance(group_info_file_name, str):
raise TypeError(f"The group_info_file_name should be str, but got {type(group_info_file_name)}.")
if not os.path.isfile(group_info_file_name):
raise ValueError(f"No such group info file: {group_info_file_name}.")
if os.path.getsize(group_info_file_name) == 0:
raise ValueError("The group info file should not be empty.")
parallel_group_map = ParallelGroupMap()
with open(group_info_file_name, 'rb') as f:
pb_content = f.read()
parallel_group_map.ParseFromString(pb_content)
restore_list = parallel_group_map.ckpt_restore_rank_list
if not restore_list:
raise ValueError("The group info file has no restore rank list.")
restore_rank_list = [rank for rank in restore_list.dim]
return restore_rank_list
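# Usage sketch (file name hypothetical): the returned list holds the ranks whose
# checkpoint files are needed for restoration, e.g. [0, 8, 16, 24].
def _demo_restore_group_info():
    return restore_group_info_list("./group_info.pb")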
def build_searched_strategy(strategy_filename):
if not isinstance(strategy_filename, str):
raise TypeError(f"For 'build_searched_strategy', the 'strategy_filename' should be string, "
f"but got {type(strategy_filename)}.")
if not os.path.isfile(strategy_filename):
raise ValueError(f"For 'build_searched_strategy', no such strategy file: {strategy_filename}. "
f"Please check whether the 'strategy_filename' exists.")
if os.path.getsize(strategy_filename) == 0:
raise ValueError(f"For 'build_searched_strategy', the strategy file {strategy_filename} should not "
f"be empty. Please check whether the 'strategy_filename' is correct.")
parallel_strategy_map = ParallelStrategyMap()
with open(strategy_filename, 'rb') as f:
pb_content = f.read()
parallel_strategy_map.ParseFromString(pb_content)
layout_items = parallel_strategy_map.parallel_layout_item
if not layout_items:
raise ValueError(f"For 'build_searched_strategy', the strategy file {strategy_filename} has no sliced "
f"parameter, please check whether the 'strategy_filename' is correct.")
strategy = {}
for layout_item in layout_items:
parameter_name = layout_item.param_name
layout = layout_item.parallel_layouts
strategy[parameter_name] = layout
return strategy
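# Usage sketch (file and parameter names hypothetical): the result maps each
# parameter name to its ParallelLayouts protobuf message.
def _demo_build_strategy():
    strategy = build_searched_strategy("./strategy_train.ckpt")
    return strategy.get("network.embedding_table")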
def merge_sliced_parameter(sliced_parameters, strategy=None):
if not isinstance(sliced_parameters, list):
raise TypeError(f"For 'merge_sliced_parameter', the 'sliced_parameters' should be list, "
f"but got {type(sliced_parameters)}.")
if not sliced_parameters:
raise ValueError("For 'merge_sliced_parameter', the 'sliced_parameters' should not be empty.")
if strategy and not isinstance(strategy, dict):
raise TypeError(f"For 'merge_sliced_parameter', the 'strategy' should be dict, but got {type(strategy)}.")
try:
parameter_name = sliced_parameters[0].name
parameter_shape = sliced_parameters[0].data.shape
parameter_shape_length = len(parameter_shape)
except BaseException as e:
raise TypeError(e.__str__() + f" For 'merge_sliced_parameter', the element in 'sliced_parameters' should be "
f"'Parameter', but got {type(sliced_parameters[0])} at index 0.")
is_even = True
for index, parameter in enumerate(sliced_parameters):
if not isinstance(parameter, Parameter):
raise TypeError(f"For 'merge_sliced_parameter', the element in 'sliced_parameters' should be 'Parameter', "
f"but got {type(parameter)} at index {index}.")
if parameter.name != parameter_name \
or len(parameter.data.shape) != parameter_shape_length \
or parameter.data.shape[1:] != parameter_shape[1:]:
raise ValueError(f"For 'merge_sliced_parameter', please make sure that the elements in 'slice_parameters'"
f" have the same name, dimension length and shape except 0 axis. The name, dimension "
f"length, shape except 0 axis should be {parameter_name}, {parameter_shape_length}, "
f"{parameter_shape[1:]}, but got name: {parameter.name}, dimension length: "
f"{len(parameter.data.shape)}, shape except 0 axis: {parameter.data.shape[1:]} "
f"at index {index}.")
if parameter.data.shape != parameter_shape:
is_even = False
layerwise_parallel = sliced_parameters[0].layerwise_parallel
requires_grad = sliced_parameters[0].requires_grad
sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]
if not strategy:
merged_tensor = Tensor(np.concatenate(sliced_data))
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
else:
if parameter_name not in strategy.keys():
raise KeyError(f"For 'merge_sliced_parameter', the parameter name {parameter_name} should be a key in "
f"the 'strategy'. Please check 'sliced_parameter' and 'strategy'.")
merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
return merged_parameter
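# Usage sketch for the even, strategy-free path (values invented): two 1-D
# slices of the same parameter are concatenated along axis 0.
def _demo_merge_sliced_parameter():
    slices = [Parameter(Tensor(np.array([0.1, 0.2])), "net.w"),
              Parameter(Tensor(np.array([0.3, 0.4])), "net.w")]
    return merge_sliced_parameter(slices)   # data == [0.1, 0.2, 0.3, 0.4]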
def load_distributed_checkpoint(network, checkpoint_filenames, predict_strategy=None,
train_strategy_filename=None, strict_load=False, dec_key=None, dec_mode='AES-GCM'):
network = Validator.check_isinstance("network", network, nn.Cell)
_check_checkpoint_file(checkpoint_filenames)
_check_predict_strategy(predict_strategy)
dec_key = Validator.check_isinstance('dec_key', dec_key, (type(None), bytes))
dec_mode = Validator.check_isinstance('dec_mode', dec_mode, str)
if train_strategy_filename is None:
train_strategy_filename = context.get_auto_parallel_context("strategy_ckpt_load_file")
_train_strategy = build_searched_strategy(train_strategy_filename)
train_strategy = _convert_to_list(_train_strategy)
train_dev_count = 1
ckpt_file_len = len(checkpoint_filenames)
for dim in train_strategy[list(train_strategy.keys())[0]][0]:
train_dev_count *= dim
    if train_dev_count != ckpt_file_len:
        raise ValueError(f"For 'load_distributed_checkpoint', the length of 'checkpoint_filenames' must equal "
                         f"the device count of the training process, but got {ckpt_file_len} checkpoint "
                         f"files and a device count of {train_dev_count}. Please check whether "
                         f"'checkpoint_filenames' and 'train_strategy_filename' are correct.")
rank_list = _infer_rank_list(train_strategy, predict_strategy)
param_total_dict = defaultdict(dict)
for file_index, file_name in enumerate(checkpoint_filenames):
ckpt_dict = load_checkpoint(file_name, dec_key=dec_key, dec_mode=dec_mode)
for param_name, param in ckpt_dict.items():
param_total_dict[param_name][file_index] = param
param_dict = {}
param_not_in_strategy = []
param_not_in_ckpt = []
for _, param in network.parameters_and_names():
sliced_params = []
if param.name not in rank_list.keys():
param_not_in_strategy.append(param.name)
continue
if param.name not in param_total_dict:
param_not_in_ckpt.append(param.name)
continue
param_rank = rank_list[param.name][0]
skip_merge_split = rank_list[param.name][1]
shard_stride = train_strategy[param.name][4]
if train_strategy[param.name][5]:
            shard_size = int(ckpt_file_len / shard_stride / train_strategy[param.name][5])
else:
shard_size = 0
for rank in param_rank:
param_total_list = list(range(0, ckpt_file_len))
if shard_size > 0:
shard_total_list = [param_total_list[i:i + shard_size] for i in
range(0, ckpt_file_len, shard_size)]
param_total_list = shard_total_list[rank // shard_size]
if shard_stride > 0:
param_stride = []
# merge pre parameter
param_index = param_total_list[0:param_total_list.index(rank) + 1][::-1][::shard_stride]
param_index.extend(param_total_list[param_total_list.index(rank):][::shard_stride])
param_index = list(set(param_index))
param_index.sort()
for rank_num in param_index:
param_stride.append(param_total_dict[param.name][rank_num].data.asnumpy())
sliced_param = Parameter(Tensor(np.concatenate(param_stride)), name=param.name)
else:
sliced_param = param_total_dict[param.name][rank]
sliced_params.append(sliced_param)
if skip_merge_split:
split_param = sliced_params[0]
else:
param_unique_strategy = _remove_repeated_slices(train_strategy[param.name])
_param_unique_strategy = _convert_to_layout(param.name, param_unique_strategy)
split_param = _merge_and_split(sliced_params, _param_unique_strategy, predict_strategy)
opt_shard_group = predict_strategy[param.name][5] if predict_strategy else None
if opt_shard_group:
data = split_param.data.asnumpy()
rank = get_rank(opt_shard_group)
size = get_group_size(opt_shard_group)
try:
data_slice = np.split(data, size)[rank]
except BaseException as e:
logger.critical("Failed to load opt shard slice in load distributed checkpoint for {}. Data shape is {}"
" and group is {}".format(param.name, split_param.data.shape, opt_shard_group))
raise RuntimeError(e.__str__() + f"\nFor 'load_distributed_checkpoint', failed to load opt shard slice"
f" in load distributed checkpoint for {param.name}. Data shape is "
f"{split_param.data.shape} and group is {opt_shard_group}.")
split_param = Parameter(Tensor(data_slice), param.name,
split_param.requires_grad, split_param.layerwise_parallel)
param_dict[param.name] = split_param
if param_not_in_strategy:
logger.warning("{} parameters in network are not in the slice strategy.".format(param_not_in_strategy))
if param_not_in_ckpt:
logger.warning("{} parameters in slice strategy but not in the checkpoint file.".format(param_not_in_ckpt))
load_param_into_net(network, param_dict, strict_load=strict_load)
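# Usage sketch (paths hypothetical): merge per-rank training checkpoints into a
# predict network. When 'train_strategy_filename' is None, the strategy file is
# read from the auto-parallel context instead.
def _demo_load_distributed(predict_net):
    ckpts = ["./rank_{}/model.ckpt".format(i) for i in range(8)]
    load_distributed_checkpoint(predict_net, ckpts,
                                train_strategy_filename="./strategy_train.ckpt")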
def async_ckpt_thread_status():
thr_list = threading.enumerate()
    return any(thr.name == "asyn_save_ckpt" for thr in thr_list)
def _check_predict_strategy(predict_strategy):
def _check_int_list(arg):
if not isinstance(arg, list):
return False
for item in arg:
if not isinstance(item, int):
return False
return True
if predict_strategy is None:
return
flag = True
predict_strategy = Validator.check_isinstance("predict_strategy", predict_strategy, dict)
for key in predict_strategy.keys():
if not isinstance(key, str) or not isinstance(predict_strategy[key], (list, tuple)) \
or len(predict_strategy[key]) < 4:
flag = False
dev_matrix, tensor_map, param_split_shape, field_size = predict_strategy[key][:4]
if not _check_int_list(dev_matrix) or not _check_int_list(tensor_map) or \
not (_check_int_list(param_split_shape) or not param_split_shape) or \
not (isinstance(field_size, int) and field_size == 0):
flag = False
if not flag:
raise ValueError(f"Please make sure that the key of predict_strategy is str, "
f"and the value is a list or a tuple that the first four elements are "
f"dev_matrix (list[int]), tensor_map (list[int]), "
f"param_split_shape (list[int]) and field_size (zero).")
def _check_checkpoint_file(checkpoint_filenames):
for index, filename in enumerate(checkpoint_filenames):
if not isinstance(filename, str) or not os.path.exists(filename) \
or filename[-5:] != ".ckpt" or os.path.getsize(filename) == 0:
raise ValueError(f"For 'load_distributed_checkpoint', please check 'checkpoint_filenames', and "
f"make sure the {filename} at index {index} is a valid checkpoint file, it must "
f"be a string ending with '.ckpt', and the checkpoint file it represents must "
f"be exist and not empty.")
def _convert_to_list(strategy):
train_map = {}
for param_name in strategy.keys():
try:
layout = strategy.get(param_name)
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
shard_stride = int(layout.opt_weight_shard_step)
shard_size = int(layout.opt_weight_shard_size)
train_map[param_name] = [dev_mat, tensor_map, param_split_shape, field_size, shard_stride, shard_size]
except BaseException as e:
raise ValueError(f"{e.__str__()}. Please make sure that strategy matches the node_strategy.proto.")
return train_map
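# Shape of each entry produced above (values invented for illustration):
#     train_map["net.w"] == [[2, 4],  # dev_mat
#                            [1, 0],  # tensor_map
#                            [],      # param_split_shape (even split)
#                            0,       # field_size
#                            1,       # opt_weight_shard_step
#                            0]       # opt_weight_shard_size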
def _convert_to_layout(param_name, tensor_layout):
strategy = {}
try:
layout = ParallelLayouts()
layout.field = tensor_layout[3]
dev_matrix = layout.dev_matrix.add()
for item in tensor_layout[0]:
dev_matrix.dim.append(item)
tensor_map = layout.tensor_map.add()
for item in tensor_layout[1]:
tensor_map.dim.append(item)
param_split_shape = layout.param_split_shape.add()
for item in tensor_layout[2]:
param_split_shape.dim.append(item)
except BaseException as e:
raise ValueError("Convert failed. " + e.__str__())
strategy[param_name] = layout
return strategy
def _merge_and_split(sliced_params, train_strategy, predict_strategy):
merged_param = merge_sliced_parameter(sliced_params, train_strategy)
if predict_strategy is None:
return merged_param
param_name = merged_param.name
tensor_layout = predict_strategy[param_name]
split_tensor = _load_tensor(merged_param.data, tensor_layout[0], tensor_layout[1])
requires_grad = merged_param.requires_grad
layerwise_parallel = merged_param.layerwise_parallel
split_param = Parameter(split_tensor, param_name, requires_grad, layerwise_parallel)
return split_param
def _calculation_net_size(net):
data_total = 0
net_dict = net.parameters_dict()
for name in net_dict:
data_total += sys.getsizeof(net_dict[name].data.asnumpy().tobytes()) / 1024
return data_total
# ==== gplearn/_programparser.py · vossjo/gplearn @ 105181fd020da11bc36b7e31c95f115dd7f05c21 · hexsha 1c3f5220100276b5490f58e7a181978bffecebdd · BSD-3-Clause · 12,233 bytes ====
"""Genetic Programming in Python, with a scikit-learn inspired API
The :mod:`gplearn._programparser` module implements symbolic simplification
of programs via sympy and optimization of numerical parameters via scipy.
"""
# Author: Johannes Voss <https://stanford.edu/~vossj/main/>
#
# Additions to and based on gplearn by Trevor Stephens <trevorstephens.com>
#
# License: BSD 3 clause
from .functions import _Function, _protected_division
import numpy as np
from scipy import optimize
from sympy import symbols, simplify
import ast
def parseexpr(x, fun_list, params):
"""Recursively parse program as mathematical expression.
Parameters
----------
x : ast body
(sub) expression
fun_list: list
mapping to gplearn function objects.
params: list
list of numerically optimized parameters
will be empty after parser has completed
Returns
-------
parsed (sub) expression as flattened tree list
"""
if isinstance(x, ast.BinOp):
l = parseexpr(x.left, fun_list, params)
r = parseexpr(x.right, fun_list, params)
if isinstance(x.op, ast.Add):
return [fun_list[0]]+l+r
elif isinstance(x.op, ast.Sub):
return [fun_list[1]]+l+r
elif isinstance(x.op, ast.Mult):
return [fun_list[2]]+l+r
elif isinstance(x.op, ast.Div):
return [fun_list[3]]+l+r
elif isinstance(x.op, ast.Pow):
# expand powers to products where possible
if len(r)==1 and (type(r[0])==int or abs(round(r[0])-r[0])<1e-11) and r[0]>0 and fun_list[2] is not None:
return (([fun_list[2]]+l)*(int(r[0])-1)) + l
elif fun_list[4] is not None:
return [fun_list[4]]+l+r
else:
                raise RuntimeError('simplification introduced a power operator whose exponent is not a positive integer, and pow is not included in the function list: '+str(r))
else:
raise RuntimeError('unimplemented operation '+str(x.op))
else:
if isinstance(x, ast.Name):
return [int(x.id[1:])]
elif isinstance(x, ast.Num):
if type(x.n)==int:
# integers must be converted to floats here,
# otherwise gplearn will interpret the integer
# as a feature index when executing the program
return [float(x.n)]
elif len(params)==0:
return [float(x.n)]
else:
return [params.pop(0)]
elif isinstance(x, ast.UnaryOp):
o = parseexpr(x.operand, fun_list, params)
if isinstance(x.op, ast.USub):
if fun_list[5] is not None:
return [fun_list[5]]+o
elif fun_list[2] is not None:
return [fun_list[2],-1.]+o
elif fun_list[1] is not None:
return [fun_list[1],0.]+o
else:
                    raise RuntimeError('simplification introduced a negation operator, but the function list does not include any of neg, mul, or sub to represent the negation.')
else:
raise RuntimeError('unimplemented operation '+str(x.op))
else:
raise RuntimeError('unimplemented object '+str(x))
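# Illustrative sketch: for "X0 + 2.5" and a fun_list whose slot 0 holds the
# gplearn add function object, the flattened prefix tree is [add, 0, 2.5]
# (feature index 0 as int, then the float literal).
def _demo_parseexpr(fun_list):
    tree = ast.parse("X0 + 2.5", mode="eval").body
    return parseexpr(tree, fun_list, [])   # -> [fun_list[0], 0, 2.5]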
def parseexpr_to_np(x, params):
"""Recursively parse program as mathematical expression.
Parameters
----------
x : ast body
(sub) expression
params: list
Initially empty list to which numerical parameters found
are appended
Returns
-------
parsed (sub) expression as flattened tree list
"""
if isinstance(x, ast.BinOp):
l = parseexpr_to_np(x.left, params)
r = parseexpr_to_np(x.right, params)
if isinstance(x.op, ast.Add):
return 'np.add('+l+','+r+')'
elif isinstance(x.op, ast.Sub):
return 'np.subtract('+l+','+r+')'
elif isinstance(x.op, ast.Mult):
return 'np.multiply('+l+','+r+')'
elif isinstance(x.op, ast.Div):
return '_protected_division('+l+','+r+')'
elif isinstance(x.op, ast.Pow):
return 'np.power('+l+','+r+')'
else:
raise RuntimeError('unimplemented operation '+str(x.op))
else:
if isinstance(x, ast.Name):
return 'X[:,k+'+x.id[1:]+']'
elif isinstance(x, ast.Num):
# don't treat integers as numerical parameters to be optimized
            if type(x.n)==int or abs(round(float(x.n))-float(x.n))<1e-11:
return str(x.n)
else:
params.append(float(x.n))
return 'z[%d]' % (len(params)-1)
elif isinstance(x, ast.UnaryOp):
o = parseexpr_to_np(x.operand, params)
if isinstance(x.op, ast.USub):
return '(-('+o+'))'
else:
raise RuntimeError('unimplemented operation '+str(x.op))
else:
raise RuntimeError('unimplemented object '+str(x))
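# Illustrative sketch: "X0*2.7" becomes a NumPy expression string whose
# non-integer constant is lifted into the optimizable parameter vector z.
def _demo_parseexpr_to_np():
    params = []
    expr = parseexpr_to_np(ast.parse("X0*2.7", mode="eval").body, params)
    return expr, params   # ('np.multiply(X[:,k+0],z[0])', [2.7])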
def add(x,y):
return x+y
def sub(x,y):
return x-y
def mul(x,y):
return x*y
def dv(x,y):
return x/y
def pw(x,y):
return x**y
def neg(x):
return -x
def program_to_str(program, format='%.15e', skip_nmax_feature=True):
"""Convert program in list representation to string.
Based on __str__ method in _program.py."""
terminals = [0]
output = ''
maxfeature = 0
for i, node in enumerate(program):
if isinstance(node, _Function):
terminals.append(node.arity)
output += node.name + '('
else:
if isinstance(node, int):
output += 'X%s' % node
maxfeature = max(maxfeature,node)
else:
output += format % node
terminals[-1] -= 1
while terminals[-1] == 0:
terminals.pop()
terminals[-1] -= 1
output += ')'
if i != len(program) - 1:
output += ', '
if skip_nmax_feature:
return output
else:
return output, maxfeature
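# Illustrative sketch: a program is a flattened prefix list of _Function objects,
# feature indices (int) and constants (float). With gplearn's mul function,
# [mul, 0, 1] renders as "mul(X0, X1)"; skip_nmax_feature=False also returns the
# highest feature index encountered.
def _demo_program_to_str(mul):
    return program_to_str([mul, 0, 1], skip_nmax_feature=False)   # ('mul(X0, X1)', 1)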
def program_to_math(program, feature_names=None, format='%.8g'):
"""Convert program as math expression with standard operators +, -, *, /
Parameters
----------
program : list
        The program to be converted.
feature_names : list, optional
Variable names of features
format : str, optional
format str for numerical values
Returns
-------
str with mathematical expression
"""
# convert program to string of mathematical expression
s, maxf = program_to_str(program, format=format, skip_nmax_feature=False)
# substitute reserved names for division and power
s = s.replace('div', 'dv').replace('pow', 'pw')
# generate symbol names for features for use with sympy
gpvars0 = ''
gpvars1 = ''
for i in range(maxf):
gpvars0 += 'X%d,' % i
gpvars1 += 'X%d ' % i
gpvars0 += 'X%d' % maxf
gpvars1 += 'X%d' % maxf
exec(gpvars0 + '=symbols("' + gpvars1 +'")')
u = str(eval(s))
# use optional feature variable names
if feature_names is not None:
for i in range(len(feature_names)-1,-1,-1):
u = u.replace('X%d' % i, feature_names[i])
return u
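# Usage sketch (feature names hypothetical): render a program with named
# variables after sympy simplification.
def _demo_program_to_math(program):
    return program_to_math(program, feature_names=['temp', 'pressure'])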
def _optimizer(program, fun_list, n_features, n_program_sum, metric,
X, y, weight):
"""Simplify a program and then optimize its numerical parameters.
Parameters
----------
program : list
The program to be optimized.
fun_list : list of length 6
List mapping the operations in order add, sub, mul, div, pow, neg
to the corresponding gplearn function objects.
n_features : int
number of features
n_program_sum : int
number of programs to be summed up for cost function
metric : instance of gplearn metric
metric to be optimized
X : array-like, shape = [n_samples, n_features*(n_program_sum+1)]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
weight : array-like, shape = [n_samples]
Weights applied to individual samples.
Returns
-------
Simplified and numerically optimized program
"""
# generate symbol names for features for use with sympy
gpvars0 = ''
gpvars1 = ''
for i in range(n_features-1):
gpvars0 += 'X%d,' % i
gpvars1 += 'X%d ' % i
gpvars0 += 'X%d' % (n_features-1)
gpvars1 += 'X%d' % (n_features-1)
exec(gpvars0 + '=symbols("' + gpvars1 +'")')
# convert program to string of mathematical expression
# substitute reserved names for division and power
s = program_to_str(program, format='%.12g').replace('div', 'dv').replace('pow', 'pw')
    # simplify
u = str(simplify(eval(s)))
    # If simplification detects division by zero (which _protected_division would
    # catch at run time) or other overflows, it introduces the sympy symbol oo
    # (or complex zoo, or nan). Such a program is likely not particularly good:
    # simply replace zoo, oo, and nan with 1 here, then optimize as much as possible
uast = ast.parse(u.replace('zoo','1.').replace('oo','1.').replace('nan','1.'),
mode='eval').body
# convert back to numpy expression
params = []
num = parseexpr_to_np(uast, params)
if len(params)>0:
# define cost function to be minimized with scipy
if hasattr(metric.function, '_obj'):
metr = metric.function._obj
else:
metr = metric.function
sign = -metric.sign
if weight is None:
weights = np.ones_like(y)
else:
weights = weight
local = {'X': X, 'y': y, 'w': weights, 'sign': sign,
'metr': metr, 'n': n_program_sum, 'nf': n_features+1, 'np': np,
'_protected_division': _protected_division}
if n_program_sum>1:
funstr = """def fun(z):
y_pred = np.zeros_like(y)
for k in range(1,n*nf+1,nf):
y_pred += X[:,k-1] * (%s)
return sign*metr(y, y_pred, w)
""" % num
else:
funstr = """def fun(z):
k = 0
return sign*metr(y, %s, w)
""" % num
exec(funstr, local)
#optimize numerical parameters params
newparams = optimize.fmin(local['fun'], params, disp=0, xtol=1e-8, ftol=1e-8)
numpar = list(newparams)
else:
numpar = []
#if simplification failed due to e.g. introduction of
#new operators not included in the original function list that
#cannot be resolved, return original program
try:
pro = parseexpr(uast, fun_list, numpar)
except RuntimeError:
pro = program
return pro
def _convert_function(fun, fun_set, n_features):
"""Convert mathematical expression to program in flattened tree list
Parameters
----------
fun : str
The mathematical expression to be converted to a program.
Variable for features must be X0, X1, X2, ...
fun_set : gp function set
n_features : int
number of features
Returns
-------
Program as list
"""
fun_list = [None]*6
parser_implemented = ('add','sub','mul','div','pow','neg')
for func in fun_set:
if func.name in parser_implemented:
fun_list[parser_implemented.index(func.name)] = func
else:
raise ValueError('function %s not implemented in optimization parser.'
% func.name)
# generate symbol names for features for use with sympy
gpvars0 = ''
gpvars1 = ''
for i in range(n_features-1):
gpvars0 += 'X%d,' % i
gpvars1 += 'X%d ' % i
gpvars0 += 'X%d' % (n_features-1)
gpvars1 += 'X%d' % (n_features-1)
exec(gpvars0 + '=symbols("' + gpvars1 +'")')
# replace overflows, if any and convert to ast for further parsing
funast = ast.parse(fun, mode='eval').body
return parseexpr(funast, fun_list, [])
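# Usage sketch (assuming the supplied function set only contains operators the
# parser implements, and that the estimator exposes it as '_function_set'):
def _demo_convert_function(est):
    return _convert_function('X0*X1 + 3.0', est._function_set, n_features=2)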
# ==== designer_family/tests/unit/objects/base.py · guishaowu/designer_family @ c89e16c6649c181f3262aa65fa97a457abdc2eb2 · hexsha 1c3f528e021aeacde8c3005be959336163200770 · Apache-2.0 · 1,347 bytes ====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from designer_family import conf
from designer_family import context
from designer_family.tests.unit import base as unit_base
class TestCase(unit_base.ContextTestCase):
"""Base class for other tests in this file.
It establishes the RequestContext used as self.context in the tests.
"""
def setUp(self):
super(TestCase, self).setUp()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
config = cfg.ConfigOpts()
self.conf_fixture = self.useFixture(config_fixture.Config(config))
conf.register_opts(config)
self.context.config = config
# ==== Configuration/Generator/python/QCD_Pt_170_230_8TeV_TuneCUETP8M1_cfi.py · Purva-Chaudhari/cmssw @ 32e5cbfe54c4d809d60022586cf200b7c3020bcf · hexsha 1c3f52e0ce89f75b895b86619db16ae1c51d37bb · Apache-2.0 · 1,129 bytes ====
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8ConcurrentGeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(8000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'HardQCD:all = on',
'PhaseSpace:pTHatMin = 170.',
'PhaseSpace:pTHatMax = 230.'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
# ==== testing/shell_script_testing/shell_script_testing.py · ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL @ 4611896ea1bb50df50120752712e8d4b32a6d023 · hexsha 1c3f5306df67d7a75fd8af47523e27b24b36c9e1 · MIT · 46,386 bytes ====
import sys
import os
import subprocess
import unittest
import time
import glob
import copy
class Script:
"""
    Trim_sequences is done, and the other scripts will have similar setups that expose the flags as attributes
    so tests can step through the list of attributes.
"""
__cwd = os.getcwd()
def __init__(self):
self.path = self.__cwd
self.shell_path = '{}/MayomicsVC/src/shell'.format(self.__cwd)
self.test_path = '{}/MayomicsVC/testing/shell_script_testing'.format(self.__cwd)
class Trimming(Script):
"""
Constructs the trim_sequences.sh commands for single and paired end reads. Each 'flag' attribute represents a
particular flag, that way we can step through the flags and perform tests on each.
TODO: os.path to make tests more generic
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s {}/outputs/output".format(self.path)
self.flag_A = '-A Inputs/TruSeqAdaptors.fasta'
self.flag_l = '-l Inputs/WGS_chr1_5X_E0.005_L1_read1.fastq.gz'
self.flag_r = '-r Inputs/WGS_chr1_5X_E0.005_L1_read2.fastq.gz'
self.flag_C = '-C /usr/local/apps/bioapps/python/Python-3.6.1/bin' # for iforge testing
# self.flag_C = '-C /usr/bin' # for local testing
self.flag_t = '-t 0'
self.flag_P = '-P true'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/trim_sequences.sh'.format(self.shell_path)
self.type = 'trim_sequences.sh'
def __str__(self, case: str = 'paired'):
if case == 'single':
return "/bin/bash {} {} {} {} -r null {} {} -P false {} {} {}".format(self.name, self.flag_s, self.flag_A,
self.flag_l, self.flag_C, self.flag_t,
self.flag_e, self.flag_F, self.flag_d)
elif case == 'paired':
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {}".format(self.name, self.flag_s, self.flag_A,
self.flag_l, self.flag_r, self.flag_C,
self.flag_t, self.flag_P, self.flag_e,
self.flag_F, self.flag_d)
else:
raise ValueError("unknown case")
def __repr__(self, case: str = 'paired'):
if case == 'single':
return "/bin/bash {} {} {} {} -r null {} {} -P false {} {} {}".format(self.name, self.flag_s, self.flag_A,
self.flag_l, self.flag_C, self.flag_t,
self.flag_e, self.flag_F, self.flag_d)
elif case == 'paired':
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {}".format(self.name, self.flag_s, self.flag_A,
self.flag_l, self.flag_r, self.flag_C,
self.flag_t, self.flag_P, self.flag_e,
self.flag_F, self.flag_d)
else:
raise ValueError("unknown case")
class DeliverHaplotyperVC(Script):
"""
Constructs the deliver_haplotyperVC.sh commands.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s output"
self.flag_r = '-r Inputs/somaticvariants.vcf.gz'
self.flag_j = "-j Jsons/SomaticMasterWorkflow.FilledIn.json"
self.flag_f = '-f Delivery' # for iforge testing
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/deliver_haplotyperVC.sh'.format(self.shell_path)
self.type = 'deliver_haplotyperVC.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_r, self.flag_j, self.flag_f, self.flag_F, self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_r, self.flag_j, self.flag_f, self.flag_F, self.flag_d)
class BQSR(Script):
"""
Constructs the bqsr.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_S = '-S /usr/local/apps/bioapps/sentieon/sentieon-genomics-201808' # for iforge testing
self.flag_G = "-G Reference/Homo_sapiens_assembly38.fasta"
self.flag_t = '-t 40'
self.flag_b = '-b Inputs/WGS_chr20_21_22_normal.bam'
self.flag_k = "-k Reference/Mills_and_1000G_gold_standard.inders.hg38.vcf"
self.flag_e = '-e Config/EnvProfile.file'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/bqsr.sh'.format(self.shell_path)
self.type = 'bqsr.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_S, self.flag_G, self.flag_t, self.flag_b, self.flag_k,
self.flag_e, self.flag_F, self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_S, self.flag_G, self.flag_t, self.flag_b, self.flag_k,
self.flag_e, self.flag_F, self.flag_d)
class VQSR(Script):
"""
Constructs the vqsr.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_S = '-S /usr/local/apps/bioapps/sentieon/sentieon-genomics-201808' # for iforge testing
self.flag_G = "-G Reference/Homo_sapiens_assembly38.fasta"
self.flag_t = '-t 40'
self.flag_V = '-V Inputs/somaticvariants.vcf.gz'
self.flag_r = '-r \"\'--resource /projects/bioinformatics/DataPacks/human/gatk_bundle_Oct_2017/' \
'gatk_bundle_hg38/1000G_phase1.snps.high_confidence.hg38.vcf.gz --resource_param 1000G,' \
'known=false,training=true,truth=false,prior=10.0 --resource /projects/bioinformatics/' \
'DataPacks/human/gatk_bundle_Oct_2017/gatk_bundle_hg38/1000G_omni2.5.hg38.vcf.gz ' \
'--resource_param omni,known=false,training=true,truth=false,prior=12.0 --resource /projects/' \
'bioinformatics/jallen17/Reference/dbsnp_138.hg38.vcf --resource_param dbsnp,known=true,' \
'training=false,truth=false,prior=2.0 --resource /projects/bioinformatics/DataPacks/human/' \
'gatk_bundle_Oct_2017/gatk_bundle_hg38/hapmap_3.3.hg38.vcf.gz --resource_param hapmap,known=' \
'false,training=true,truth=true,prior=15.0\'\"'
self.flag_R = '-R \"\'--resource /projects/bioinformatics/jallen17/Reference/dbsnp_138.hg38.vcf ' \
'--resource_param dbsnp,known=true,training=false,truth=false,prior=2.0 --resource /projects/' \
'bioinformatics/jallen17/Reference/Mills_and_1000G_gold_standard.indels.hg38.vcf ' \
'--resource_param Mills,known=false,training=true,truth=true,prior=12.0\'\"'
self.flag_a = '-a \"\'--annotation DP --annotation QD --annotation FS --annotation SOR --annotation MQ ' \
'--annotation MQRankSum --annotation ReadPosRankSum \'\"'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/vqsr.sh'.format(self.shell_path)
self.type = 'vqsr.sh'
    def __str__(self):
        return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {}". \
            format(self.name, self.flag_s, self.flag_S, self.flag_G, self.flag_t, self.flag_V, self.flag_r,
                   self.flag_R, self.flag_a, self.flag_e, self.flag_F, self.flag_d)
    def __repr__(self):
        return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {}". \
            format(self.name, self.flag_s, self.flag_S, self.flag_G, self.flag_t, self.flag_V, self.flag_r,
                   self.flag_R, self.flag_a, self.flag_e, self.flag_F, self.flag_d)
class Alignment(Script):
"""
Constructs the alignment.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_L = "-L fake_lib"
self.flag_f = "-f normal"
self.flag_c = "-c NCSA"
self.flag_l = '-l Inputs/WGS_chr1_5X_E0.005_L1_read1.fastq.gz'
self.flag_r = '-r Inputs/WGS_chr1_5X_E0.005_L1_read2.fastq.gz'
self.flag_G = "-G Reference/Homo_sapiens_assembly38.fasta"
self.flag_K = "-K 10000000"
self.flag_o = "'-M'"
self.flag_S = '-S /usr/local/apps/bioapps/python/Python-3.6.1/bin' # for iforge testing
self.flag_t = '-t 40'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/alignment.sh'.format(self.shell_path)
self.type = 'alignment.sh'
def __str__(self, case: str = 'paired'):
if case == 'single':
return "/bin/bash {} {} {} {} {} {} {} -r null {} {} {} {} {} -P false {} {} {}".\
format(self.name, self.flag_s, self.flag_p, self.flag_L, self.flag_f, self.flag_c, self.flag_l,
self.flag_G, self.flag_K, self.flag_o, self.flag_S, self.flag_t,
self.flag_e, self.flag_F, self.flag_d)
elif case == 'paired':
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}".\
format(self.name, self.flag_s, self.flag_p, self.flag_L, self.flag_f, self.flag_c, self.flag_l,
self.flag_r, self.flag_G, self.flag_K, self.flag_o, self.flag_S, self.flag_t, self.flag_P,
self.flag_e, self.flag_F, self.flag_d)
else:
raise ValueError("unknown case")
def __repr__(self, case: str = 'paired'):
if case == 'single':
return "/bin/bash {} {} {} {} {} {} {} -r null {} {} {} {} {} -P false {} {} {}".\
format(self.name, self.flag_s, self.flag_p, self.flag_L, self.flag_f, self.flag_c, self.flag_l,
self.flag_G, self.flag_K, self.flag_o, self.flag_S, self.flag_t,
self.flag_e, self.flag_F, self.flag_d)
elif case == 'paired':
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}".\
format(self.name, self.flag_s, self.flag_p, self.flag_L, self.flag_f, self.flag_c, self.flag_l,
self.flag_r, self.flag_G, self.flag_K, self.flag_o, self.flag_S, self.flag_t, self.flag_P,
self.flag_e, self.flag_F, self.flag_d)
else:
raise ValueError("unknown case")
class DeDup(Script):
"""
Constructs the dedup.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_b = '-b Inputs/WGS_chr20_21_22_normal.bam'
self.flag_t = '-t 40'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_S = '-S /usr/local/apps/bioapps/sentieon/sentieon-genomics-201808' # for iforge testing
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
        self.name = '{}/dedup.sh'.format(self.shell_path)
        self.type = 'dedup.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_b, self.flag_t, self.flag_e, self.flag_S, self.flag_F,
self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_b, self.flag_t, self.flag_e, self.flag_S, self.flag_F,
self.flag_d)
class DeliverAlignment(Script):
"""
Constructs the deliver_alignment.sh commands.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s output"
self.flag_b = '-b Inputs/WGS_chr20_21_22_normal.bam'
self.flag_j = "-j Jsons/Runalignment.FilledIn.json"
self.flag_f = '-f Delivery' # for iforge testing
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/deliver_alignment.sh'.format(self.shell_path)
self.type = 'deliver_alignment.sh'
def __str__(self, case: str = 'paired'):
return "/bin/bash {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_b, self.flag_j, self.flag_f, self.flag_F, self.flag_d)
def __repr__(self, case: str = 'paired'):
return "/bin/bash {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_b, self.flag_j, self.flag_f, self.flag_F, self.flag_d)
class MergeBams(Script):
"""
Constructs the merge_bams.sh commands.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_b = '-b Inputs/WGS_chr20_21_22_normal.bam'
self.flag_S = '-S /usr/local/apps/bioapps/sentieon/sentieon-genomics-201808' # for iforge testing
self.flag_t = '-t 40'
        self.flag_e = '-e Config/EnvProfile.file'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/merge_bams.sh'.format(self.shell_path)
self.type = 'merge_bams.sh'
    def __str__(self, case: str = 'paired'):
        return "/bin/bash {} {} {} {} {} {} {} {}". \
            format(self.name, self.flag_s, self.flag_b, self.flag_S, self.flag_t, self.flag_e, self.flag_F,
                   self.flag_d)
    def __repr__(self, case: str = 'paired'):
        return "/bin/bash {} {} {} {} {} {} {} {}". \
            format(self.name, self.flag_s, self.flag_b, self.flag_S, self.flag_t, self.flag_e, self.flag_F,
                   self.flag_d)
class Mutect(Script):
"""
Constructs the mutect.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_N = '-N Inputs/WGS_chr20_21_22_normal.bam'
self.flag_T = '-T Inputs/WGS_chr20_21_22_tumor.bam'
self.flag_g = "-g Reference/Homo_sapiens_assembly38.fasta"
self.flag_G = '-G /usr/local/apps/bioapps/gatk/GenomeAnalysisTK-3.8-1-0-gf15c1c3ef'
self.flag_J = '-J /usr/local/apps/bioapps/java/java-1.8-64bit/bin'
self.flag_j = '-j \"\'-Xms2G -Xmx8G\'\"'
self.flag_B = '-B /usr/local/apps/bioapps/bcftools/bcftools-1.5'
self.flag_Z = '-Z /usr/local/apps/bioapps/bcftools/htslib-1.3.1/bin'
self.flag_S = '-S /usr/local/apps/bioapps/samtools/samtools-1.5'
self.flag_t = '-t 40'
self.flag_e = '-e Config/EnvProfile.file'
        self.flag_D = '-D {}/../perl/fixDP.pl'.format(self.shell_path)
self.flag_o = '-o \"\'--dbsnp /projects/bioinformatics/jallen1 /Reference/dbsnp_138.hg38.vcf\'\"'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/mutect.sh'.format(self.shell_path)
self.type = 'mutect.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_N, self.flag_T, self.flag_g, self.flag_G, self.flag_J,
self.flag_j, self.flag_B, self.flag_Z, self.flag_S, self.flag_t, self.flag_e, self.flag_D,
self.flag_o, self.flag_F, self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_N, self.flag_T, self.flag_g, self.flag_G, self.flag_J,
self.flag_j, self.flag_B, self.flag_Z, self.flag_S, self.flag_t, self.flag_e, self.flag_D,
self.flag_o, self.flag_F, self.flag_d)
class Realignment(Script):
"""
Constructs the realignment.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_b = '-b Inputs/WGS_chr20_21_22_normal.bam'
self.flag_G = "-G Reference/Homo_sapiens_assembly38.fasta"
self.flag_k = "-k Reference/Mills_and_1000G_gold_standard.inders.hg38.vcf"
self.flag_S = '-S /usr/local/apps/bioapps/sentieon/sentieon-genomics-201808'
self.flag_t = '-t 40'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/realignment.sh'.format(self.shell_path)
self.type = 'realignment.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_b, self.flag_G, self.flag_k, self.flag_S, self.flag_t,
self.flag_e, self.flag_F, self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_b, self.flag_G, self.flag_k, self.flag_S, self.flag_t,
self.flag_e, self.flag_F, self.flag_d)
class Strelka(Script):
"""
Constructs the strelka.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_N = '-N Inputs/WGS_chr20_21_22_normal.bam'
self.flag_T = '-T Inputs/WGS_chr20_21_22_tumor.bam'
self.flag_g = "-g Reference/Homo_sapiens_assembly38.fasta"
self.flag_B = '-B /usr/local/apps/bioapps/bcftools/bcftools-1.5'
self.flag_I = '-I /usr/local/apps/bioapps/strelka/strelka-2.9.2.centos6_x86_64/bin'
self.flag_S = '-S /usr/local/apps/bioapps/samtools/samtools-1.5'
self.flag_Z = '-Z /usr/local/apps/bioapps/bcftools/htslib-1.3.1/bin'
self.flag_t = '-t 40'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_i = '-i MayomicsVC/src/perl/fixStrelka_GT_indels.pl'
self.flag_p = '-p MayomicsVC/src/perl/fixStrelka_GT_snvs.pl'
self.flag_o = '-o \"\'--outputCallableRegions\'\"'
self.flag_O = '-O \"\'-m any --force-sample\'\"'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/strelka.sh'.format(self.shell_path)
self.type = 'strelka.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_N, self.flag_T, self.flag_g, self.flag_B, self.flag_I,
self.flag_S, self.flag_Z, self.flag_t, self.flag_e, self.flag_i, self.flag_p, self.flag_o,
self.flag_O, self.flag_F, self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_N, self.flag_T, self.flag_g, self.flag_B, self.flag_I,
self.flag_S, self.flag_Z, self.flag_t, self.flag_e, self.flag_i, self.flag_p, self.flag_o,
self.flag_O, self.flag_F, self.flag_d)
class CombineVariants(Script):
"""
    Constructs the combine_variants.sh commands.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_S = '-S Inputs/strelka.vcf.bgz'
self.flag_T = '-T Inputs/mutect.vcf.bgz'
self.flag_g = "-g Reference/Homo_sapiens_assembly38.fasta"
self.flag_G = '-G /usr/local/apps/bioapps/gatk/GenomeAnalysisTK-3.8-1-0-gf15c1c3ef'
self.flag_J = '-J /usr/local/apps/bioapps/java/java-1.8-64bit/bin'
self.flag_B = '-B /usr/local/apps/bioapps/bcftools/bcftools-1.5'
self.flag_Z = '-Z /usr/local/apps/bioapps/bcftools/htslib-1.3.1/bin'
self.flag_t = '-t 40'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_o = '-o \"\' \'\"'
self.flag_p = '-p \"\'strelka,mutect\'\"'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
        self.name = '{}/combine_variants.sh'.format(self.shell_path)
        self.type = 'combine_variants.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_S, self.flag_T, self.flag_g, self.flag_G, self.flag_J,
self.flag_B, self.flag_Z, self.flag_t, self.flag_e, self.flag_o, self.flag_p, self.flag_F,
self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_S, self.flag_T, self.flag_g, self.flag_G, self.flag_J,
self.flag_B, self.flag_Z, self.flag_t, self.flag_e, self.flag_o, self.flag_p, self.flag_F,
self.flag_d)
class DeliverSomaticVC(Script):
"""
    Constructs the deliver_somaticVC.sh commands.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s output"
self.flag_r = '-r Inputs/somaticvariants.vcf.gz'
self.flag_j = "-j Jsons/SomaticMasterWorkflow.FilledIn.json"
self.flag_f = '-f Delivery'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/deliver_somaticVC.sh'.format(self.shell_path)
self.type = 'deliver_somaticVC.sh'
def __str__(self, case: str = 'paired'):
return "/bin/bash {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_r, self.flag_j, self.flag_f, self.flag_F, self.flag_d)
def __repr__(self, case: str = 'paired'):
return "/bin/bash {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_r, self.flag_j, self.flag_f, self.flag_F, self.flag_d)
class Haplotyper(Script):
"""
Constructs the haplotyper.sh commands for single or paired end reads.
"""
def __init__(self):
Script.__init__(self)
self.flag_s = "-s outputs/output"
self.flag_S = '-S /usr/local/apps/bioapps/sentieon/sentieon-genomics-201808'
self.flag_G = "-G Reference/Homo_sapiens_assembly38.fasta"
self.flag_t = '-t 40'
self.flag_b = '-b Inputs/sample.bam'
self.flag_D = '-D Reference/dbsnp_138.hg38.vcf'
self.flag_r = '-r Inputs/bqsr.recal_data.table'
self.flag_o = '-o \"\' \'\"'
self.flag_e = '-e Config/EnvProfile.file'
self.flag_F = '-F {}/shared_functions.sh'.format(self.shell_path)
self.flag_d = '-d'
self.name = '{}/haplotyper.sh'.format(self.shell_path)
self.type = 'haplotyper.sh'
def __str__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_S, self.flag_G, self.flag_t, self.flag_b, self.flag_D,
self.flag_r, self.flag_o, self.flag_e, self.flag_F, self.flag_d)
def __repr__(self):
return "/bin/bash {} {} {} {} {} {} {} {} {} {} {} {}". \
format(self.name, self.flag_s, self.flag_S, self.flag_G, self.flag_t, self.flag_b, self.flag_D,
self.flag_r, self.flag_o, self.flag_e, self.flag_F, self.flag_d)
class ParameterizedTestCase(unittest.TestCase):
"""
Test cases with parameters will inherit from this class
Code borrowed from and adapted: https://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases
"""
def __init__(self, methodName='runTest', param=None):
super(ParameterizedTestCase, self).__init__(methodName)
self.param = param
@staticmethod
def parameterize(testcase_klass, param=None):
"""
Create a suite containing all tests taken from the given subclass, passing them
the parameter 'param'
"""
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_klass)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_klass(name, param=param))
return suite
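    # Usage sketch (mirrored by the __main__ block at the bottom of this file):
    #   suite = ParameterizedTestCase.parameterize(TestArgs, param=Trimming())
    #   unittest.TextTestRunner(verbosity=2).run(suite)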
class TestArgs(ParameterizedTestCase):
def setUp(self):
pass
def tearDown(self):
files = glob.glob('outputs/*')
for f in files:
os.remove(f)
files = glob.glob('WGS*')
for f in files:
os.remove(f)
def test_no_arg(self):
"""
Tests the script produces help output when no argument is passed
"""
os.system("/bin/bash " + self.param.name + ' > outputs/outfile.txt')
output = self.parse_output('outputs/outfile.txt')
output = ''.join(output)
self.assertTrue('command line input: \n' in output)
self.assertTrue("No arguments passed." in output)
def test_help_function(self):
"""
        While this theoretically works for all scripts, the tricky part is that each script has a unique
        help output, since each takes a different set of inputs. It'll take a bit of modification to get it
        to compare the correct files, and it'll also require building and maintaining a desired help-file database.
"""
os.system("/bin/bash " + self.param.name + ' -h > outputs/outfile.txt')
desired_help = self.parse_output('{}/Verification_files/desired_help_output.txt'.format(self.param.test_path))
output = self.parse_output('outputs/outfile.txt')
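        # Compare from line 5 up to (but not including) the last line; the first four lines
        # are skipped, presumably because they hold run-specific header output.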
for i in range(4, len(output)-1):
self.assertTrue(desired_help[i-4] == output[i])
def test_nonexistent_option(self):
"""
Test a flag that doesn't exist with a garbage test option. This should work for all as is.
"""
os.system("/bin/bash " + self.param.name + " -Q garbage > outputs/outfile.txt")
output = self.parse_output('outputs/outfile.txt')
output = ''.join(output)
self.assertTrue('command line input: -Q garbage' in output)
self.assertTrue("Invalid option: -Q" in output)
def test_successful_paired_end_read_and_permissions(self):
"""
This is simply a successful run of the tool and should be generalizable. I'm including a
check for file permissions to avoid having to duplicate code later.
"""
os.system(self.param.__str__('paired') + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
# check that it started and ended properly
self.assertTrue('START' in output)
self.assertTrue("Finished trimming adapter sequences." in output)
cutadapt_output = 'WGS_chr1_5X_E0.005_L1_read1.fastq.gz'
# Check that it created a non-zero output
self.assertTrue(os.path.exists(cutadapt_output) and os.path.getsize(cutadapt_output) > 0)
# check that permissions have been set properly
perm_check_log = subprocess.Popen(['ls', '-l', 'outputs/output.trimming.TBD.log'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout_log, stderr_log = perm_check_log.communicate()
self.assertTrue("-rw-r-----" in str(stdout_log))
perm_check_read1 = subprocess.Popen(['ls', '-l', 'WGS_chr1_5X_E0.005_L1_read1.fastq.gz'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout_read1, stderr_read1 = perm_check_read1.communicate()
self.assertTrue("-rw-r-----" in str(stdout_read1))
perm_check_read2 = subprocess.Popen(['ls', '-l', 'WGS_chr1_5X_E0.005_L1_read2.fastq.gz'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout_read2, stderr_read2 = perm_check_read2.communicate()
self.assertTrue("-rw-r-----" in str(stdout_read2))
# Test minimal permissions on the output files
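        # oct(st_mode)[-3:] yields the three-digit permission string; index [1] is the group
        # digit, so asserting '4' checks that group permissions end up read-only even after
        # the file was made owner-write-only (0o200) beforehand.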
os.chmod('WGS_chr1_5X_E0.005_L1_read1.fastq.gz', 0o200)
os.system(self.param.__str__('paired') + " > outputs/outfile.txt 2>&1 ")
self.assertTrue(oct(os.stat('WGS_chr1_5X_E0.005_L1_read2.fastq.gz').st_mode)[-3:][1] == '4')
os.chmod('WGS_chr1_5X_E0.005_L1_read2.fastq.gz', 0o200)
os.system(self.param.__str__('paired') + " > outputs/outfile.txt 2>&1 ")
self.assertTrue(oct(os.stat('WGS_chr1_5X_E0.005_L1_read2.fastq.gz').st_mode)[-3:][1] == '4')
def test_successful_single_end_read(self):
"""
This is simply a successful run of the tool and should be generalizable
"""
os.system(self.param.__str__('single') + " > outputs/outfile.txt 2>&1")
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
self.assertTrue('START' in output)
self.assertTrue("Finished trimming adapter sequences." in output)
cutadapt_log = 'outputs/output.cutadapt.log'
self.assertTrue(os.path.exists(cutadapt_log) and os.path.getsize(cutadapt_log) > 0)
def test_read_flags_with_bad_input(self):
"""
Most of the scripts have some sort of input, so this will probably be generalizable to a degree.
It simply tries some dummy/garbage files for read inputs to make sure the tool isn't trying to align
text that isn't genomic.
"""
if self.param.type != 'trim_sequences.sh':
print("Only valid for trim sequences")
return unittest.skip("Only valid for trim_sequences")
# test left and right read flags
flags_to_test = ['flag_l', 'flag_r']
garbage_test_files = {'dummy_test_blank.fastq':
"garbage_test_files/dummy_test_blank.fastq is empty or does not exist.",
'dummy_test_text.fastq':
"cutadapt: error: Line 1 in FASTQ file is expected to start with '@', but found "
"'Lorem ipsu'",
'dummy_test_text_with_at.fastq':
"cutadapt: error: Line 3 in FASTQ file is expected to "
"start with '+', but found 'Suspendiss'",
'WGS_chr1_5X_E0.005_L1_read1.fastq.':
'WGS_chr1_5X_E0.005_L1_read1.fastq. is empty or does not exist'}
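        # Pattern used throughout these tests: deep-copy the original flag, swap its argument
        # for a garbage file, run the script, then restore the flag so later tests see the
        # pristine command line again.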
for flag in flags_to_test:
for garbage_test in garbage_test_files.keys():
temp_flag = copy.deepcopy(self.param.__dict__[flag])
manip_flag = self.param.__dict__[flag]
if "dummy" in garbage_test:
manip_flag = manip_flag.split(' ')[0] + ' {}/garbage_test_files/'.format(self.param.test_path) \
+ garbage_test
else:
manip_flag = manip_flag.split(' ')[0] + ' Inputs/' + garbage_test
self.param.__dict__[flag] = manip_flag
os.system(str(self.param) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
log = self.parse_output('outputs/output.cutadapt.log')
output = ''.join(output)
log = ''.join(log)
if 'Cutadapt Read 1 and 2 failure' in output:
self.assertTrue(garbage_test_files[garbage_test] in log)
else:
self.assertTrue(garbage_test_files[garbage_test] in output)
self.param.__dict__[flag] = temp_flag
try:
os.remove(garbage_test)
except OSError:
pass
def test_garbage_adapters(self):
"""
        This tests the trim_sequences call for the adapter files. This may be generalizable, since other
        scripts will call in outside files as well.
"""
if self.param.type != 'trim_sequences.sh':
print("Only valid for trim sequences")
return unittest.skip("Only valid for trim_sequences")
tests = {'dummy_test_blank.fastq': "garbage_test_files/dummy_test_blank.fastq is empty or does not exist.",
'dummy_test_text.fastq': "At line 1: Expected '>' at beginning of FASTA record, but got 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.'",
'dummy_test_text_with_gt.fastq': "is not a valid IUPAC code. Use only characters XACGTURYSWKMBDHVN.",
'TruSeqAdapters.fasta': "TruSeqAdapters.fasta is empty or does not exist"}
for test in tests.keys():
temp_flag = copy.deepcopy(self.param.__dict__['flag_A'])
manip_flag = self.param.__dict__['flag_A']
manip_flag = manip_flag.split(' ')[0] + ' {}/garbage_test_files/'.format(self.param.test_path) + test
self.param.__dict__['flag_A'] = manip_flag
os.system(self.param.__str__('paired') + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
log = self.parse_output('outputs/output.cutadapt.log')
output = ''.join(output)
log = ''.join(log)
with self.subTest(test=test):
if 'Cutadapt Read 1 and 2 failure' in output:
self.assertTrue(tests[test] in log)
else:
self.assertTrue(tests[test] in output)
self.param.__dict__['flag_A'] = temp_flag
try:
os.remove(test)
except OSError:
pass
def test_bad_env_file(self):
"""
        This simply uses a non-existent environment file to test that the script checks for this.
"""
tests = {'envprof_fake.file': "No such file or directory"}
for test in tests.keys():
temp_flag = copy.deepcopy(self.param.__dict__['flag_e'])
manip_flag = self.param.__dict__['flag_e']
manip_flag = manip_flag.split(' ')[0] + ' ' + test
self.param.__dict__['flag_e'] = manip_flag
os.system(self.param.__str__('paired') + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/outfile.txt')
output = ''.join(output)
self.assertTrue(tests[test] in output)
self.param.__dict__['flag_e'] = temp_flag
def test_bad_cutadapt_path(self):
"""
Trim_sequences is the only one that uses cutadapt, though I may be able to generalize this script
to 'test bad tool path' once I get rolling on the others.
"""
if self.param.type != 'trim_sequences.sh':
print("Only valid for trim sequences")
return unittest.skip("Only valid for trim_sequences")
# Test bad cutadapt path
os.system("/bin/bash {} {} {} {} {} -C /usr/fake {} {} {} {} {} > outputs/outfile.txt 2>&1".
format(self.param.name, self.param.flag_s, self.param.flag_A, self.param.flag_l, self.param.flag_r,
self.param.flag_t, self.param.flag_P, self.param.flag_e, self.param.flag_F, self.param.flag_d))
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
self.assertTrue("REASON=Cutadapt directory /usr/fake is not a directory or does not exist." in output)
def test_bad_thread_options(self):
"""
This tests trim_sequences thread option. It should return errors for having too high a thread count,
though this may vary by machine.
"""
if self.param.type != 'trim_sequences.sh':
print("Only valid for trim sequences")
return unittest.skip("Only valid for trim_sequences")
values = [321, 3299, 12322]
for number in values:
os.system("/bin/bash {} {} {} {} {} {} {} {} {} {}".\
format(self.param.name, self.param.flag_s, self.param.flag_A, self.param.flag_l, self.param.flag_r,
self.param.flag_C, self.param.flag_P, self.param.flag_e, self.param.flag_F, self.param.flag_d)
+ " -t " + str(number) + " > outputs/outfile.txt 2>&1")
output = self.parse_output('outputs/output.trimming.TBD.log')
log = self.parse_output('outputs/output.cutadapt.log')
output = ''.join(output)
log = ''.join(log)
if 'Finished trimming adapter sequences.' in output:
self.assertTrue(True)
else:
self.assertTrue('Cutadapt Read 1 and 2 failure.' in output)
def test_paired_options(self):
"""
Not every script will have a true/false/read value to test; this only works with ones that do
"""
if self.param.type != 'trim_sequences.sh':
print("Only valid for trim sequences")
return unittest.skip("Only valid for trim_sequences")
tests = ['True', 'T', 'False', "F"]
for test in tests:
os.system("/bin/bash {} {} {} {} {} {} {} {} {} {}".\
format(self.param.name, self.param.flag_s, self.param.flag_A, self.param.flag_l, self.param.flag_r,
self.param.flag_C, self.param.flag_t, self.param.flag_e, self.param.flag_F, self.param.flag_d) +
' -P ' + test + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
self.assertTrue("REASON=Incorrect argument for paired-end option -P. Must be set to true or false."
in output)
def test_incorrect_read_options(self):
"""
Not every script will have a true/false/read value to test; this only works with ones that do
"""
if self.param.type != 'trim_sequences.sh':
print("Only valid for trim sequences")
return unittest.skip("Only valid for trim_sequences")
os.system("/bin/bash {} {} {} {} {} {} {} -P false {} {} {}". \
format(self.param.name, self.param.flag_s, self.param.flag_A, self.param.flag_l, self.param.flag_r,
self.param.flag_C, self.param.flag_t, self.param.flag_e,
self.param.flag_F, self.param.flag_d) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
self.assertTrue("REASON=User specified Single End option, but did not set read 2 option -r to null." in output)
os.system("/bin/bash {} {} {} {} {} {} {} -P true {} {}". \
format(self.param.name, self.param.flag_s, self.param.flag_A, self.param.flag_l,
self.param.flag_C, self.param.flag_t, self.param.flag_e,
self.param.flag_F, self.param.flag_d) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
self.assertTrue("REASON=Missing read 2 option: -r. If running a single-end job, set -r null in command." in output)
os.system("/bin/bash {} {} {} -l null {} {} {} -P false {} {} {}". \
format(self.param.name, self.param.flag_s, self.param.flag_A, self.param.flag_r,
self.param.flag_C, self.param.flag_t, self.param.flag_e,
self.param.flag_F, self.param.flag_d) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
self.assertTrue("REASON=Input read 1 file null is empty or does not exist." in output)
os.system("/bin/bash {} {} {} -r null {} {} {} {} -P true {} {}". \
format(self.param.name, self.param.flag_s, self.param.flag_A, self.param.flag_l,
self.param.flag_C, self.param.flag_t, self.param.flag_e,
self.param.flag_F, self.param.flag_d) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/output.trimming.TBD.log')
output = ''.join(output)
self.assertTrue("REASON=Input read 2 file null is empty or does not exist." in output)
def test_missing_option_values(self):
"""
        Should work with any of the scripts. Note that the -d flag is omitted since it does not take a
        value.
"""
attributes = list(self.param.__dict__.keys())
attributes.remove('flag_d')
options = list([a for a in attributes if "flag" in a])
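        # Keeping only the text before the first space leaves the bare option (e.g. '-s'
        # instead of '-s outputs/output'), which simulates a flag passed without its value.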
for flag in options:
temp_flag = copy.deepcopy(self.param.__dict__[flag])
manip_flag = self.param.__dict__[flag]
manip_flag = manip_flag.split(' ')[0]
self.param.__dict__[flag] = manip_flag
os.system(str(self.param) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/outfile.txt')
output = ''.join(output)
self.assertTrue("Error with option " + manip_flag + " in command. Option passed incorrectly or without argument." in output)
self.param.__dict__[flag] = temp_flag
def test_file_permissions(self):
"""
Should work with any of the scripts
"""
os.chmod('Inputs', 0o000)
os.system(str(self.param) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/outfile.txt')
output = ''.join(output)
self.assertTrue('is empty or does not exist' in output)
os.chmod('Inputs', 0o755)
        os.chmod('outputs', 0o000)
os.system(str(self.param) + " > outfile.txt 2>&1 ")
output = self.parse_output('outfile.txt')
output = ''.join(output)
self.assertTrue('Permission denied' in output)
os.remove('outfile.txt')
os.chmod('outputs', 0o755)
os.chmod(self.param.shell_path, 0o000)
os.system(str(self.param) + " > outputs/outfile.txt 2>&1 ")
output = self.parse_output('outputs/outfile.txt')
output = ''.join(output)
self.assertTrue('Permission denied' in output)
os.chmod(self.param.shell_path, 0o755)
def test_logs_are_truncated(self):
# first run creates logs
os.system("/bin/bash {} -s outputs/output -A {}/garbage_test_files/dummy_test_text.fastq {} {} {} -P true {} {} {}"
" {}".format(self.param.name, self.param.test_path, self.param.flag_r, self.param.flag_l, self.param.flag_C,
self.param.flag_t, self.param.flag_e, self.param.flag_F, self.param.flag_d) +
" > outputs/outfile.txt 2>&1 ")
output_stdout = self.parse_output('outputs/output.trimming.TBD.log')
output_stdout_test = output_stdout[-2:]
output_stdout_test = ''.join(output_stdout_test)
output_stdout = ''.join(output_stdout)
output_cutlog = self.parse_output('outputs/output.cutadapt.log')
output_cutlog_test = output_cutlog[-2:]
output_cutlog_test = ''.join(output_cutlog_test)
output_cutlog = ''.join(output_cutlog)
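        # Keep the final two lines of each first-run log: if the script truncates its logs on
        # a fresh run (rather than appending), those lines must not show up in the second
        # run's output.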
# second run
time.sleep(2)
os.system("/bin/bash {} -s outputs/output -A {}/garbage_test_files/dummy_test_text_with_gt.fastq {} {} {} -P true"
" {} {} {} {}".format(self.param.name, self.param.test_path, self.param.flag_r, self.param.flag_l, self.param.flag_C,
self.param.flag_t, self.param.flag_e, self.param.flag_F, self.param.flag_d) +
" > outputs/outfile.txt 2>&1 ")
output_stdout2 = self.parse_output('outputs/output.trimming.TBD.log')
output_stdout2 = ''.join(output_stdout2)
output_cutlog2 = self.parse_output('outputs/output.cutadapt.log')
output_cutlog2 = ''.join(output_cutlog2)
# The logs should be different and the second log shouldn't be contained in the first
self.assertNotEqual(output_stdout, output_stdout2)
self.assertNotEqual(output_cutlog, output_cutlog2)
self.assertTrue(output_stdout_test not in output_stdout2)
self.assertTrue(output_cutlog_test not in output_cutlog2)
@staticmethod
def parse_output(file):
        # Read the whole file into a list of lines, closing the handle promptly.
        with open(file, 'r') as handle:
            return handle.readlines()
if __name__ == "__main__":
scripts = ["trim_sequences.sh", 'deliver_haplotyperVC.sh', 'bqsr.sh', 'vqsr.sh', 'alignment.sh', 'dedup.sh',
'deliver_alignment.sh', 'merge_bams.sh', 'mutect.sh', 'realignment.sh', 'strelka.sh',
'combine_variants.sh', 'deliver_somaticVC.sh', 'haplotyper.sh']
    # Wrapper classes, listed in the same order as the script names above.
    script_classes = [Trimming, DeliverHaplotyperVC, BQSR, VQSR, Alignment, DeDup,
                      DeliverAlignment, MergeBams, Mutect, Realignment, Strelka,
                      CombineVariants, DeliverSomaticVC, Haplotyper]
    try:
        idx = scripts.index(sys.argv[1])
    except (IndexError, ValueError):
        sys.exit("Argument must be the script to test and the output_file/log_name to use.")
    test_script = script_classes[idx]()
suite = unittest.TestSuite()
suite.addTest(ParameterizedTestCase.parameterize(TestArgs, param=test_script))
unittest.TextTestRunner(verbosity=2).run(suite)
avg_line_length: 48.87882 | max_line_length: 165 | alphanum_fraction: 0.587246
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3f53db2528a3e9ce469c7860b1e1d14e83ccaa | size: 4237 | ext: py | lang: Python
repo_path: setup.py | repo_name: taesko/fst | repo_head_hexsha: e95b0d45ef1c181831c741604893bdc26753e551 | licenses: ["MIT"]
stars_count: null | issues_count: null | forks_count: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = "fst"
DESCRIPTION = "FIle System Templates"
URL = "https://github.com/taesko/{}".format(NAME)
EMAIL = "taeskow@gmail.com"
AUTHOR = "Antonio Todorov"
# What packages are required for this module to be executed?
REQUIRED = ["watchdog"]
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
# with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
# long_description = "\n" + f.read()
# try:
# import pypandoc
#
# long_description = pypandoc.convert(
# source=long_description, to="rst", format="markdown_github"
# )
# except Exception:
# print("warning - unable to convert markdown README to restructured text.")
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, NAME, "__version__.py")) as f:
exec(f.read(), about)
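# fst/__version__.py is expected to define __version__; the exact value is whatever the
# package ships (the "0.1.0"-style string would be illustrative, not taken from the repo).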
class UploadCommand(Command):
"""Support setup.py upload."""
description = "Build and publish the package."
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
except OSError:
pass
self.status("Building Source and Wheel (universal) distribution…")
os.system("{0} setup.py sdist bdist_wheel".format(sys.executable))
self.status("Uploading the package to PyPi via Twine…")
os.system("twine upload dist/*")
sys.exit()
class TestUploadCommand(Command):
"""Support setup.py test_upload."""
description = "Build and publish the package to TestPyPI."
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
except OSError:
pass
self.status("Building Source and Wheel distribution…")
os.system("{0} setup.py sdist bdist_wheel".format(sys.executable))
self.status("Uploading the package to PyPi via Twine…")
os.system(
"twine upload --repository-url https://test.pypi.org/legacy/ dist/*"
)
sys.exit()
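# Usage sketch for the two custom commands registered in cmdclass below (twine must be
# installed, per the note at the top of this file):
#   python setup.py upload        # build sdist/wheel and publish to PyPI
#   python setup.py test_upload   # build and publish to TestPyPI instead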
# Where the magic happens:
setup(
name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(exclude=("tests",)),
python_requires=">=3.6",
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
entry_points={
"console_scripts": ["fst=fst.cli:main", "fstd=fst.au.cli:main"]
},
install_requires=REQUIRED,
include_package_data=True,
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
],
# $ setup.py publish support.
cmdclass={"upload": UploadCommand, "test_upload": TestUploadCommand},
)
avg_line_length: 28.436242 | max_line_length: 81 | alphanum_fraction: 0.632287
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
NAME = "fst"
DESCRIPTION = "FIle System Templates"
URL = "https://github.com/taesko/{}".format(NAME)
EMAIL = "taeskow@gmail.com"
AUTHOR = "Antonio Todorov"
REQUIRED = ["watchdog"]
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
# with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
# long_description = "\n" + f.read()
# try:
# import pypandoc
#
# long_description = pypandoc.convert(
# source=long_description, to="rst", format="markdown_github"
# )
# except Exception:
# print("warning - unable to convert markdown README to restructured text.")
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, NAME, "__version__.py")) as f:
exec(f.read(), about)
class UploadCommand(Command):
description = "Build and publish the package."
user_options = []
@staticmethod
def status(s):
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
except OSError:
pass
self.status("Building Source and Wheel (universal) distribution…")
os.system("{0} setup.py sdist bdist_wheel".format(sys.executable))
self.status("Uploading the package to PyPi via Twine…")
os.system("twine upload dist/*")
sys.exit()
class TestUploadCommand(Command):
description = "Build and publish the package to TestPyPI."
user_options = []
@staticmethod
def status(s):
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
except OSError:
pass
self.status("Building Source and Wheel distribution…")
os.system("{0} setup.py sdist bdist_wheel".format(sys.executable))
self.status("Uploading the package to PyPi via Twine…")
os.system(
"twine upload --repository-url https://test.pypi.org/legacy/ dist/*"
)
sys.exit()
setup(
name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(exclude=("tests",)),
python_requires=">=3.6",
entry_points={
"console_scripts": ["fst=fst.cli:main", "fstd=fst.au.cli:main"]
},
install_requires=REQUIRED,
include_package_data=True,
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
],
cmdclass={"upload": UploadCommand, "test_upload": TestUploadCommand},
)
| true
| true
|
1c3f5460a679d91aa7db89a2dd12f4a382a12cda
| 5,474
|
py
|
Python
|
CaloOnlineTools/EcalTools/python/ecalTPGAnalyzer_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
CaloOnlineTools/EcalTools/python/ecalTPGAnalyzer_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
CaloOnlineTools/EcalTools/python/ecalTPGAnalyzer_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("ANALYSEMIP")
# Trigger ###
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.startup.L1Menu_startup_v3_Unprescaled_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtBoardMapsConfig_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerScalesConfig_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerPtScaleConfig_cff")
import FWCore.Modules.printContent_cfi
process.dumpEv = FWCore.Modules.printContent_cfi.printContent.clone()
import EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi
process.gtDigis = EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi.l1GtUnpack.clone()
process.gtDigis.DaqGtInputTag = 'source'
# ECAL Unpacker ###
process.load("EventFilter.EcalRawToDigi.EcalUnpackerMapping_cfi")
process.load("EventFilter.EcalRawToDigi.EcalUnpackerData_cfi")
# ECAL TPG Producer ###
process.load("Geometry.EcalMapping.EcalMapping_cfi")
process.load("Geometry.EcalMapping.EcalMappingRecord_cfi")
process.load("MagneticField.Engine.volumeBasedMagneticField_cfi")
process.load("CalibCalorimetry.Configuration.Ecal_FakeConditions_cff")
# ECAL TPG Analyzer ###
process.load("Geometry.CaloEventSetup.CaloGeometry_cfi")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.tpparams = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLinearizationConstRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams2 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGPedestalsRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams3 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGSlidingWindowRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams4 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGWeightIdMapRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams5 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGWeightGroupRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams6 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLutGroupRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams7 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLutIdMapRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams8 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainEBIdMapRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams9 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainEBGroupRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams10 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainStripEERcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams11 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainTowerEERcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams12 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGPhysicsConstRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.EcalTrigPrimESProducer = cms.ESProducer("EcalTrigPrimESProducer",
DatabaseFile = cms.untracked.string('TPG_EB.txt')
)
process.ecalTriggerPrimitiveDigis = cms.EDProducer("EcalTrigPrimProducer",
InstanceEB = cms.string('ebDigis'),
InstanceEE = cms.string(''),
Label = cms.string('ecalEBunpacker'),
BarrelOnly = cms.bool(True),
Famos = cms.bool(False),
TcpOutput = cms.bool(False),
Debug = cms.bool(False),
binOfMaximum = cms.int32(6), ## optional from release 200 on, from 1-10
TTFHighEnergyEB = cms.double(1.0),
TTFHighEnergyEE = cms.double(1.0),
TTFLowEnergyEB = cms.double(1.0), ## this + the following is added from 140_pre4 on
TTFLowEnergyEE = cms.double(1.0)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('rfio:/castor/cern.ch/cms//store/data/Commissioning08/Cosmics/RAW/v1/000/068/000/08E115E1-D1A5-DD11-BCE7-000423D99AA2.root')
)
process.tpAnalyzer = cms.EDAnalyzer("EcalTPGAnalyzer",
TPCollection = cms.InputTag("ecalEBunpacker","EcalTriggerPrimitives"),
TPEmulatorCollection = cms.InputTag("ecalTriggerPrimitiveDigis",""),
DigiCollectionEB = cms.InputTag("ecalEBunpacker","ebDigis"),
DigiCollectionEE = cms.InputTag("ecalEBunpacker","eeDigis"),
GTRecordCollection = cms.string('gtDigis'),
TrackMuonCollection = cms.string('globalCosmicMuons1LegBarrelOnly'),
Print = cms.bool(True),
ReadTriggerPrimitives = cms.bool(True),
UseEndCap = cms.bool(False)
)
process.p = cms.Path(process.gtDigis*process.ecalEBunpacker*process.ecalTriggerPrimitiveDigis*process.tpAnalyzer)
process.EcalTrigPrimESProducer.DatabaseFile = 'TPG_startup.txt.gz'
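# Hedged usage sketch (assumes a CMSSW runtime environment): configurations
# like this one are executed with cmsRun, which imports the file and runs the
# modules scheduled in process.p over process.maxEvents events:
#
#   cmsRun ecalTPGAnalyzer_cfg.py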
| 35.545455
| 162
| 0.744611
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("ANALYSEMIP")
ss.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.startup.L1Menu_startup_v3_Unprescaled_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtBoardMapsConfig_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerScalesConfig_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerPtScaleConfig_cff")
import FWCore.Modules.printContent_cfi
process.dumpEv = FWCore.Modules.printContent_cfi.printContent.clone()
import EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi
process.gtDigis = EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi.l1GtUnpack.clone()
process.gtDigis.DaqGtInputTag = 'source'
ss.load("EventFilter.EcalRawToDigi.EcalUnpackerMapping_cfi")
process.load("EventFilter.EcalRawToDigi.EcalUnpackerData_cfi")
ss.load("Geometry.EcalMapping.EcalMapping_cfi")
process.load("Geometry.EcalMapping.EcalMappingRecord_cfi")
process.load("MagneticField.Engine.volumeBasedMagneticField_cfi")
process.load("CalibCalorimetry.Configuration.Ecal_FakeConditions_cff")
ss.load("Geometry.CaloEventSetup.CaloGeometry_cfi")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.tpparams = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLinearizationConstRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams2 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGPedestalsRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams3 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGSlidingWindowRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams4 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGWeightIdMapRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams5 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGWeightGroupRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams6 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLutGroupRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams7 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLutIdMapRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams8 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainEBIdMapRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams9 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainEBGroupRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams10 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainStripEERcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams11 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGFineGrainTowerEERcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams12 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGPhysicsConstRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.EcalTrigPrimESProducer = cms.ESProducer("EcalTrigPrimESProducer",
DatabaseFile = cms.untracked.string('TPG_EB.txt')
)
process.ecalTriggerPrimitiveDigis = cms.EDProducer("EcalTrigPrimProducer",
InstanceEB = cms.string('ebDigis'),
InstanceEE = cms.string(''),
Label = cms.string('ecalEBunpacker'),
BarrelOnly = cms.bool(True),
Famos = cms.bool(False),
TcpOutput = cms.bool(False),
Debug = cms.bool(False),
binOfMaximum = cms.int32(6),
TTFHighEnergyEB = cms.double(1.0),
TTFHighEnergyEE = cms.double(1.0),
    TTFLowEnergyEB = cms.double(1.0),
    TTFLowEnergyEE = cms.double(1.0)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('rfio:/castor/cern.ch/cms//store/data/Commissioning08/Cosmics/RAW/v1/000/068/000/08E115E1-D1A5-DD11-BCE7-000423D99AA2.root')
)
process.tpAnalyzer = cms.EDAnalyzer("EcalTPGAnalyzer",
TPCollection = cms.InputTag("ecalEBunpacker","EcalTriggerPrimitives"),
TPEmulatorCollection = cms.InputTag("ecalTriggerPrimitiveDigis",""),
DigiCollectionEB = cms.InputTag("ecalEBunpacker","ebDigis"),
DigiCollectionEE = cms.InputTag("ecalEBunpacker","eeDigis"),
GTRecordCollection = cms.string('gtDigis'),
TrackMuonCollection = cms.string('globalCosmicMuons1LegBarrelOnly'),
Print = cms.bool(True),
ReadTriggerPrimitives = cms.bool(True),
UseEndCap = cms.bool(False)
)
process.p = cms.Path(process.gtDigis*process.ecalEBunpacker*process.ecalTriggerPrimitiveDigis*process.tpAnalyzer)
process.EcalTrigPrimESProducer.DatabaseFile = 'TPG_startup.txt.gz'
| true
| true
|
1c3f5487bb649785d318fed288f3c338b2157c1a
| 529
|
py
|
Python
|
testplan/runnable/interactive/resource_loader.py
|
Morgan-Stanley/Testplan
|
9374d6e0da6ae9aa7a1b5e08b42cd21993485837
|
[
"Apache-2.0"
] | null | null | null |
testplan/runnable/interactive/resource_loader.py
|
Morgan-Stanley/Testplan
|
9374d6e0da6ae9aa7a1b5e08b42cd21993485837
|
[
"Apache-2.0"
] | null | null | null |
testplan/runnable/interactive/resource_loader.py
|
Morgan-Stanley/Testplan
|
9374d6e0da6ae9aa7a1b5e08b42cd21993485837
|
[
"Apache-2.0"
] | null | null | null |
"""Import classes on runtime."""
class ResourceLoader:
"""Load logic."""
def load(self, name, kwargs):
"""Load the registered object for the given name."""
target_class = getattr(self, "_load_{}".format(name))()
return target_class(**kwargs)
def _load_TCPServer(self):
from testplan.testing.multitest.driver.tcp import TCPServer
return TCPServer
def _load_TCPClient(self):
from testplan.testing.multitest.driver.tcp import TCPClient
return TCPClient
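# A minimal, self-contained sketch (not part of the original module) of the
# same getattr-based dispatch, using only a stdlib type so it runs without
# the testplan drivers installed:
class _DemoLoader(ResourceLoader):
    def _load_OrderedDict(self):
        from collections import OrderedDict
        return OrderedDict

# _DemoLoader().load("OrderedDict", {}) resolves "_load_OrderedDict" via
# getattr, calls it to obtain the class, then instantiates it with **kwargs.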
| 25.190476
| 67
| 0.661626
|
class ResourceLoader:
def load(self, name, kwargs):
target_class = getattr(self, "_load_{}".format(name))()
return target_class(**kwargs)
def _load_TCPServer(self):
from testplan.testing.multitest.driver.tcp import TCPServer
return TCPServer
def _load_TCPClient(self):
from testplan.testing.multitest.driver.tcp import TCPClient
return TCPClient
| true
| true
|
1c3f55051364d029230f8d099b1f020f5fee8e45
| 13,587
|
py
|
Python
|
escola/tests/test_functional.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 1
|
2019-03-15T18:04:24.000Z
|
2019-03-15T18:04:24.000Z
|
escola/tests/test_functional.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 22
|
2019-03-17T21:53:50.000Z
|
2021-03-31T19:12:19.000Z
|
escola/tests/test_functional.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 1
|
2018-11-25T03:05:23.000Z
|
2018-11-25T03:05:23.000Z
|
# Developed by Vinicius José Fritzen
# Last Modified 16/05/19 16:44.
# Copyright (c) 2019 Vinicius José Fritzen and Albert Angel Lanzarini
import logging
import time
import pytest
from django.test import TestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
TIME_LOAD = 2
logger = logging.getLogger(__name__)
# @pytest.fixture(scope='session')
# def splinter_webdriver():
# """Override splinter webdriver name."""
# return 'django'
def HeaderWrongMsg(expected, recieved):
    """Return a message for when the Title in the page's HEAD is wrong"""
    return f"The page's HEAD title is not '{expected}' but '{recieved}'"
def AssertHeader(expected, recieved_browser: webdriver.Firefox):
    """Check that the Title in the Head is correct"""
    TestCase().assertIn(expected, recieved_browser.title, HeaderWrongMsg(expected, recieved_browser.title))
def AssertAlunoInTheList(nome, num, browser):
tc = TestCase()
alunos_n = browser.find_by_css('.aluno_n')
tc.assertIn(num, [n.text for n in alunos_n])
alunos_nome = browser.find_by_css('.aluno_nome')
tc.assertIn(nome, [row.text for row in alunos_nome])
def navigate_navbar(browser, points):
    """Navigate the site via the NavBar; pass which navbar entries should be selected"""
    for p in points:
        browser.click_link_by_partial_text(p)
def click_button(browser, btn_text):
    """Click the button with the given text"""
    browser.find_by_text(btn_text).click()
def fill_form_id(browser, fields: dict):
    """Fill a form using the passed dict: keys are element ids, values are the text to type"""
    for field_id, data in fields.items():
        browser.find_by_css(f'#{field_id}').type(data)
def submit_form(browser):
    """Submit the page's form"""
    browser.find_by_tag('form').find_by_name('submit').click()
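# Hedged usage sketch (assumes a splinter-style `browser` object like the
# fixture the tests below receive): the helpers compose into a login flow.
#
#   fill_form_id(browser, {'id_username': 'pedro', 'id_password': 'secret'})
#   submit_form(browser)
#   navigate_navbar(browser, ['Escola', 'Lista de Turmas'])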
@pytest.mark.selenium_test
@pytest.mark.live_server_no_flush
def test_loggin_in_as_admin_and_ading_a_turma_and_alunos_with_both_populate_alunos_and_simple_add(live_server, browser,
pedrinho):
"""Testa o login de um admin, adição de uma turma, alunos, professores, materias, e cargos"""
tc = TestCase()
# Pedro, admin e aluno no site de sua escola quer adicionar uma nova turma ao site, no inicio do ano letivo,
# para isso acessa o site da escola.
browser.visit(live_server.url)
# O site o redireciona a pagina de login.
# Login é o titulo
AssertHeader('Login', browser)
# Pedro adiciona sua credenciais e loga no site.
username_input = browser.find_by_css('#id_username')[0]
username_input.type(pedrinho[1])
senha_input = browser.find_by_css('#id_password')
senha_input.type(pedrinho[2])
button = browser.find_by_name('submit')[0]
button.click()
    # He is redirected to the site's home page.
    # Pedro verifies that the page has the title 'Página Inicial'
    AssertHeader('Página Inicial', browser)
    # There Pedro clicks the dropdown, then the item that takes him to the class listing
    navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
    # He gets there and sees that there is a heading 'Lista de Turmas'.
    AssertHeader('Lista de Turmas', browser)
    h1 = browser.find_by_tag('h1')
    tc.assertIn('Lista de Turmas', h1.text, f"'Lista de Turmas' is not the page's h1 title; it is '{h1.text}'")
    # He also sees a button, 'Adicionar Turma'.
    click_button(browser, 'Adicionar Turma')
    # When he clicks the button, he is redirected to a page with a form for adding his own class;
    # the page has the title 'Adicionar uma turma'
    AssertHeader('Adicionar uma turma', browser)
    # He fills in class '302', a third year at his school; the year, '2019', the school year just starting, was already pre-filled.
browser.find_by_css('#id_numero').type('302')
assert browser.find_by_css('#id_ano')[0].value == '2019'
    # He presses Enter and is redirected to the class list,
    browser.find_by_css('#id_ano').type(Keys.ENTER)
    # where Pedro can see that his class is on the list, which delights him. :D
    time.sleep(TIME_LOAD)
    turma_row = browser.find_by_css('.turma_302')
    n_turma = turma_row.find_by_css('.turma_n')
    tc.assertEqual('302', n_turma.text)
    # Pedro clicks the student list of the newly created class.
    turma_row.find_by_text('Alunos').click()
    # It is a huge void, and Pedro is determined to change that
    rows = browser.find_by_tag('tr')
    ## One row for the table header
    assert len(rows) == 1
    # Pedro clicks the add-student link,
    browser.find_link_by_text('Adicionar Aluno').click()
    # and fills it in with the name 'Silas S Abdi', noting the roll-call number as 14.
browser.find_by_css('#id_num_chamada').type('14')
browser.find_by_css('#id_nome').type('Silas S Abdi')
browser.find_by_css('#id_nome').type(Keys.ENTER)
time.sleep(TIME_LOAD)
    # Pedro presses Enter and is redirected to a page showing that his new user has a password
    # and a username; Pedro prints them and goes back to the home page
    browser.visit(live_server.url)
    # He is back on the home page; from there he quickly checks the student list of the class he created
    navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
    turma_row = browser.find_by_css('.turma_302')
    turma_row.find_by_text('Alunos').click()
    # and his eyes light up when he sees it: there, in the list that had been empty, is a name.
    rows = browser.find_by_tag('tr')
    ## One row for the table header
assert len(rows) == 2
alunos_n = browser.find_by_css('.aluno_n')
tc.assertIn('14', [n.text for n in alunos_n])
alunos_nome = browser.find_by_css('.aluno_nome')
tc.assertIn('Silas S Abdi', [row.text for row in alunos_nome])
    # The name he had added. Pedro needs to add more names, but he knows adding one at a time
    # won't be an option, so he goes back to the class list,
    navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
    # and there he clicks the Populate Classes button,
    browser.find_link_by_text('Adicionar Lista de Alunos').click()
    # When the page loads, he smiles: everything he needs is there, a list of fields for adding
    # students, and he starts adding several of them:
    # 2 | Eligia A Borkowska | 302
    ## time.sleep(30)
browser.find_by_name('form-0-num_chamada').type('2')
browser.find_by_name('form-0-nome').type('Eligia A Borkowska')
browser.find_by_name('form-0-turma').type('302')
# 3 | Kelsie E Aitken | 302
browser.find_by_name('form-1-num_chamada').type('3')
browser.find_by_name('form-1-nome').type('Kelsie E Aitken')
browser.find_by_name('form-1-turma').type('302')
# 4 | Amanda M Nilsson | 302
browser.find_by_name('form-2-num_chamada').type('4')
browser.find_by_name('form-2-nome').type('Amanda M Nilsson')
browser.find_by_name('form-2-turma').type('302')
    # And then Pedrinho clicks submit,
    browser.find_by_css('#submit-id-submit').click()
    # he then sees a page with passwords and cards to be handed out; he is happy,
    # and once more goes back to the home page
    browser.visit(live_server.url)
    # and then returns to the student list
    navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
    turma_row = browser.find_by_css('.turma_302')
    turma_row.find_by_text('Alunos').click()
    # and he is satisfied: there are 4 students in the list, all the names he had added.
    ## One row for the table header
rows = browser.find_by_tag('tr')
assert len(rows) == 5
AssertAlunoInTheList('Silas S Abdi', '14', browser)
AssertAlunoInTheList('Eligia A Borkowska', '2', browser)
AssertAlunoInTheList('Kelsie E Aitken', '3', browser)
AssertAlunoInTheList('Amanda M Nilsson', '4', browser)
    # Pedro is going to add his school's teachers
    navigate_navbar(browser, ['Escola', 'Lista de Professores'])
    click_button(browser, 'Adicionar Professor')
    # He adds 'Maria das Dores', the Mathematics teacher
fill_form_id(browser, {
'id_nome': 'Maria das Dores'
})
submit_form(browser)
    # Now he is going to verify that his teacher was added
browser.visit(live_server.url)
navigate_navbar(browser, ['Escola', 'Lista de Professores'])
tc.assertIn('Maria das Dores', [row.text for row in browser.find_by_css('.professor_nome')])
assert 'Lista de Professores' in browser.title
    # Now he adds a new teacher
    click_button(browser, 'Adicionar Professor')
    # He adds 'Patricia Klainir', the Geography teacher
assert 'Adicionar Professor' in browser.title
fill_form_id(browser, {
'id_nome': 'Patricia Klainir'
})
submit_form(browser)
    # Now he is going to verify that his teacher was added
    browser.visit(live_server.url)
    navigate_navbar(browser, ['Escola', 'Lista de Professores'])
    # He verifies that both of his teachers were added
    tc.assertIn('Maria das Dores', [row.text for row in browser.find_by_css('.professor_nome')])
    tc.assertIn('Patricia Klainir', [row.text for row in browser.find_by_css('.professor_nome')])
    # Now Pedro goes back to the class list and opens the subject list
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
click_button(turma_row, "Matérias")
assert 'Matérias da 302' in browser.title
    # There he clicks to add a new subject, 'Matematica', with the teacher 'Maria das Dores'
click_button(browser, 'Adicionar Matéria')
assert 'Adicionar Matéria' in browser.title
fill_form_id(browser, {
'id_nome': 'Matematica',
'id_abreviacao': 'MAT'
})
# s = Select(browser.find_element_by_id('id_professor'))
# s.select_by_visible_text('Maria das Dores')
browser.select_by_text('professor', 'Maria das Dores')
submit_form(browser)
    # He also adds the subject 'Geografia' with the teacher 'Patricia Klainir'
click_button(browser, 'Adicionar Matéria')
fill_form_id(browser, {
'id_nome': 'Geografia',
'id_abreviacao': 'GEO'
})
# s = Select(browser.find_element_by_id('id_professor'))
# s.select_by_visible_text('Patricia Klainir')
browser.select_by_text('professor', 'Patricia Klainir')
submit_form(browser)
    # Now Pedro will make Patricia the class's Regente (homeroom teacher)
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
click_button(turma_row, "Cargos")
assert 'Cargos da 302' in browser.title
    # Pedro clicks to create a role
click_button(browser, 'Adicionar Cargo')
assert 'Adicionar Cargo' in browser.title
    # Pedro fills it in as Regente and assigns his teacher
fill_form_id(browser, {
'id_nome': 'Regente'
})
# Select(browser.find_element_by_id('id_ocupante')).select_by_visible_text('patricia.pk')
browser.select_by_text('ocupante', 'patricia.pk')
# Select(browser.find_element_by_id('id_cod_especial')).select_by_value('5')
browser.select('cod_especial', 5)
submit_form(browser)
navigate_navbar(browser, ['Escola','Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
click_button(turma_row, "Cargos")
assert 'patricia.pk' in [r.text for r in browser.find_by_css('.cargo_ocupante')]
    # Pedro logs out of his account.
browser.find_link_by_text('Sair').click()
@pytest.mark.selenium_test
@pytest.mark.live_server_no_flush
def test_novo_aluno_pode_logar(live_server, browser, dummy_aluno, transactional_db):
"""Aluno novo loga no site"""
tc = TestCase()
# Marcos ouviu falar do novo site de escola, ele esta curioso sobre esse site e suas funcionalidades
# Então Marcos acessa o link do site
browser.visit(live_server.url)
# A primeira coisa que Marcos vê é uma tela de Login muito bonita
AssertHeader("Login", browser)
# Marcos preenche as credencias que recebeu
fill_form_id(browser, {
'id_username': dummy_aluno['username'],
'id_password': dummy_aluno['senha']
})
submit_form(browser)
# Ele é redirecionado a pagina inicial,
AssertHeader("Página Inicial", browser)
# Na pagina inicial ele vê uma tabela de horarios vazia
assert "Horário" in [a.text for a in browser.find_by_tag('h2')]
# Tambem ele vê uma tabela de Tarefas
assert "Tarefas" in [a.text for a in browser.find_by_tag('h2')]
# Ele resolve sair, sua curiosidade foi saciada
@pytest.mark.selenium_test
@pytest.mark.live_server_no_flush
@pytest.mark.xfail(reason="Teste não terminado, terminar ASAP") # TODO: 28/04/2019 por wwwvi: Terminar esse teste
def test_lider_pode_alterar_horario(live_server, browser, dummy_aluno_lider, transactional_db):
# Jorge é o lider de sua turma, ele acessa o site para definir o horario de sua turma
## Defininindo Jorge como logado
dummy_login(browser, dummy_aluno_lider, live_server)
# Ele acessa a pagina inicial
browser.visit(live_server.url)
ht = browser.find_by_css(".horario_table")
dia = ht.first.find_by_id("turno_1").first.find_by_id('dia_2')
dia.find_by_text("Alterar").click()
pytest.xfail("Terminar") # TODO: 18/05/2019 por wwwvi: TERMINAR, e automatizar uma criação de turnos e horarios
def dummy_login(browser, dummy_user, live_server):
browser.visit(live_server.url)
browser.cookies.add({'sessionid': dummy_user['cookie'].value})
browser.reload()
| 44.841584
| 138
| 0.707294
|
import logging
import time
import pytest
from django.test import TestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
TIME_LOAD = 2
logger = logging.getLogger(__name__)
def HeaderWrongMsg(expected, recieved):
return f"O titulo no HEAD da pagina não é '{expected}', e sim '{recieved}'"
def AssertHeader(expected, recieved_browser: webdriver.Firefox):
TestCase().assertIn(expected, recieved_browser.title, HeaderWrongMsg(expected, recieved_browser.title))
def AssertAlunoInTheList(nome, num, browser):
tc = TestCase()
alunos_n = browser.find_by_css('.aluno_n')
tc.assertIn(num, [n.text for n in alunos_n])
alunos_nome = browser.find_by_css('.aluno_nome')
tc.assertIn(nome, [row.text for row in alunos_nome])
def navigate_navbar(browser, points):
for p in points:
browser.click_link_by_partial_text(p)
def click_button(browser, btn_text):
browser.find_by_text(btn_text).click()
def fill_form_id(browser, fields: dict):
for field_id, data in fields.items():
browser.find_by_css(f'#{field_id}').type(data)
def submit_form(browser):
browser.find_by_tag('form').find_by_name('submit').click()
@pytest.mark.selenium_test
@pytest.mark.live_server_no_flush
def test_loggin_in_as_admin_and_ading_a_turma_and_alunos_with_both_populate_alunos_and_simple_add(live_server, browser,
pedrinho):
tc = TestCase()
browser.visit(live_server.url)
AssertHeader('Login', browser)
username_input = browser.find_by_css('#id_username')[0]
username_input.type(pedrinho[1])
senha_input = browser.find_by_css('#id_password')
senha_input.type(pedrinho[2])
button = browser.find_by_name('submit')[0]
button.click()
AssertHeader('Página Inicial', browser)
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
AssertHeader('Lista de Turmas', browser)
h1 = browser.find_by_tag('h1')
    tc.assertIn('Lista de Turmas', h1.text, f"'Lista de Turmas' is not the page's h1 title; it is '{h1.text}'")
click_button(browser, 'Adicionar Turma')
AssertHeader('Adicionar uma turma', browser)
browser.find_by_css('#id_numero').type('302')
assert browser.find_by_css('#id_ano')[0].value == '2019'
browser.find_by_css('#id_ano').type(Keys.ENTER)
time.sleep(TIME_LOAD)
turma_row = browser.find_by_css('.turma_302')
n_turma = turma_row.find_by_css('.turma_n')
tc.assertEqual('302', n_turma.text)
turma_row.find_by_text('Alunos').click()
rows = browser.find_by_tag('tr')
browser.find_link_by_text('Adicionar Aluno').click()
browser.find_by_css('#id_num_chamada').type('14')
browser.find_by_css('#id_nome').type('Silas S Abdi')
browser.find_by_css('#id_nome').type(Keys.ENTER)
time.sleep(TIME_LOAD)
browser.visit(live_server.url)
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
turma_row.find_by_text('Alunos').click()
rows = browser.find_by_tag('tr')
alunos_n = browser.find_by_css('.aluno_n')
tc.assertIn('14', [n.text for n in alunos_n])
alunos_nome = browser.find_by_css('.aluno_nome')
tc.assertIn('Silas S Abdi', [row.text for row in alunos_nome])
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
browser.find_link_by_text('Adicionar Lista de Alunos').click()
    browser.find_by_name('form-0-num_chamada').type('2')
browser.find_by_name('form-0-nome').type('Eligia A Borkowska')
browser.find_by_name('form-0-turma').type('302')
browser.find_by_name('form-1-num_chamada').type('3')
browser.find_by_name('form-1-nome').type('Kelsie E Aitken')
browser.find_by_name('form-1-turma').type('302')
browser.find_by_name('form-2-num_chamada').type('4')
browser.find_by_name('form-2-nome').type('Amanda M Nilsson')
browser.find_by_name('form-2-turma').type('302')
browser.find_by_css('#submit-id-submit').click()
browser.visit(live_server.url)
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
turma_row.find_by_text('Alunos').click()
    rows = browser.find_by_tag('tr')
assert len(rows) == 5
AssertAlunoInTheList('Silas S Abdi', '14', browser)
AssertAlunoInTheList('Eligia A Borkowska', '2', browser)
AssertAlunoInTheList('Kelsie E Aitken', '3', browser)
AssertAlunoInTheList('Amanda M Nilsson', '4', browser)
navigate_navbar(browser, ['Escola', 'Lista de Professores'])
click_button(browser, 'Adicionar Professor')
fill_form_id(browser, {
'id_nome': 'Maria das Dores'
})
submit_form(browser)
browser.visit(live_server.url)
navigate_navbar(browser, ['Escola', 'Lista de Professores'])
tc.assertIn('Maria das Dores', [row.text for row in browser.find_by_css('.professor_nome')])
assert 'Lista de Professores' in browser.title
click_button(browser, 'Adicionar Professor')
assert 'Adicionar Professor' in browser.title
fill_form_id(browser, {
'id_nome': 'Patricia Klainir'
})
submit_form(browser)
browser.visit(live_server.url)
navigate_navbar(browser, ['Escola', 'Lista de Professores'])
tc.assertIn('Maria das Dores', [row.text for row in browser.find_by_css('.professor_nome')])
tc.assertIn('Patricia Klainir', [row.text for row in browser.find_by_css('.professor_nome')])
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
click_button(turma_row, "Matérias")
assert 'Matérias da 302' in browser.title
click_button(browser, 'Adicionar Matéria')
assert 'Adicionar Matéria' in browser.title
fill_form_id(browser, {
'id_nome': 'Matematica',
'id_abreviacao': 'MAT'
})
browser.select_by_text('professor', 'Maria das Dores')
submit_form(browser)
click_button(browser, 'Adicionar Matéria')
fill_form_id(browser, {
'id_nome': 'Geografia',
'id_abreviacao': 'GEO'
})
browser.select_by_text('professor', 'Patricia Klainir')
submit_form(browser)
navigate_navbar(browser, ['Escola', 'Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
click_button(turma_row, "Cargos")
assert 'Cargos da 302' in browser.title
click_button(browser, 'Adicionar Cargo')
assert 'Adicionar Cargo' in browser.title
fill_form_id(browser, {
'id_nome': 'Regente'
})
browser.select_by_text('ocupante', 'patricia.pk')
browser.select('cod_especial', 5)
submit_form(browser)
navigate_navbar(browser, ['Escola','Lista de Turmas'])
turma_row = browser.find_by_css('.turma_302')
click_button(turma_row, "Cargos")
assert 'patricia.pk' in [r.text for r in browser.find_by_css('.cargo_ocupante')]
browser.find_link_by_text('Sair').click()
@pytest.mark.selenium_test
@pytest.mark.live_server_no_flush
def test_novo_aluno_pode_logar(live_server, browser, dummy_aluno, transactional_db):
tc = TestCase()
browser.visit(live_server.url)
AssertHeader("Login", browser)
fill_form_id(browser, {
'id_username': dummy_aluno['username'],
'id_password': dummy_aluno['senha']
})
submit_form(browser)
AssertHeader("Página Inicial", browser)
assert "Horário" in [a.text for a in browser.find_by_tag('h2')]
assert "Tarefas" in [a.text for a in browser.find_by_tag('h2')]
@pytest.mark.selenium_test
@pytest.mark.live_server_no_flush
@pytest.mark.xfail(reason="Teste não terminado, terminar ASAP")
def test_lider_pode_alterar_horario(live_server, browser, dummy_aluno_lider, transactional_db):
    dummy_login(browser, dummy_aluno_lider, live_server)
browser.visit(live_server.url)
ht = browser.find_by_css(".horario_table")
dia = ht.first.find_by_id("turno_1").first.find_by_id('dia_2')
dia.find_by_text("Alterar").click()
pytest.xfail("Terminar")
def dummy_login(browser, dummy_user, live_server):
browser.visit(live_server.url)
browser.cookies.add({'sessionid': dummy_user['cookie'].value})
browser.reload()
| true
| true
|
1c3f558452fe4d4e193c488525a9e64c73a76bd9
| 855
|
py
|
Python
|
googler/utils/compat.py
|
commx/googler
|
6b834a3248ac297f1e1a75af3c622a0f37b34a0a
|
[
"Apache-2.0"
] | 2
|
2016-01-13T09:12:20.000Z
|
2016-03-13T10:42:21.000Z
|
googler/utils/compat.py
|
commx/googler
|
6b834a3248ac297f1e1a75af3c622a0f37b34a0a
|
[
"Apache-2.0"
] | 1
|
2018-11-11T16:40:41.000Z
|
2018-11-11T16:40:41.000Z
|
googler/utils/compat.py
|
commx/googler
|
6b834a3248ac297f1e1a75af3c622a0f37b34a0a
|
[
"Apache-2.0"
] | 1
|
2017-12-26T08:53:40.000Z
|
2017-12-26T08:53:40.000Z
|
##
# Copyright (C) 2015 Christian Jurk <commx@commx.ws>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
text_type = unicode
bytes_type = str
from urllib import urlencode
else:
text_type = str
bytes_type = bytes
from urllib.parse import urlencode
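# Hedged usage sketch (illustrative, not part of the original module):
# downstream code imports these aliases instead of branching on
# sys.version_info at every call site.
def to_query_string(params):
    # urlencode resolves to urllib.urlencode on PY2 and to
    # urllib.parse.urlencode on PY3; the call site stays identical.
    return urlencode(params)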
| 27.580645
| 74
| 0.729825
|
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
text_type = unicode
bytes_type = str
from urllib import urlencode
else:
text_type = str
bytes_type = bytes
from urllib.parse import urlencode
| true
| true
|
1c3f55ec18411adfef510553a7c3bbc52d63deba
| 4,221
|
py
|
Python
|
setup.py
|
Rippling/mongoengine
|
c3b6fa6ffdfe05fcf6f49857c1a89fee0175a05f
|
[
"MIT"
] | null | null | null |
setup.py
|
Rippling/mongoengine
|
c3b6fa6ffdfe05fcf6f49857c1a89fee0175a05f
|
[
"MIT"
] | 28
|
2016-11-30T03:15:18.000Z
|
2022-02-25T15:57:02.000Z
|
setup.py
|
Rippling/mongoengine
|
c3b6fa6ffdfe05fcf6f49857c1a89fee0175a05f
|
[
"MIT"
] | 1
|
2021-11-10T05:33:18.000Z
|
2021-11-10T05:33:18.000Z
|
from __future__ import absolute_import
import os
import sys
from setuptools import setup, find_packages
# Hack to silence atexit traceback in newer python versions
try:
import multiprocessing
except ImportError:
pass
DESCRIPTION = 'MongoEngine is a Python Object-Document ' + \
'Mapper for working with MongoDB.'
try:
with open('README.rst') as fin:
LONG_DESCRIPTION = fin.read()
except Exception:
LONG_DESCRIPTION = None
def _get_rp_version(actual_version):
# if __PROJECT_GIT_COMMIT_SHA is provided, that gets the highest precedence.
# if not available, then we can check if it's present in the file (the file
# is never checked in, so it's used only during the package installation step)
rp_commit_sha = None
try:
rp_commit_sha = open('rp-version', 'r').read()
except FileNotFoundError:
pass
rp_commit_sha = os.environ.get('__PROJECT_GIT_COMMIT_SHA', None) or rp_commit_sha
if rp_commit_sha is not None:
with open('rp-version', 'w') as f:
f.write(rp_commit_sha)
actual_version = actual_version + "." + rp_commit_sha
return actual_version
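# Hedged worked example (illustrative values only): with actual_version set to
# "0.10.0" and __PROJECT_GIT_COMMIT_SHA exported as "abc123", _get_rp_version
# returns "0.10.0.abc123" and persists "abc123" to the rp-version file so
# later install steps see the same suffix.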
def get_version(version_tuple):
if not isinstance(version_tuple[-1], int):
return '.'.join(str(t) for t in version_tuple[:-1]) + version_tuple[-1]
return '.'.join(str(t) for t in version_tuple)
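# Hedged worked example: get_version((0, 10, 0)) -> "0.10.0", while a tuple
# with a string suffix such as (0, 10, 0, ".dev1") -> "0.10.0.dev1".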
# Dirty hack to get version number from mongoengine/__init__.py - we can't
# import it as it depends on PyMongo and PyMongo isn't installed until this
# file is read
init = os.path.join(os.path.dirname(__file__), 'mongoengine', '__init__.py')
version_line = list([l for l in open(init) if l.startswith('VERSION')])[0]
VERSION = get_version(eval(version_line.split('=')[-1]))
VERSION = _get_rp_version(VERSION)
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
]
extra_opts = {"packages": find_packages(exclude=["tests", "tests.*"]) +\
['bson-stubs', 'django_mongoengine-stubs', 'django_model_changes-stubs']}
if sys.version_info[0] == 3:
extra_opts['use_2to3'] = True
extra_opts['tests_require'] = ['nose', 'rednose', 'coverage==3.7.1', 'blinker', 'Pillow>=2.0.0']
if "test" in sys.argv or "nosetests" in sys.argv:
extra_opts['packages'] = find_packages()
extra_opts['package_data'] = {"tests": ["fields/mongoengine.png", "fields/mongodb_leaf.png"]}
else:
# coverage 4 does not support Python 3.2 anymore
extra_opts['tests_require'] = ['nose', 'rednose', 'coverage==3.7.1', 'blinker', 'Pillow>=2.0.0', 'python-dateutil']
if sys.version_info[0] == 2 and sys.version_info[1] == 6:
extra_opts['tests_require'].append('unittest2')
setup(name='mongoengine',
version=VERSION,
author='Harry Marr',
author_email='harry.marr@{nospam}gmail.com',
maintainer="Ross Lawley",
maintainer_email="ross.lawley@{nospam}gmail.com",
url='http://mongoengine.org/',
download_url='https://github.com/MongoEngine/mongoengine/tarball/master',
license='MIT',
package_data={
"mongoengine": ["py.typed", "*.pyi", "base/*.pyi", "queryset/*.pyi"],
"bson-stubs": ["*.pyi"],
"django_mongoengine-stubs": ["*.pyi"],
"django_model_changes-stubs": ["*.pyi"],
},
include_package_data=True,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=CLASSIFIERS,
install_requires=['pymongo>=2.7.1', 'six'],
test_suite='nose.collector',
**extra_opts
)
| 37.6875
| 119
| 0.664534
|
from __future__ import absolute_import
import os
import sys
from setuptools import setup, find_packages
try:
import multiprocessing
except ImportError:
pass
DESCRIPTION = 'MongoEngine is a Python Object-Document ' + \
'Mapper for working with MongoDB.'
try:
with open('README.rst') as fin:
LONG_DESCRIPTION = fin.read()
except Exception:
LONG_DESCRIPTION = None
def _get_rp_version(actual_version):
    # if __PROJECT_GIT_COMMIT_SHA is provided, that gets the highest precedence.
    # if not available, then we can check if it's present in the file (the file
    # is never checked in, so it's used only during the package installation step)
rp_commit_sha = None
try:
rp_commit_sha = open('rp-version', 'r').read()
except FileNotFoundError:
pass
rp_commit_sha = os.environ.get('__PROJECT_GIT_COMMIT_SHA', None) or rp_commit_sha
if rp_commit_sha is not None:
with open('rp-version', 'w') as f:
f.write(rp_commit_sha)
actual_version = actual_version + "." + rp_commit_sha
return actual_version
def get_version(version_tuple):
if not isinstance(version_tuple[-1], int):
return '.'.join(str(t) for t in version_tuple[:-1]) + version_tuple[-1]
return '.'.join(str(t) for t in version_tuple)
# Dirty hack to get version number from mongoengine/__init__.py - we can't
# import it as it depends on PyMongo and PyMongo isn't installed until this
# file is read
init = os.path.join(os.path.dirname(__file__), 'mongoengine', '__init__.py')
version_line = list([l for l in open(init) if l.startswith('VERSION')])[0]
VERSION = get_version(eval(version_line.split('=')[-1]))
VERSION = _get_rp_version(VERSION)
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
]
extra_opts = {"packages": find_packages(exclude=["tests", "tests.*"]) +\
['bson-stubs', 'django_mongoengine-stubs', 'django_model_changes-stubs']}
if sys.version_info[0] == 3:
extra_opts['use_2to3'] = True
extra_opts['tests_require'] = ['nose', 'rednose', 'coverage==3.7.1', 'blinker', 'Pillow>=2.0.0']
if "test" in sys.argv or "nosetests" in sys.argv:
extra_opts['packages'] = find_packages()
extra_opts['package_data'] = {"tests": ["fields/mongoengine.png", "fields/mongodb_leaf.png"]}
else:
extra_opts['tests_require'] = ['nose', 'rednose', 'coverage==3.7.1', 'blinker', 'Pillow>=2.0.0', 'python-dateutil']
if sys.version_info[0] == 2 and sys.version_info[1] == 6:
extra_opts['tests_require'].append('unittest2')
setup(name='mongoengine',
version=VERSION,
author='Harry Marr',
author_email='harry.marr@{nospam}gmail.com',
maintainer="Ross Lawley",
maintainer_email="ross.lawley@{nospam}gmail.com",
url='http://mongoengine.org/',
download_url='https://github.com/MongoEngine/mongoengine/tarball/master',
license='MIT',
package_data={
"mongoengine": ["py.typed", "*.pyi", "base/*.pyi", "queryset/*.pyi"],
"bson-stubs": ["*.pyi"],
"django_mongoengine-stubs": ["*.pyi"],
"django_model_changes-stubs": ["*.pyi"],
},
include_package_data=True,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=CLASSIFIERS,
install_requires=['pymongo>=2.7.1', 'six'],
test_suite='nose.collector',
**extra_opts
)
| true
| true
|
1c3f560d6e75286729123a801fbff679f21bbc87
| 7,466
|
py
|
Python
|
django/db/migrations/migration.py
|
xia0AL/baby_two
|
70244363024a36463dfaeda64e9e95ac118e1934
|
[
"BSD-3-Clause"
] | 1
|
2016-02-13T15:40:02.000Z
|
2016-02-13T15:40:02.000Z
|
django/db/migrations/migration.py
|
ojengwa/django-1
|
f6b09a7f85c3b67b2011553838b079788c413432
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/migrations/migration.py
|
ojengwa/django-1
|
f6b09a7f85c3b67b2011553838b079788c413432
|
[
"BSD-3-Clause"
] | 1
|
2022-03-26T09:05:09.000Z
|
2022-03-26T09:05:09.000Z
|
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Migration(object):
"""
The base class for all migrations.
Migration files will import this from django.db.migrations.Migration
and subclass it as a class called Migration. It will have one or more
of the following attributes:
- operations: A list of Operation instances, probably from django.db.migrations.operations
- dependencies: A list of tuples of (app_path, migration_name)
- run_before: A list of tuples of (app_path, migration_name)
- replaces: A list of migration_names
Note that all migrations come out of migrations and into the Loader or
Graph as instances, having been initialized with their app label and name.
"""
# Operations to apply during this migration, in order.
operations = []
# Other migrations that should be run before this migration.
# Should be a list of (app, migration_name).
dependencies = []
# Other migrations that should be run after this one (i.e. have
# this migration added to their dependencies). Useful to make third-party
# apps' migrations run after your AUTH_USER replacement, for example.
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
# Error class which is raised when a migration is irreversible
class IrreversibleError(RuntimeError):
pass
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
# Copy dependencies & other attrs as we might mutate them at runtime
self.operations = list(self.__class__.operations)
self.dependencies = list(self.__class__.dependencies)
self.run_before = list(self.__class__.run_before)
self.replaces = list(self.__class__.replaces)
def __eq__(self, other):
if not isinstance(other, Migration):
return False
return (self.name == other.name) and (self.app_label == other.app_label)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state):
"""
Takes a ProjectState and returns a new one with the migration's
operations applied to it.
"""
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a forwards order.
Returns the resulting project state for efficient re-use by following
Migrations.
"""
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql and not operation.reduces_to_sql:
schema_editor.collected_sql.append("--")
schema_editor.collected_sql.append("-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE "
"WRITTEN AS SQL:")
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
continue
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
else:
# Normal behaviour
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
return project_state
def unapply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a reverse order.
The backwards migration process consists of two phases:
1. The intermediate states from right before the first until right
after the last operation inside this migration are preserved.
2. The operations are applied in reverse order using the states
recorded in step 1.
"""
# Construct all the intermediate states we need for a reverse migration
to_run = []
new_state = project_state
# Phase 1
for operation in self.operations:
# If it's irreversible, error out
if not operation.reversible:
raise Migration.IrreversibleError("Operation %s in %s is not reversible" % (operation, self))
            # Preserve the new state from the previous run so we don't tamper
            # with the same state across operations
new_state = new_state.clone()
old_state = new_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.insert(0, (operation, old_state, new_state))
# Phase 2
for operation, to_state, from_state in to_run:
if collect_sql:
if not operation.reduces_to_sql:
schema_editor.collected_sql.append("--")
schema_editor.collected_sql.append("-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE "
"WRITTEN AS SQL:")
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
continue
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
else:
# Normal behaviour
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
return project_state
class SwappableTuple(tuple):
"""
Subclass of tuple so Django can tell this was originally a swappable
dependency when it reads the migration file.
"""
def __new__(cls, value, setting):
self = tuple.__new__(cls, value)
self.setting = setting
return self
def swappable_dependency(value):
"""
Turns a setting value into a dependency.
"""
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
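# Hedged sketch (not part of this module): a minimal concrete migration file
# exercising the attributes documented above; app labels, migration names and
# the field are illustrative assumptions.
#
#   from django.db import migrations, models
#
#   class Migration(migrations.Migration):
#       dependencies = [("library", "0001_initial")]
#       run_before = [("thirdparty_app", "0001_initial")]
#       operations = [
#           migrations.AddField(
#               model_name="book",
#               name="subtitle",
#               field=models.TextField(default=""),
#           ),
#       ]
#
# And a worked example of the helper above:
#   swappable_dependency("auth.User")
#   # -> SwappableTuple(("auth", "__first__")) with .setting == "auth.User"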
| 41.709497
| 109
| 0.648674
|
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Migration(object):
operations = []
dependencies = []
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
# Error class which is raised when a migration is irreversible
class IrreversibleError(RuntimeError):
pass
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
# Copy dependencies & other attrs as we might mutate them at runtime
self.operations = list(self.__class__.operations)
self.dependencies = list(self.__class__.dependencies)
self.run_before = list(self.__class__.run_before)
self.replaces = list(self.__class__.replaces)
def __eq__(self, other):
if not isinstance(other, Migration):
return False
return (self.name == other.name) and (self.app_label == other.app_label)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state):
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor, collect_sql=False):
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql and not operation.reduces_to_sql:
schema_editor.collected_sql.append("--")
schema_editor.collected_sql.append("-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE "
"WRITTEN AS SQL:")
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
continue
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
else:
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
return project_state
def unapply(self, project_state, schema_editor, collect_sql=False):
to_run = []
new_state = project_state
for operation in self.operations:
if not operation.reversible:
raise Migration.IrreversibleError("Operation %s in %s is not reversible" % (operation, self))
            # Preserve the new state from the previous run so we don't tamper
            # with the same state across operations
new_state = new_state.clone()
old_state = new_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.insert(0, (operation, old_state, new_state))
# Phase 2
for operation, to_state, from_state in to_run:
if collect_sql:
if not operation.reduces_to_sql:
schema_editor.collected_sql.append("--")
schema_editor.collected_sql.append("-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE "
"WRITTEN AS SQL:")
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
continue
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
else:
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
return project_state
class SwappableTuple(tuple):
def __new__(cls, value, setting):
self = tuple.__new__(cls, value)
self.setting = setting
return self
def swappable_dependency(value):
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
| true
| true
|
1c3f566ac5de92909c6d445e415ff2cdc4cd53d2
| 2,184
|
py
|
Python
|
main.py
|
rnoguer22/Ejercicios_Herencia_POO
|
0e18fe1edf262f20b95cc0b40d7e61c4be31ec0d
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
rnoguer22/Ejercicios_Herencia_POO
|
0e18fe1edf262f20b95cc0b40d7e61c4be31ec0d
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
rnoguer22/Ejercicios_Herencia_POO
|
0e18fe1edf262f20b95cc0b40d7e61c4be31ec0d
|
[
"Apache-2.0"
] | null | null | null |
from Clases.ejercicio1 import *
from Clases.Puzzle import *
from Clases.Herencia_multiple_caso_real import *
if __name__ == "__main__":
    # EXERCISE 1
    punto1 = Punto2D(3,2)
    punto1.traslacion(1,1)
    print("The new value of punto1 is ({},{})".format(punto1.x,punto1.y))
    punto2 = Punto3D(1,1,1)
    punto2.traslacion2(1, 2, 3)
    print("The new value of punto2 is ({},{},{})".format(punto2.x,punto2.y,punto2.z))
    # EXERCISE 2
    base = Base()  # define base as an object of the parent class Base
    derivada = Derivada()  # define derivada as an object of the child class Derivada
    base.A()  # output: a, uses the parent class's A method
    derivada.A()  # output: a, uses the inherited A method
    base.B()  # output: b, uses the parent class's B method
    derivada.B()  # output: bb, uses the child class's B method
    base.C()  # output: c, uses the parent class's C method
    derivada.C()  # output: cc, uses the child class's C method with its attributes
    derivada = base  # rebind derivada to the same object as base, so it is now a parent-class object
    derivada.C()  # output: c, uses the parent class's C method
    # EXERCISE 3
class A:
def __init__(self,a):
self.a = a
class B(A):
def __init__(self, b, a):
self.b = b
A.__init__(self, a)
class C(A):
def __init__(self, c, a):
self.c = c
A.__init__(self, a)
    class D(B, C):
        def __init__(self, d, a, b, c):
            self.d = d
            B.__init__(self, b, a)
            C.__init__(self, c, a)
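    # Hedged note (not in the original exercise): with D(B, C) the MRO is
    # D -> B -> C -> A, which cooperative super().__init__() calls would
    # follow; this exercise invokes each parent initializer explicitly,
    # e.g. d = D(4, 1, 2, 3) sets d.d=4, d.b=2, d.c=3 and d.a=1.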
    # EXERCISE 4
    # Instantiate the walls
pared_norte = Pared("NORTE")
pared_oeste = Pared("OESTE")
pared_sur = Pared("SUR")
pared_este = Pared("ESTE")
    # Instantiate the windows
ventana_norte = Ventana(pared_norte, 0.5)
ventana_oeste = Ventana(pared_oeste, 1)
ventana_sur = Ventana(pared_sur, 2)
ventana_este = Ventana(pared_este, 1)
    # Instantiate the house with the 4 walls
casa = Casa([pared_norte, pared_oeste, pared_sur, pared_este])
print(casa.superficie_acristalada())
| 36.4
| 111
| 0.625916
|
from Clases.ejercicio1 import *
from Clases.Puzzle import *
from Clases.Herencia_multiple_caso_real import *
if __name__ == "__main__":
punto1 = Punto2D(3,2)
punto1.traslacion(1,1)
print("El nuevo valor de punto1 es ({},{})".format(punto1.x,punto1.y))
punto2 = Punto3D(1,1,1)
punto2.traslacion2(1, 2, 3)
print("El nuevo valor de punto2 es ({},{},{})".format(punto2.x,punto2.y,punto2.z))
base = Base()
derivada = Derivada()
base.A()
derivada.A()
base.B()
derivada.B()
base.C()
derivada.C()
derivada = base
derivada.C()
class A:
def __init__(self,a):
self.a = a
class B(A):
def __init__(self, b, a):
self.b = b
A.__init__(self, a)
class C(A):
def __init__(self, c, a):
self.c = c
A.__init__(self, a)
class D(B, C):
def __init__(self,d, a, b, c):
self.d = d
            B.__init__(self, b, a)
            C.__init__(self, c, a)
pared_norte = Pared("NORTE")
pared_oeste = Pared("OESTE")
pared_sur = Pared("SUR")
pared_este = Pared("ESTE")
ventana_norte = Ventana(pared_norte, 0.5)
ventana_oeste = Ventana(pared_oeste, 1)
ventana_sur = Ventana(pared_sur, 2)
ventana_este = Ventana(pared_este, 1)
casa = Casa([pared_norte, pared_oeste, pared_sur, pared_este])
print(casa.superficie_acristalada())
| true
| true
|
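Exercise 3 above wires the diamond D(B, C) by calling each parent initializer explicitly, which runs A.__init__ twice (once through B, once through C). A minimal sketch, separate from the original exercise, of the cooperative alternative with super() and keyword arguments, where the MRO guarantees A is initialized exactly once:

class A:
    def __init__(self, a, **kwargs):
        super().__init__(**kwargs)
        self.a = a

class B(A):
    def __init__(self, b, **kwargs):
        super().__init__(**kwargs)
        self.b = b

class C(A):
    def __init__(self, c, **kwargs):
        super().__init__(**kwargs)
        self.c = c

class D(B, C):
    def __init__(self, d, **kwargs):
        super().__init__(**kwargs)
        self.d = d

d = D(d=4, c=3, b=2, a=1)
print(d.a, d.b, d.c, d.d)  # 1 2 3 4: the __init__ chain follows the MRO D -> B -> C -> A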
1c3f571eacb5844d4f1d6425820e62fc84c1eb66
| 1,739
|
py
|
Python
|
beginning-game-development/Chapter 7/7-4.py
|
CrtomirJuren/pygame-projects
|
f710f36050bfe3ece866bbda7d570caa1e037d7a
|
[
"MIT"
] | 43
|
2015-09-20T02:05:48.000Z
|
2022-03-01T22:00:43.000Z
|
beginning-game-development/Chapter 7/7-4.py
|
CrtomirJuren/pygame-projects
|
f710f36050bfe3ece866bbda7d570caa1e037d7a
|
[
"MIT"
] | null | null | null |
beginning-game-development/Chapter 7/7-4.py
|
CrtomirJuren/pygame-projects
|
f710f36050bfe3ece866bbda7d570caa1e037d7a
|
[
"MIT"
] | 40
|
2015-05-19T06:51:13.000Z
|
2022-03-27T18:11:16.000Z
|
class World(object):
def __init__(self):
self.entities = {} # Store all the entities
self.entity_id = 0 # Last entity id assigned
# Draw the nest (a circle) on the background
self.background = pygame.surface.Surface(SCREEN_SIZE).convert()
self.background.fill((255, 255, 255))
pygame.draw.circle(self.background, (200, 255, 200), NEST_POSITION, int(NEST_SIZE))
def add_entity(self, entity):
# Stores the entity then advances the current id
self.entities[self.entity_id] = entity
entity.id = self.entity_id
self.entity_id += 1
def remove_entity(self, entity):
del self.entities[entity.id]
def get(self, entity_id):
# Find the entity, given its id (or None if it is not found)
if entity_id in self.entities:
return self.entities[entity_id]
else:
return None
def process(self, time_passed):
# Process every entity in the world
time_passed_seconds = time_passed / 1000.0
        for entity in list(self.entities.values()):  # values(), not Py2 itervalues(); copied so entities can be removed mid-loop
entity.process(time_passed_seconds)
def render(self, surface):
# Draw the background and all the entities
surface.blit(self.background, (0, 0))
for entity in self.entities.values():
entity.render(surface)
def get_close_entity(self, name, location, e_range=100):
# Find an entity within range of a location
location = Vector2(*location)
for entity in self.entities.values():
if entity.name == name:
distance = location.get_distance_to(entity.location)
if distance < e_range:
return entity
return None
| 31.053571
| 91
| 0.626797
|
class World(object):
def __init__(self):
self.entities = {}
self.entity_id = 0
self.background = pygame.surface.Surface(SCREEN_SIZE).convert()
self.background.fill((255, 255, 255))
pygame.draw.circle(self.background, (200, 255, 200), NEST_POSITION, int(NEST_SIZE))
def add_entity(self, entity):
self.entities[self.entity_id] = entity
entity.id = self.entity_id
self.entity_id += 1
def remove_entity(self, entity):
del self.entities[entity.id]
def get(self, entity_id):
if entity_id in self.entities:
return self.entities[entity_id]
else:
return None
def process(self, time_passed):
time_passed_seconds = time_passed / 1000.0
        for entity in list(self.entities.values()):
entity.process(time_passed_seconds)
def render(self, surface):
surface.blit(self.background, (0, 0))
for entity in self.entities.values():
entity.render(surface)
def get_close_entity(self, name, location, e_range=100):
location = Vector2(*location)
for entity in self.entities.values():
if entity.name == name:
distance = location.get_distance_to(entity.location)
if distance < e_range:
return entity
return None
| true
| true
|
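The World class above only assumes that each stored entity exposes name, location, an id slot, and process()/render() hooks. A minimal sketch of a conforming entity, using a hypothetical StubEntity with plain tuples instead of the book's Vector2 and no drawing:

class StubEntity(object):
    # Smallest shape World accepts: name, location, id, process(), render().
    def __init__(self, name, location):
        self.name = name
        self.location = location
        self.id = None  # World.add_entity assigns the real id

    def process(self, time_passed_seconds):
        # Drift right at 10 units per second.
        x, y = self.location
        self.location = (x + 10.0 * time_passed_seconds, y)

    def render(self, surface):
        pass  # a real entity would blit its sprite onto the surface here

Note that get_close_entity additionally calls location.get_distance_to(), so entities that should be found by proximity queries need Vector2-style locations.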
1c3f581920ca6d909ea815a12c43b203410bfd34
| 1,113
|
py
|
Python
|
bluelog/extensions.py
|
meizhaohui/bluelog
|
e4ea274cec5abe78b142f23f99675d3fdf5f1170
|
[
"MIT"
] | null | null | null |
bluelog/extensions.py
|
meizhaohui/bluelog
|
e4ea274cec5abe78b142f23f99675d3fdf5f1170
|
[
"MIT"
] | null | null | null |
bluelog/extensions.py
|
meizhaohui/bluelog
|
e4ea274cec5abe78b142f23f99675d3fdf5f1170
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
@Author : Zhaohui Mei(梅朝辉)
@Email : mzh.whut@gmail.com
@Time : 2018/11/18 20:53
@File : extensions.py
@Version : 1.0
@Interpreter: Python3.6.2
@Software: PyCharm
@Description: Instantiation of the extension classes
"""
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_moment import Moment
from flask_ckeditor import CKEditor
from flask_mail import Mail
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
# Create the extension objects
bootstrap = Bootstrap()
db = SQLAlchemy()
moment = Moment()
ckeditor = CKEditor()
mail = Mail()
login_manager = LoginManager() # user login management
csrf = CSRFProtect() # use CSRFProtect to enable CSRF protection
# View protection settings
login_manager.login_view = 'auth.login' # redirect to this view when login is required
login_manager.login_message_category = 'warning' # message category
# Message flashed when an anonymous user visits a protected view
login_manager.login_message = 'Please login to access this page.(请先登陆!)'
@login_manager.user_loader
def load_user(user_id):
"""用户加载函数,FLask-Login用于获取当前用户的对象,必须要设置"""
from bluelog.models import Admin
user = Admin.query.get(int(user_id))
return user
| 25.295455
| 73
| 0.728661
|
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_moment import Moment
from flask_ckeditor import CKEditor
from flask_mail import Mail
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
bootstrap = Bootstrap()
db = SQLAlchemy()
moment = Moment()
ckeditor = CKEditor()
mail = Mail()
login_manager = LoginManager()
csrf = CSRFProtect()
login_manager.login_view = 'auth.login'
login_manager.login_message_category = 'warning'
login_manager.login_message = 'Please login to access this page.(请先登陆!)'
@login_manager.user_loader
def load_user(user_id):
from bluelog.models import Admin
user = Admin.query.get(int(user_id))
return user
| true
| true
|
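The extensions above are created unbound at import time; the usual companion is an application factory that binds each of them with init_app. A minimal sketch, assuming a create_app function of our own (the original module does not include one):

from flask import Flask

from bluelog.extensions import (bootstrap, ckeditor, csrf, db, login_manager,
                                mail, moment)

def create_app():
    app = Flask(__name__)
    # Bind every unbound extension instance to this application.
    for ext in (bootstrap, db, moment, ckeditor, mail, login_manager, csrf):
        ext.init_app(app)
    return app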
1c3f596687b3a50588950fdfed39db230aa16475
| 470
|
py
|
Python
|
apps/fhir/bluebutton/migrations/0004_auto_20191220_2327.py
|
dtisza1/bluebutton-web-server
|
6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb
|
[
"Apache-2.0"
] | 25
|
2017-12-10T00:48:31.000Z
|
2022-03-25T01:29:13.000Z
|
apps/fhir/bluebutton/migrations/0004_auto_20191220_2327.py
|
dtisza1/bluebutton-web-server
|
6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb
|
[
"Apache-2.0"
] | 298
|
2017-12-05T05:53:32.000Z
|
2022-03-21T19:29:03.000Z
|
apps/fhir/bluebutton/migrations/0004_auto_20191220_2327.py
|
dtisza1/bluebutton-web-server
|
6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb
|
[
"Apache-2.0"
] | 31
|
2017-12-04T16:01:12.000Z
|
2021-09-26T22:34:55.000Z
|
# Generated by Django 2.1.11 on 2019-12-20 23:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bluebutton', '0003_auto_20191208_0010'),
]
operations = [
migrations.AlterField(
model_name='crosswalk',
name='_fhir_id',
field=models.CharField(db_column='fhir_id', db_index=True, default=None, max_length=80, unique=True, null=False),
),
]
| 24.736842
| 125
| 0.634043
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bluebutton', '0003_auto_20191208_0010'),
]
operations = [
migrations.AlterField(
model_name='crosswalk',
name='_fhir_id',
field=models.CharField(db_column='fhir_id', db_index=True, default=None, max_length=80, unique=True, null=False),
),
]
| true
| true
|
1c3f59d5dbbbcbb7939d11d02a3a33cb0fbdc48a
| 1,374
|
py
|
Python
|
lib/cogs/commands/Utility.py
|
Aqua-Solutions2/Aqua-Solutions-Partner
|
dfc27c77560c2e209b9aef003694641d07027950
|
[
"MIT"
] | null | null | null |
lib/cogs/commands/Utility.py
|
Aqua-Solutions2/Aqua-Solutions-Partner
|
dfc27c77560c2e209b9aef003694641d07027950
|
[
"MIT"
] | null | null | null |
lib/cogs/commands/Utility.py
|
Aqua-Solutions2/Aqua-Solutions-Partner
|
dfc27c77560c2e209b9aef003694641d07027950
|
[
"MIT"
] | null | null | null |
from discord.ext.commands import Cog, command, cooldown, BucketType
import time
start_time = time.time()
class Stats(Cog):
def __init__(self, bot):
self.bot = bot
@staticmethod
def calculate_uptime():
current_time = time.time()
uptime = current_time - start_time
if uptime >= 86400:
uptime = time.strftime('%-dd %-Hu %-Mm %-Ss', time.gmtime(uptime))
else:
uptime = time.strftime('%-Hu %-Mm %-Ss', time.gmtime(uptime))
return uptime
@command(name="ping", aliases=["uptime"])
@cooldown(1, 5, BucketType.user)
async def ping(self, ctx):
"""Returns the latency in milliseconds."""
responds_time_start = time.time()
message = await ctx.send(f":radio_button: Calculating...")
responds_time_end = time.time()
await message.edit(content=f":ping_pong: **Ping:** {self.bot.get_shard(ctx.guild.shard_id).latency * 1000:,.0f} ms\n"
f":speech_balloon: **Responds Time:** {(responds_time_end - responds_time_start) * 1000:,.0f} ms\n"
f":white_check_mark: **Uptime:** {self.calculate_uptime()}")
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up("Utility")
def setup(bot):
bot.add_cog(Stats(bot))
| 32.714286
| 134
| 0.597525
|
from discord.ext.commands import Cog, command, cooldown, BucketType
import time
start_time = time.time()
class Stats(Cog):
def __init__(self, bot):
self.bot = bot
@staticmethod
def calculate_uptime():
current_time = time.time()
uptime = current_time - start_time
if uptime >= 86400:
uptime = time.strftime('%-dd %-Hu %-Mm %-Ss', time.gmtime(uptime))
else:
uptime = time.strftime('%-Hu %-Mm %-Ss', time.gmtime(uptime))
return uptime
@command(name="ping", aliases=["uptime"])
@cooldown(1, 5, BucketType.user)
async def ping(self, ctx):
responds_time_start = time.time()
message = await ctx.send(f":radio_button: Calculating...")
responds_time_end = time.time()
await message.edit(content=f":ping_pong: **Ping:** {self.bot.get_shard(ctx.guild.shard_id).latency * 1000:,.0f} ms\n"
f":speech_balloon: **Responds Time:** {(responds_time_end - responds_time_start) * 1000:,.0f} ms\n"
f":white_check_mark: **Uptime:** {self.calculate_uptime()}")
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up("Utility")
def setup(bot):
bot.add_cog(Stats(bot))
| true
| true
|
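One caveat in calculate_uptime above: time.gmtime(uptime) interprets the elapsed seconds as an epoch timestamp, so the %-d day field starts at 1 and a one-day uptime is reported as 2d (the %-d and %-H flags are also not portable to Windows). A minimal divmod-based sketch that sidesteps both problems while keeping the original suffixes:

def format_uptime(seconds):
    # Split elapsed seconds explicitly instead of formatting an
    # epoch-relative struct_time with strftime.
    days, rem = divmod(int(seconds), 86400)
    hours, rem = divmod(rem, 3600)
    minutes, secs = divmod(rem, 60)
    if days:
        return "{}d {}u {}m {}s".format(days, hours, minutes, secs)
    return "{}u {}m {}s".format(hours, minutes, secs)

print(format_uptime(90061))  # 1d 1u 1m 1s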
1c3f5a0a335de8091c3d9ea5a205053cffc9b8b6
| 861
|
py
|
Python
|
generaImagenTile/test/ph1.py
|
alffore/tileimagen
|
baf7321d9e9c002ef8ec10d4c52883ef8e4f18ed
|
[
"MIT"
] | null | null | null |
generaImagenTile/test/ph1.py
|
alffore/tileimagen
|
baf7321d9e9c002ef8ec10d4c52883ef8e4f18ed
|
[
"MIT"
] | null | null | null |
generaImagenTile/test/ph1.py
|
alffore/tileimagen
|
baf7321d9e9c002ef8ec10d4c52883ef8e4f18ed
|
[
"MIT"
] | null | null | null |
"""
Test for obtaining a histogram
"""
import sys
import numpy as np
import skimage.color
import skimage.io
import skimage.viewer
from matplotlib import pyplot as plt
# read image, based on command line filename argument;
# keep the colour channels: the per-channel histogram below indexes
# image[:, :, channel_id], which would fail on a 2D grayscale array
image = skimage.io.imread(fname=sys.argv[1], as_gray=False)
# display the image
#viewer = skimage.viewer.ImageViewer(image)
#viewer.show()
# tuple to select colors of each channel line
colors = ("r", "g", "b")
channel_ids = (0, 1, 2)
# create the histogram plot, with three lines, one for
# each color
plt.xlim([0, 256])
for channel_id, c in zip(channel_ids, colors):
histogram, bin_edges = np.histogram(
image[:, :, channel_id], bins=256, range=(0, 256)
)
plt.plot(bin_edges[0:-1], histogram, color=c)
plt.xlabel("Color value")
plt.ylabel("Pixels")
plt.show()
| 22.657895
| 58
| 0.710801
|
import sys
import numpy as np
import skimage.color
import skimage.io
import skimage.viewer
from matplotlib import pyplot as plt
image = skimage.io.imread(fname=sys.argv[1], as_gray=False)
colors = ("r", "g", "b")
channel_ids = (0, 1, 2)
plt.xlim([0, 256])
for channel_id, c in zip(channel_ids, colors):
histogram, bin_edges = np.histogram(
image[:, :, channel_id], bins=256, range=(0, 256)
)
plt.plot(bin_edges[0:-1], histogram, color=c)
plt.xlabel("Color value")
plt.ylabel("Pixels")
plt.show()
| true
| true
|
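The script above needs all three colour channels for its per-channel histograms (hence as_gray=False). If a grayscale histogram were the goal instead, skimage's as_gray=True returns a 2D float image scaled to [0, 1], so both the indexing and the range change; a minimal sketch:

import sys

import numpy as np
import skimage.io
from matplotlib import pyplot as plt

# as_gray=True yields a 2D float64 array in [0, 1]: histogram the whole
# array over (0, 1) rather than slicing channels over (0, 256).
gray = skimage.io.imread(fname=sys.argv[1], as_gray=True)
histogram, bin_edges = np.histogram(gray, bins=256, range=(0.0, 1.0))
plt.plot(bin_edges[0:-1], histogram)
plt.xlabel("Gray value")
plt.ylabel("Pixels")
plt.show()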
1c3f5c46615ce0f7a4be0e013b2441f34d4bcb1e
| 19,070
|
py
|
Python
|
pylib/gna/env.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | 5
|
2019-10-14T01:06:57.000Z
|
2021-02-02T16:33:06.000Z
|
pylib/gna/env.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | null | null | null |
pylib/gna/env.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | null | null | null |
from collections import defaultdict, deque, Mapping, OrderedDict
from contextlib import contextmanager
import ROOT
import cppyy
from gna.config import cfg
from . import parameters
provided_precisions = list(map(str, ROOT.GNA.provided_precisions()))
expressionproviders = tuple(ROOT.GNA.GNAObjectTemplates.ExpressionsProviderT(p) for p in provided_precisions)
env = None
class namespacedict(OrderedDict):
def __init__(self, ns):
super(namespacedict, self).__init__()
self.ns = ns
def __missing__(self, key):
if key in self.ns.storage:
return self.ns
value = namespace(self.ns, key)
self[key] = value
return value
class ExpressionWithBindings(object):
def __init__(self, ns, obj, expr, bindings):
self.ns = ns
self.obj = obj
self.expr = expr
self.bindings = bindings
def resolvepath(self, seen, known):
allpath = []
for src in self.expr.sources.values():
depname = src.name()
dep = next((bs[depname] for bs in self.bindings if depname in bs), depname)
if isinstance(dep, str):
if dep in known:
continue
try:
dep = env.nsview[dep]
except KeyError:
return None
if isinstance(dep, ExpressionsEntry):
if dep in seen:
return None
path = dep.resolvepath(seen | {dep}, known)
if path is None:
return None
known.add(dep)
allpath.extend(path)
return allpath
def get(self):
for src in self.expr.sources.values():
depname = src.name()
v = self.obj.variables[depname]
if not v.isFree():
continue
dep = next((bs[depname] for bs in self.bindings if depname in bs), depname)
if isinstance(dep, str):
dep = env.nsview[dep]
v.bind(dep.getVariable())
return self.ns.addevaluable(self.expr.name(), self.expr.get())
class ExpressionsEntry(object):
_label = None
def __init__(self, ns):
self.ns = ns
self.exprs = []
def setLabel(self, label):
self._label = label
def label(self):
return self._label
def add(self, obj, expr, bindings):
self.exprs.append(ExpressionWithBindings(self.ns, obj, expr, bindings))
def get(self):
path = self.resolvepath({self}, OrderedDict())
if not path:
names = [expr.expr.name() for expr in self.exprs]
reqs = [var.name() for expr in self.exprs for var in list(expr.expr.sources.values())]
raise KeyError('Unable to provide required variables for {!s}. Something is missing from: {!s}'.format(names, reqs))
for expr in path:
v = expr.get()
if self._label is not None:
otherlabel = v.label()
if otherlabel:
newlabel = '{}: {}'.format(self._label, otherlabel)
else:
newlabel = self._label
v.setLabel(newlabel)
return v
materialize = get
def resolvepath(self, seen, known):
minexpr, minpaths = None, None
for expr in self.exprs:
if cfg.debug_bindings:
print(expr.expr.name(), seen)
paths = expr.resolvepath(seen, set(known))
if paths is None:
continue
if len(paths) == 0:
return [expr]
if minpaths is None or len(minpaths) > len(paths):
minexpr = expr
minpaths = paths
if minexpr is None:
return None
return minpaths+[minexpr]
class namespace(Mapping):
groups = None
def __init__(self, parent, name):
self.groups=[]
self.name = name
if parent and name=='':
raise Exception( 'Only root namespace may have no name' )
if parent:
self.path = parent.pathto(name)
else:
self.path = name
self.storage = OrderedDict()
self.observables = OrderedDict()
self.observables_tags = defaultdict(set)
self.rules = []
self.namespaces = namespacedict(self)
self.objs = []
def pathto(self, name):
return '.'.join((self.path, name)) if self.path else name
def __bool__(self):
return True
def __repr__(self):
return "<namespace path='{0}'>".format(self.path)
def __enter__(self):
env.nsview.add([self])
def __exit__(self, type, value, tb):
env.nsview.remove([self])
def __call__(self, nsname):
if not nsname:
return self
if isinstance(nsname, str):
if nsname=='':
return self
parts = nsname.split('.')
elif isinstance(nsname, (list, tuple)):
parts = nsname
if not parts:
return self
return self.namespaces[parts[0]](parts[1:])
def link(self, nsname, newns):
self.namespaces[nsname] = newns
def inherit(self, otherns):
for nsname in otherns.namespaces:
if nsname not in self.namespaces:
self.namespaces[nsname] = otherns.namespaces[nsname]
def get_proper_ns(self, name, separator='.'):
if isinstance(name, (tuple, list)):
path, head = name[:-1], name[-1]
else:
path, head = (), name
if separator in head:
newpath = tuple(head.split(separator))
path+=newpath[:-1]
head = newpath[-1]
if path:
return self(path), head
else:
return None, head
def __getitem__(self, name):
if not name:
return self
ns, head = self.get_proper_ns(name)
if ns:
return ns.__getitem__(head)
if isinstance(head, cppyy.gbl.std.string):
v = self.storage[str(head)]
else:
v = self.storage[head]
if isinstance(v, str):
return env.nsview[v]
return v
def get(self, name, *args):
if not name:
return self
ns, head = self.get_proper_ns(name)
if ns:
return ns.__getitem__(head)
v = self.storage.get(head, *args)
if isinstance(v, str):
return env.nsview[v]
return v
def __setitem__(self, name, value):
ns, head = self.get_proper_ns(name)
if ns:
ns.__setitem__(head, value)
self.storage[head] = value
def __iter__(self):
return iter(self.storage.keys())
def __len__(self):
return len(self.storage)
def defparameter_group(self, *args, **kwargs):
import gna.parameters.covariance_helpers as ch
pars = [self.defparameter(name, **ctor_args) for name, ctor_args in args]
covmat_passed = kwargs.get('covmat')
if covmat_passed is not None:
ch.covariate_pars(pars, covmat_passed)
cov_from_cfg = kwargs.get('covmat_cfg')
if cov_from_cfg is not None:
ch.CovarianceHandler(cov_from_cfg, pars).covariate_pars()
return pars
def defparameter(self, name, *args, **kwargs):
ns, head = self.get_proper_ns(name)
if ns:
return ns.defparameter(head, *args, **kwargs)
if head in self.storage:
raise Exception("{} is already defined in {}".format(head, self.path))
target = self.matchrule(head)
if not target:
target = kwargs.pop('target', None)
if target:
p = target
else:
p = parameters.makeparameter(self, head, *args, **kwargs)
self[head] = p
return p
def reqparameter(self, name, *args, **kwargs):
ns, head = self.get_proper_ns(name)
if ns:
return ns.reqparameter(head, *args, **kwargs)
par = None
try:
par = self[head]
except KeyError:
pass
if not par:
try:
par = env.nsview[head]
except KeyError:
pass
found=bool(par)
if not par:
par = self.defparameter(head, *args, **kwargs)
if kwargs.get('with_status'):
return par, found
return par
def reqparameter_group(self, *args, **kwargs):
import gna.parameters.covariance_helpers as ch
args_patched = [(name, dict(ctor_args, with_status=True))
for name, ctor_args in args]
pars_with_status = [self.reqparameter(name, **ctor_args)
for name, ctor_args in args_patched]
statuses = [status for _, status in pars_with_status]
pars = [par for par, _ in pars_with_status]
if not any(statuses):
covmat_passed = kwargs.get('covmat')
if covmat_passed is not None:
ch.covariate_pars(pars, covmat_passed)
cov_from_cfg = kwargs.get('covmat_cfg')
if cov_from_cfg is not None:
ch.CovarianceHandler(cov_from_cfg, pars).covariate_pars()
return pars
def addobservable(self, name, output, export=True, ignorecheck=False):
ns, head = self.get_proper_ns(name, separator='/')
if ns:
return ns.addobservable(head, output, export, ignorecheck)
if ignorecheck or output.check():
self.observables[head] = output
print('Add observable:', '%s/%s'%(self.path, head))
else:
print("observation", name, "is invalid")
output.dump()
if not export:
self.observables_tags[name].add('internal')
def getobservable(self, name):
ns, head = self.get_proper_ns(name, separator='/')
if ns:
return ns.getobservable(head)
try:
return self.observables[head]
except:
raise KeyError('Invalid observable: {}'.format(head))
def addexpressions(self, obj, bindings=[]):
for expr in obj.evaluables.values():
if cfg.debug_bindings:
print(self.path, obj, expr.name())
name = expr.name()
if name not in self.storage:
self.storage[name] = ExpressionsEntry(self)
if isinstance(self.storage[name], ExpressionsEntry):
self.storage[name].add(obj, expr, bindings)
def addevaluable(self, name, var):
evaluable = ROOT.Variable(var.typeName())(name, var)
evaluable.setLabel(var.label())
evaluable.ns = self
self[name] = evaluable
return evaluable
def walknstree(self):
yield self
for name, subns in self.namespaces.items():
for x in subns.walknstree():
yield x
def walkobservables(self, internal=False):
for ns in self.walknstree():
for name, val in ns.observables.items():
if not internal and 'internal' in ns.observables_tags.get(name, OrderedDict()):
continue
yield '{}/{}'.format(ns.path, name), val
def walknames(self):
for ns in self.walknstree():
for name, val in ns.storage.items():
yield '{}.{}'.format(ns.path, name), val
def ref(self, name):
return '.'.join([self.path, name])
def matchrule(self, name):
for pattern, target in self.rules:
if not pattern or pattern(name):
return target
def printobservables(self, internal=False):
import gna.bindings.DataType
for path, out in self.walkobservables(internal):
print('%-30s'%(path+':'), str(out.datatype()))
def printparameters(self, **kwargs):
from gna.parameters.printer import print_parameters
print_parameters(self, **kwargs)
def materializeexpressions(self, recursive=False):
for v in self.values():
if not isinstance(v, ExpressionsEntry):
continue
v.materialize()
if recursive:
for ns in list(self.namespaces.values()):
ns.materializeexpressions(True)
def get_obs(self, *names):
import fnmatch as fn
obses = []
for name in names:
matched = fn.filter(list(self.observables.keys()), name)
obses.extend(matched)
return obses
class nsview(object):
def __init__(self):
self.nses = deque()
def add(self, nses):
self.nses.extendleft(nses)
def remove(self, nses):
for ns in nses:
self.nses.remove(ns)
def __getitem__(self, name):
for ns in self.nses:
try:
return ns[name]
except KeyError:
pass
if cfg.debug_bindings:
print("can't find name {}. Names in view: ".format(name), end='')
if self.nses:
for ns in self.nses:
print('"{}": "{}"'.format(ns.path, ', '.join(ns.storage)), ' ', end='')
print('')
else:
print('none')
raise KeyError('%s (namespaces: %s)'%(name, str([ns.name for ns in self.nses])))
def currentns(self):
return self.nses[0]
class parametersview(object):
def __getitem__(self, name):
res = env.nsview[name]
return res
@contextmanager
def update(self, newvalues={}):
params=[]
for p, v in newvalues.items():
if isinstance(p, str):
p = self[p]
p.push(v)
params.append(p)
yield
for p in params:
p.pop()
@contextmanager
def save(self, params):
oldvalues = OrderedDict()
for p in params:
if isinstance(p, str):
p = self[p]
oldvalues[p] = p.value()
yield
for p, v in oldvalues.items():
p.set(v)
class PartNotFoundError(Exception):
def __init__(self, parttype, partname):
self.parttype = parttype
self.partname = partname
msg = "Failed to find {} in the env".format(self.partname)
super(PartNotFoundError, self).__init__(msg)
class envpart(dict):
def __init__(self, parttype):
self.parttype = parttype
super(envpart, self).__init__()
def __hash__(self):
return hash(self.parttype)
def __call__(self, name):
try:
return self[name]
except KeyError:
raise PartNotFoundError(self.parttype, name)
class envparts(object):
def __init__(self):
self.storage = OrderedDict()
def __getattr__(self, parttype):
if not parttype in self.storage:
self.storage[parttype] = envpart(parttype)
return self.storage[parttype]
class _environment(object):
def __init__(self):
self._bindings = []
self.globalns = namespace(None, '')
self.nsview = nsview()
self.nsview.add([self.globalns])
self.parameters = parametersview()
self.pars = self.parameters
self.parts = envparts()
from tools.dictwrapper import DictWrapper
self.future = DictWrapper(OrderedDict(), split='.')
def view(self, ns):
if ns != self.globalns:
return nsview([ns, self.globalns])
else:
return nsview([self.globalns])
def register(self, obj, **kwargs):
ns = kwargs.pop('ns')
if not ns:
self.globalns.objs.append(obj)
else:
ns.objs.append(obj)
obj.currentns = self.nsview.currentns()
bindings = self._bindings+[kwargs.pop("bindings", OrderedDict())]
if ns:
ns.addexpressions(obj, bindings=bindings)
if not kwargs.pop('bind', True):
return obj
if isinstance(obj, expressionproviders):
return obj
freevars = kwargs.pop('freevars', [])
for v in obj.variables.values():
if v.name() in freevars:
continue
if not v.isFree():
if cfg.debug_bindings:
print('binding skipped', v.name())
continue
vname = v.name()
param = next((bs[vname] for bs in bindings if vname in bs), vname)
if isinstance(param, str):
param = self.nsview[param]
if isinstance(param, ExpressionsEntry):
param = param.get()
if param is not None:
if cfg.debug_bindings:
print("binding", v.name(), 'of', type(obj).__name__, 'to', type(param).__name__, '.'.join([param.ns.path, param.name()]))
v.bind(param.getVariable())
else:
msg = "unable to bind variable %s of %r" % (v.name(), obj)
if not v.required():
msg += ", optional"
print(msg)
else:
raise Exception(msg)
obj.variablesBound()
return obj
def ns(self, ns):
if isinstance(ns, namespace):
return ns
elif isinstance(ns, str):
return self.globalns(ns)
else:
raise Exception("unknown object %r passed to ns()" % ns)
def defparameter(self, name, **kwargs):
if '.' in name:
nsname, name = name.rsplit('.', 1)
return self.ns(nsname).defparameter(name, **kwargs)
else:
return self.globalns.defparameter(name, **kwargs)
# def iternstree(self):
# return self.globalns.iternstree()
    @contextmanager
    def bind(self, **bindings):
self._bindings.append(bindings)
yield
self._bindings.pop()
def gettype(self, objtype):
types = self.parts.storage
matches = [k for k in types if k.startswith(objtype)]
if len(matches) > 1:
msg = "ambigous type specifier {0}, candidates: {1}"
raise Exception(msg.format(objtype, ', '.join(matches)))
elif not matches:
msg = "unknown type specifier {0}"
raise Exception(msg.format(objtype))
else:
return types[matches[0]]
def get(self, objspec):
if ':' in objspec:
objtype, objpath = objspec.split(":", 1)
return self.gettype(objtype)[objpath]
elif '/' in objspec:
nspath, obsname = objspec.rsplit("/", 1)
return self.ns(nspath).observables[obsname]
else:
return self.globalns[objspec]
env = _environment()
| 30.857605
| 141
| 0.549816
|
from collections import defaultdict, deque, Mapping, OrderedDict
from contextlib import contextmanager
import ROOT
import cppyy
from gna.config import cfg
from . import parameters
provided_precisions = list(map(str, ROOT.GNA.provided_precisions()))
expressionproviders = tuple(ROOT.GNA.GNAObjectTemplates.ExpressionsProviderT(p) for p in provided_precisions)
env = None
class namespacedict(OrderedDict):
def __init__(self, ns):
super(namespacedict, self).__init__()
self.ns = ns
def __missing__(self, key):
if key in self.ns.storage:
return self.ns
value = namespace(self.ns, key)
self[key] = value
return value
class ExpressionWithBindings(object):
def __init__(self, ns, obj, expr, bindings):
self.ns = ns
self.obj = obj
self.expr = expr
self.bindings = bindings
def resolvepath(self, seen, known):
allpath = []
for src in self.expr.sources.values():
depname = src.name()
dep = next((bs[depname] for bs in self.bindings if depname in bs), depname)
if isinstance(dep, str):
if dep in known:
continue
try:
dep = env.nsview[dep]
except KeyError:
return None
if isinstance(dep, ExpressionsEntry):
if dep in seen:
return None
path = dep.resolvepath(seen | {dep}, known)
if path is None:
return None
known.add(dep)
allpath.extend(path)
return allpath
def get(self):
for src in self.expr.sources.values():
depname = src.name()
v = self.obj.variables[depname]
if not v.isFree():
continue
dep = next((bs[depname] for bs in self.bindings if depname in bs), depname)
if isinstance(dep, str):
dep = env.nsview[dep]
v.bind(dep.getVariable())
return self.ns.addevaluable(self.expr.name(), self.expr.get())
class ExpressionsEntry(object):
_label = None
def __init__(self, ns):
self.ns = ns
self.exprs = []
def setLabel(self, label):
self._label = label
def label(self):
return self._label
def add(self, obj, expr, bindings):
self.exprs.append(ExpressionWithBindings(self.ns, obj, expr, bindings))
def get(self):
path = self.resolvepath({self}, OrderedDict())
if not path:
names = [expr.expr.name() for expr in self.exprs]
reqs = [var.name() for expr in self.exprs for var in list(expr.expr.sources.values())]
raise KeyError('Unable to provide required variables for {!s}. Something is missing from: {!s}'.format(names, reqs))
for expr in path:
v = expr.get()
if self._label is not None:
otherlabel = v.label()
if otherlabel:
newlabel = '{}: {}'.format(self._label, otherlabel)
else:
newlabel = self._label
v.setLabel(newlabel)
return v
materialize = get
def resolvepath(self, seen, known):
minexpr, minpaths = None, None
for expr in self.exprs:
if cfg.debug_bindings:
print(expr.expr.name(), seen)
paths = expr.resolvepath(seen, set(known))
if paths is None:
continue
if len(paths) == 0:
return [expr]
if minpaths is None or len(minpaths) > len(paths):
minexpr = expr
minpaths = paths
if minexpr is None:
return None
return minpaths+[minexpr]
class namespace(Mapping):
groups = None
def __init__(self, parent, name):
self.groups=[]
self.name = name
if parent and name=='':
raise Exception( 'Only root namespace may have no name' )
if parent:
self.path = parent.pathto(name)
else:
self.path = name
self.storage = OrderedDict()
self.observables = OrderedDict()
self.observables_tags = defaultdict(set)
self.rules = []
self.namespaces = namespacedict(self)
self.objs = []
def pathto(self, name):
return '.'.join((self.path, name)) if self.path else name
def __bool__(self):
return True
def __repr__(self):
return "<namespace path='{0}'>".format(self.path)
def __enter__(self):
env.nsview.add([self])
def __exit__(self, type, value, tb):
env.nsview.remove([self])
def __call__(self, nsname):
if not nsname:
return self
if isinstance(nsname, str):
if nsname=='':
return self
parts = nsname.split('.')
elif isinstance(nsname, (list, tuple)):
parts = nsname
if not parts:
return self
return self.namespaces[parts[0]](parts[1:])
def link(self, nsname, newns):
self.namespaces[nsname] = newns
def inherit(self, otherns):
for nsname in otherns.namespaces:
if nsname not in self.namespaces:
self.namespaces[nsname] = otherns.namespaces[nsname]
def get_proper_ns(self, name, separator='.'):
if isinstance(name, (tuple, list)):
path, head = name[:-1], name[-1]
else:
path, head = (), name
if separator in head:
newpath = tuple(head.split(separator))
path+=newpath[:-1]
head = newpath[-1]
if path:
return self(path), head
else:
return None, head
def __getitem__(self, name):
if not name:
return self
ns, head = self.get_proper_ns(name)
if ns:
return ns.__getitem__(head)
if isinstance(head, cppyy.gbl.std.string):
v = self.storage[str(head)]
else:
v = self.storage[head]
if isinstance(v, str):
return env.nsview[v]
return v
def get(self, name, *args):
if not name:
return self
ns, head = self.get_proper_ns(name)
if ns:
return ns.__getitem__(head)
v = self.storage.get(head, *args)
if isinstance(v, str):
return env.nsview[v]
return v
def __setitem__(self, name, value):
ns, head = self.get_proper_ns(name)
if ns:
ns.__setitem__(head, value)
self.storage[head] = value
def __iter__(self):
return iter(self.storage.keys())
def __len__(self):
return len(self.storage)
def defparameter_group(self, *args, **kwargs):
import gna.parameters.covariance_helpers as ch
pars = [self.defparameter(name, **ctor_args) for name, ctor_args in args]
covmat_passed = kwargs.get('covmat')
if covmat_passed is not None:
ch.covariate_pars(pars, covmat_passed)
cov_from_cfg = kwargs.get('covmat_cfg')
if cov_from_cfg is not None:
ch.CovarianceHandler(cov_from_cfg, pars).covariate_pars()
return pars
def defparameter(self, name, *args, **kwargs):
ns, head = self.get_proper_ns(name)
if ns:
return ns.defparameter(head, *args, **kwargs)
if head in self.storage:
raise Exception("{} is already defined in {}".format(head, self.path))
target = self.matchrule(head)
if not target:
target = kwargs.pop('target', None)
if target:
p = target
else:
p = parameters.makeparameter(self, head, *args, **kwargs)
self[head] = p
return p
def reqparameter(self, name, *args, **kwargs):
ns, head = self.get_proper_ns(name)
if ns:
return ns.reqparameter(head, *args, **kwargs)
par = None
try:
par = self[head]
except KeyError:
pass
if not par:
try:
par = env.nsview[head]
except KeyError:
pass
found=bool(par)
if not par:
par = self.defparameter(head, *args, **kwargs)
if kwargs.get('with_status'):
return par, found
return par
def reqparameter_group(self, *args, **kwargs):
import gna.parameters.covariance_helpers as ch
args_patched = [(name, dict(ctor_args, with_status=True))
for name, ctor_args in args]
pars_with_status = [self.reqparameter(name, **ctor_args)
for name, ctor_args in args_patched]
statuses = [status for _, status in pars_with_status]
pars = [par for par, _ in pars_with_status]
if not any(statuses):
covmat_passed = kwargs.get('covmat')
if covmat_passed is not None:
ch.covariate_pars(pars, covmat_passed)
cov_from_cfg = kwargs.get('covmat_cfg')
if cov_from_cfg is not None:
ch.CovarianceHandler(cov_from_cfg, pars).covariate_pars()
return pars
def addobservable(self, name, output, export=True, ignorecheck=False):
ns, head = self.get_proper_ns(name, separator='/')
if ns:
return ns.addobservable(head, output, export, ignorecheck)
if ignorecheck or output.check():
self.observables[head] = output
print('Add observable:', '%s/%s'%(self.path, head))
else:
print("observation", name, "is invalid")
output.dump()
if not export:
self.observables_tags[name].add('internal')
def getobservable(self, name):
ns, head = self.get_proper_ns(name, separator='/')
if ns:
return ns.getobservable(head)
try:
return self.observables[head]
except:
raise KeyError('Invalid observable: {}'.format(head))
def addexpressions(self, obj, bindings=[]):
for expr in obj.evaluables.values():
if cfg.debug_bindings:
print(self.path, obj, expr.name())
name = expr.name()
if name not in self.storage:
self.storage[name] = ExpressionsEntry(self)
if isinstance(self.storage[name], ExpressionsEntry):
self.storage[name].add(obj, expr, bindings)
def addevaluable(self, name, var):
evaluable = ROOT.Variable(var.typeName())(name, var)
evaluable.setLabel(var.label())
evaluable.ns = self
self[name] = evaluable
return evaluable
def walknstree(self):
yield self
for name, subns in self.namespaces.items():
for x in subns.walknstree():
yield x
def walkobservables(self, internal=False):
for ns in self.walknstree():
for name, val in ns.observables.items():
if not internal and 'internal' in ns.observables_tags.get(name, OrderedDict()):
continue
yield '{}/{}'.format(ns.path, name), val
def walknames(self):
for ns in self.walknstree():
for name, val in ns.storage.items():
yield '{}.{}'.format(ns.path, name), val
def ref(self, name):
return '.'.join([self.path, name])
def matchrule(self, name):
for pattern, target in self.rules:
if not pattern or pattern(name):
return target
def printobservables(self, internal=False):
import gna.bindings.DataType
for path, out in self.walkobservables(internal):
print('%-30s'%(path+':'), str(out.datatype()))
def printparameters(self, **kwargs):
from gna.parameters.printer import print_parameters
print_parameters(self, **kwargs)
def materializeexpressions(self, recursive=False):
for v in self.values():
if not isinstance(v, ExpressionsEntry):
continue
v.materialize()
if recursive:
for ns in list(self.namespaces.values()):
ns.materializeexpressions(True)
def get_obs(self, *names):
import fnmatch as fn
obses = []
for name in names:
matched = fn.filter(list(self.observables.keys()), name)
obses.extend(matched)
return obses
class nsview(object):
def __init__(self):
self.nses = deque()
def add(self, nses):
self.nses.extendleft(nses)
def remove(self, nses):
for ns in nses:
self.nses.remove(ns)
def __getitem__(self, name):
for ns in self.nses:
try:
return ns[name]
except KeyError:
pass
if cfg.debug_bindings:
print("can't find name {}. Names in view: ".format(name), end='')
if self.nses:
for ns in self.nses:
print('"{}": "{}"'.format(ns.path, ', '.join(ns.storage)), ' ', end='')
print('')
else:
print('none')
raise KeyError('%s (namespaces: %s)'%(name, str([ns.name for ns in self.nses])))
def currentns(self):
return self.nses[0]
class parametersview(object):
def __getitem__(self, name):
res = env.nsview[name]
return res
@contextmanager
def update(self, newvalues={}):
params=[]
for p, v in newvalues.items():
if isinstance(p, str):
p = self[p]
p.push(v)
params.append(p)
yield
for p in params:
p.pop()
@contextmanager
def save(self, params):
oldvalues = OrderedDict()
for p in params:
if isinstance(p, str):
p = self[p]
oldvalues[p] = p.value()
yield
for p, v in oldvalues.items():
p.set(v)
class PartNotFoundError(Exception):
def __init__(self, parttype, partname):
self.parttype = parttype
self.partname = partname
msg = "Failed to find {} in the env".format(self.partname)
super(PartNotFoundError, self).__init__(msg)
class envpart(dict):
def __init__(self, parttype):
self.parttype = parttype
super(envpart, self).__init__()
def __hash__(self):
return hash(self.parttype)
def __call__(self, name):
try:
return self[name]
except KeyError:
raise PartNotFoundError(self.parttype, name)
class envparts(object):
def __init__(self):
self.storage = OrderedDict()
def __getattr__(self, parttype):
if not parttype in self.storage:
self.storage[parttype] = envpart(parttype)
return self.storage[parttype]
class _environment(object):
def __init__(self):
self._bindings = []
self.globalns = namespace(None, '')
self.nsview = nsview()
self.nsview.add([self.globalns])
self.parameters = parametersview()
self.pars = self.parameters
self.parts = envparts()
from tools.dictwrapper import DictWrapper
self.future = DictWrapper(OrderedDict(), split='.')
def view(self, ns):
if ns != self.globalns:
return nsview([ns, self.globalns])
else:
return nsview([self.globalns])
def register(self, obj, **kwargs):
ns = kwargs.pop('ns')
if not ns:
self.globalns.objs.append(obj)
else:
ns.objs.append(obj)
obj.currentns = self.nsview.currentns()
bindings = self._bindings+[kwargs.pop("bindings", OrderedDict())]
if ns:
ns.addexpressions(obj, bindings=bindings)
if not kwargs.pop('bind', True):
return obj
if isinstance(obj, expressionproviders):
return obj
freevars = kwargs.pop('freevars', [])
for v in obj.variables.values():
if v.name() in freevars:
continue
if not v.isFree():
if cfg.debug_bindings:
print('binding skipped', v.name())
continue
vname = v.name()
param = next((bs[vname] for bs in bindings if vname in bs), vname)
if isinstance(param, str):
param = self.nsview[param]
if isinstance(param, ExpressionsEntry):
param = param.get()
if param is not None:
if cfg.debug_bindings:
print("binding", v.name(), 'of', type(obj).__name__, 'to', type(param).__name__, '.'.join([param.ns.path, param.name()]))
v.bind(param.getVariable())
else:
msg = "unable to bind variable %s of %r" % (v.name(), obj)
if not v.required():
msg += ", optional"
print(msg)
else:
raise Exception(msg)
obj.variablesBound()
return obj
def ns(self, ns):
if isinstance(ns, namespace):
return ns
elif isinstance(ns, str):
return self.globalns(ns)
else:
raise Exception("unknown object %r passed to ns()" % ns)
def defparameter(self, name, **kwargs):
if '.' in name:
nsname, name = name.rsplit('.', 1)
return self.ns(nsname).defparameter(name, **kwargs)
else:
return self.globalns.defparameter(name, **kwargs)
# def iternstree(self):
# return self.globalns.iternstree()
    @contextmanager
    def bind(self, **bindings):
self._bindings.append(bindings)
yield
self._bindings.pop()
def gettype(self, objtype):
types = self.parts.storage
matches = [k for k in types if k.startswith(objtype)]
if len(matches) > 1:
msg = "ambigous type specifier {0}, candidates: {1}"
raise Exception(msg.format(objtype, ', '.join(matches)))
elif not matches:
msg = "unknown type specifier {0}"
raise Exception(msg.format(objtype))
else:
return types[matches[0]]
def get(self, objspec):
if ':' in objspec:
objtype, objpath = objspec.split(":", 1)
return self.gettype(objtype)[objpath]
elif '/' in objspec:
nspath, obsname = objspec.rsplit("/", 1)
return self.ns(nspath).observables[obsname]
else:
return self.globalns[objspec]
env = _environment()
| true
| true
|
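nsview above resolves a name by scanning its namespaces most-recently-added first: add() uses deque.extendleft, and __getitem__ returns the first hit, so inner scopes shadow outer ones. A minimal sketch of that resolution order, with plain dicts standing in for namespace objects:

from collections import deque

view = deque()
view.extendleft([{"x": "global"}])          # outer scope
view.extendleft([{"x": "local", "y": 2}])   # inner scope, consulted first

def lookup(name):
    for ns in view:
        if name in ns:
            return ns[name]
    raise KeyError(name)

print(lookup("x"))  # 'local': the most recently added namespace wins
print(lookup("y"))  # 2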
1c3f5d6c78ec809aa195b52895b4074229d46b61
| 1,644
|
py
|
Python
|
players/migrations/0001_initial.py
|
dbisdorf/cortex-roller
|
888d4dba8f4f407660b84e12fc8e2ed9874f0b7e
|
[
"MIT"
] | 5
|
2019-10-14T11:01:00.000Z
|
2021-02-08T08:39:35.000Z
|
players/migrations/0001_initial.py
|
dbisdorf/cortex-roller
|
888d4dba8f4f407660b84e12fc8e2ed9874f0b7e
|
[
"MIT"
] | 1
|
2022-02-17T22:17:21.000Z
|
2022-03-01T14:33:46.000Z
|
players/migrations/0001_initial.py
|
dbisdorf/cortex-roller
|
888d4dba8f4f407660b84e12fc8e2ed9874f0b7e
|
[
"MIT"
] | 1
|
2019-10-15T21:17:07.000Z
|
2019-10-15T21:17:07.000Z
|
# Generated by Django 2.1.3 on 2018-11-20 16:25
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Die',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('faces', models.IntegerField(default=4)),
('result', models.IntegerField(default=0)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('selected', models.BooleanField(default=False)),
('tag', models.CharField(default='X', max_length=1)),
('room', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('text', models.CharField(max_length=200)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('room', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Roll',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('text', models.CharField(max_length=200)),
('room', models.CharField(max_length=30)),
],
),
]
| 35.73913
| 114
| 0.552311
|
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Die',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('faces', models.IntegerField(default=4)),
('result', models.IntegerField(default=0)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('selected', models.BooleanField(default=False)),
('tag', models.CharField(default='X', max_length=1)),
('room', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('text', models.CharField(max_length=200)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('room', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Roll',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('text', models.CharField(max_length=200)),
('room', models.CharField(max_length=30)),
],
),
]
| true
| true
|
1c3f5e1d3a3d9ed47f92c3618cd6b6010f4c812d
| 2,589
|
py
|
Python
|
pikuli/input/helper_types.py
|
NVoronchev/pikuli
|
b67e33fa51a7bb7252c5ac11651e2f005542f955
|
[
"MIT"
] | null | null | null |
pikuli/input/helper_types.py
|
NVoronchev/pikuli
|
b67e33fa51a7bb7252c5ac11651e2f005542f955
|
[
"MIT"
] | 1
|
2021-05-11T13:40:52.000Z
|
2021-05-13T19:42:26.000Z
|
pikuli/input/helper_types.py
|
NVoronchev/pikuli
|
b67e33fa51a7bb7252c5ac11651e2f005542f955
|
[
"MIT"
] | 2
|
2021-03-31T14:10:15.000Z
|
2022-01-24T02:16:04.000Z
|
# -*- coding: utf-8 -*-
import os
import traceback
from collections import namedtuple
from pikuli import logger
from pikuli._helpers import NotImplemetedDummyFactory
WindowsButtonCode = namedtuple('WindowsButtonCode', ['event_down', 'event_up'])
class _HookedClassInitMeta(type):
HOOKED_INIT_CLASS_METHODNAME = '__hooked_class_init'
HOOKED_INIT_CLASS_OVERRIDING = '__hooked_class_init_overriding'
def __init__(cls, name, bases, dct):
super(_HookedClassInitMeta, cls).__init__(name, bases, dct)
class_init_method_name = cls.get_private_name(cls.HOOKED_INIT_CLASS_METHODNAME)
class_init_method = getattr(cls, class_init_method_name, None)
if class_init_method:
cls.init(class_init_method)
overriding_dict_name = cls.get_private_name(cls.HOOKED_INIT_CLASS_OVERRIDING)
overriding_dict = getattr(cls, overriding_dict_name, None)
if overriding_dict:
cls.override_unavailable_methods(overriding_dict)
def init(cls, class_init_method):
try:
class_init_method()
except Exception as ex:
logger.exception(
                'NOTE: Can\'t initialize class {!r}. A dummy will be used. '
                'Some features are not available.'.format(cls))
err_msg = traceback.format_exc()
cls.mark_as_fail(err_msg)
def override_unavailable_methods(cls, overriding_dict):
for _, method_names_list in overriding_dict.items():
missed_methods = [method_name for method_name in method_names_list if not hasattr(cls, method_name)]
if missed_methods:
                raise AttributeError('Trying to override the following missing methods in {!r}: {!r}'.format(
cls, missed_methods))
for failed_cls, err_msg in cls.init_class_failed.items():
methods_to_override = overriding_dict.get(failed_cls, [])
for method_name in methods_to_override:
dummy = NotImplemetedDummyFactory.make_class_method(cls, method_name, err_msg)
setattr(cls, method_name, dummy)
def mark_as_fail(cls, err_msg):
cls.init_class_failed.update({cls: err_msg})
@property
def init_class_failed(cls):
cls._init_class_failed = getattr(cls, '_init_class_failed', {})
return cls._init_class_failed
def get_private_name(cls, attr_name):
return '_{cls_name}{attr_name}'.format(
cls_name=cls.__name__,
attr_name=attr_name)
class _HookedClassInit(metaclass=_HookedClassInitMeta):
pass
| 36.464789
| 112
| 0.68791
|
import os
import traceback
from collections import namedtuple
from pikuli import logger
from pikuli._helpers import NotImplemetedDummyFactory
WindowsButtonCode = namedtuple('WindowsButtonCode', ['event_down', 'event_up'])
class _HookedClassInitMeta(type):
HOOKED_INIT_CLASS_METHODNAME = '__hooked_class_init'
HOOKED_INIT_CLASS_OVERRIDING = '__hooked_class_init_overriding'
def __init__(cls, name, bases, dct):
super(_HookedClassInitMeta, cls).__init__(name, bases, dct)
class_init_method_name = cls.get_private_name(cls.HOOKED_INIT_CLASS_METHODNAME)
class_init_method = getattr(cls, class_init_method_name, None)
if class_init_method:
cls.init(class_init_method)
overriding_dict_name = cls.get_private_name(cls.HOOKED_INIT_CLASS_OVERRIDING)
overriding_dict = getattr(cls, overriding_dict_name, None)
if overriding_dict:
cls.override_unavailable_methods(overriding_dict)
def init(cls, class_init_method):
try:
class_init_method()
except Exception as ex:
logger.exception(
                'NOTE: Can\'t initialize class {!r}. A dummy will be used. '
                'Some features are not available.'.format(cls))
err_msg = traceback.format_exc()
cls.mark_as_fail(err_msg)
def override_unavailable_methods(cls, overriding_dict):
for _, method_names_list in overriding_dict.items():
missed_methods = [method_name for method_name in method_names_list if not hasattr(cls, method_name)]
if missed_methods:
            raise AttributeError('Trying to override the following missing methods in {!r}: {!r}'.format(
cls, missed_methods))
for failed_cls, err_msg in cls.init_class_failed.items():
methods_to_override = overriding_dict.get(failed_cls, [])
for method_name in methods_to_override:
dummy = NotImplemetedDummyFactory.make_class_method(cls, method_name, err_msg)
setattr(cls, method_name, dummy)
def mark_as_fail(cls, err_msg):
cls.init_class_failed.update({cls: err_msg})
@property
def init_class_failed(cls):
cls._init_class_failed = getattr(cls, '_init_class_failed', {})
return cls._init_class_failed
def get_private_name(cls, attr_name):
return '_{cls_name}{attr_name}'.format(
cls_name=cls.__name__,
attr_name=attr_name)
class _HookedClassInit(metaclass=_HookedClassInitMeta):
pass
| true
| true
|
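A class opts into the hook above by defining the name-mangled __hooked_class_init classmethod; the metaclass finds it via get_private_name and runs it once, at class-definition time, marking the class as failed if it raises. A minimal sketch with a hypothetical Backend subclass (assumes the module above is importable):

class Backend(_HookedClassInit):
    # Mangled to _Backend__hooked_class_init, which the metaclass looks
    # up and calls while the class object is being created.
    @classmethod
    def __hooked_class_init(cls):
        cls.ready = True  # e.g. probe a native library here

print(Backend.ready)  # True: the hook already ran at class creation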
1c3f5fe83b71fe65cd77563bf2e2591a4743c7df
| 463
|
py
|
Python
|
mkt/users/migrations/0003_userprofile_shown_dev_agreement.py
|
Witia1/zamboni
|
b1e2d5d475abff2fa5d4990415a06adee33bd647
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/users/migrations/0003_userprofile_shown_dev_agreement.py
|
Witia1/zamboni
|
b1e2d5d475abff2fa5d4990415a06adee33bd647
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/users/migrations/0003_userprofile_shown_dev_agreement.py
|
Witia1/zamboni
|
b1e2d5d475abff2fa5d4990415a06adee33bd647
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150826_0807'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='shown_dev_agreement',
field=models.DateTimeField(null=True, blank=True),
preserve_default=True,
),
]
| 22.047619
| 62
| 0.617711
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150826_0807'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='shown_dev_agreement',
field=models.DateTimeField(null=True, blank=True),
preserve_default=True,
),
]
| true
| true
|
1c3f607124fede9bd7a78b6a7d1b13b66bac75fa
| 5,374
|
py
|
Python
|
croissant/output/base.py
|
v-legoff/croissant
|
ec45f530d22d98503182b0dcf635e552c72df831
|
[
"BSD-3-Clause"
] | null | null | null |
croissant/output/base.py
|
v-legoff/croissant
|
ec45f530d22d98503182b0dcf635e552c72df831
|
[
"BSD-3-Clause"
] | null | null | null |
croissant/output/base.py
|
v-legoff/croissant
|
ec45f530d22d98503182b0dcf635e552c72df831
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing the base class of outputs."""
from abc import *
import argparse
import sys
import traceback
from croissant.language.exceptions.syntax import LanguageSyntaxError
from croissant.step.exceptions import *
from croissant.story.story_set import StorySet
class BaseOutput(metaclass=ABCMeta):
"""Base class for ouput.
Ideally, all outputs should inherit from this class. It contains
methods to help control how the reports are displayed.
"""
def __init__(self):
self.set = StorySet()
self.stories = self.set.stories
self.failures = []
self.errors = []
self.traces = {}
self.directory = None
self.parser = argparse.ArgumentParser()
self.parser.add_argument("directory")
def parse_args(self):
"""Parse the arguments from the argument parser."""
args = self.parser.parse_args()
self.handle_args(args)
def handle_args(self, args):
"""Handle the command-line arguments."""
self.directory = args.directory
def load(self):
"""Load the steps and stories of a given directory."""
directory = self.directory
try:
self.set.load(directory)
except LanguageSyntaxError as err:
self.handle_syntax_error(err)
sys.exit(1)
def run(self):
"""Run the different stories."""
for story_name in self.stories:
self.run_story(story_name)
def run_story(self, story_name):
"""Run a specific story.
In this method, the different assert_* errors are being tested
to produce a report.
"""
story = self.set.stories[story_name]
for scenario in story.scenarios:
self.run_scenario(story_name, scenario)
def run_scenario(self, story_name, scenario):
"""Run a specific scenario."""
try:
self.set.run_scenario(story_name, scenario)
except StepNotFound as err:
self.failures.append(err)
self.traces[scenario.identifier] = traceback.format_exc()
self.handle_step_not_found(err)
except StepAssertionError as err:
self.failures.append(err)
self.traces[scenario.identifier] = traceback.format_exc()
self.handle_assertion(err)
except Exception as err:
self.errors.append((scenario, err))
self.traces[scenario.identifier] = traceback.format_exc()
self.handle_exception(err)
else:
self.handle_success(story_name)
self.display_report()
@abstractmethod
def handle_syntax_error(self, error):
"""Handle a syntax error in one of the user stories."""
pass
@abstractmethod
def handle_step_not_found(self, error):
"""Handle the error if a step cannot be found."""
pass
@abstractmethod
def handle_assertion(self, error):
"""Handle the error when an assertion fails."""
pass
@abstractmethod
def handle_exception(self, exception):
"""Handle the exception when an error occures while executing."""
pass
@abstractmethod
def handle_success(self, story_name):
"""Handle when a user story passes without errors."""
pass
def display_report(self):
"""Display a report."""
self.display_main_report()
self.display_report_failures()
self.display_report_errors()
@abstractmethod
def display_main_report(self):
"""Display the main report (statistics)."""
pass
@abstractmethod
def display_report_failures(self):
"""Display the report on failures."""
pass
@abstractmethod
def display_report_errors(self):
"""Display the report on errors."""
pass
| 33.798742
| 79
| 0.676219
|
from abc import *
import argparse
import sys
import traceback
from croissant.language.exceptions.syntax import LanguageSyntaxError
from croissant.step.exceptions import *
from croissant.story.story_set import StorySet
class BaseOutput(metaclass=ABCMeta):
def __init__(self):
self.set = StorySet()
self.stories = self.set.stories
self.failures = []
self.errors = []
self.traces = {}
self.directory = None
self.parser = argparse.ArgumentParser()
self.parser.add_argument("directory")
def parse_args(self):
args = self.parser.parse_args()
self.handle_args(args)
def handle_args(self, args):
self.directory = args.directory
def load(self):
directory = self.directory
try:
self.set.load(directory)
except LanguageSyntaxError as err:
self.handle_syntax_error(err)
sys.exit(1)
def run(self):
for story_name in self.stories:
self.run_story(story_name)
def run_story(self, story_name):
story = self.set.stories[story_name]
for scenario in story.scenarios:
self.run_scenario(story_name, scenario)
def run_scenario(self, story_name, scenario):
try:
self.set.run_scenario(story_name, scenario)
except StepNotFound as err:
self.failures.append(err)
self.traces[scenario.identifier] = traceback.format_exc()
self.handle_step_not_found(err)
except StepAssertionError as err:
self.failures.append(err)
self.traces[scenario.identifier] = traceback.format_exc()
self.handle_assertion(err)
except Exception as err:
self.errors.append((scenario, err))
self.traces[scenario.identifier] = traceback.format_exc()
self.handle_exception(err)
else:
self.handle_success(story_name)
self.display_report()
@abstractmethod
def handle_syntax_error(self, error):
pass
@abstractmethod
def handle_step_not_found(self, error):
pass
@abstractmethod
def handle_assertion(self, error):
pass
@abstractmethod
def handle_exception(self, exception):
pass
@abstractmethod
def handle_success(self, story_name):
pass
def display_report(self):
self.display_main_report()
self.display_report_failures()
self.display_report_errors()
@abstractmethod
def display_main_report(self):
pass
@abstractmethod
def display_report_failures(self):
pass
@abstractmethod
def display_report_errors(self):
pass
| true
| true
|
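For orientation, a minimal sketch of a concrete reporter built on the abstract BaseOutput class in the record above; the subclass name, the print formatting, and the croissant.output.base import path are illustrative assumptions, not part of the croissant package as shown:
from croissant.output.base import BaseOutput  # hypothetical module path for the class above

class PlainOutput(BaseOutput):
    """Bare-bones reporter that prints every event to stdout."""
    def handle_syntax_error(self, error):
        print("Syntax error: {}".format(error))
    def handle_step_not_found(self, error):
        print("Step not found: {}".format(error))
    def handle_assertion(self, error):
        print("Assertion failed: {}".format(error))
    def handle_exception(self, exception):
        print("Unexpected error: {}".format(exception))
    def handle_success(self, story_name):
        print("PASS {}".format(story_name))
    def display_main_report(self):
        print("{} failure(s), {} error(s)".format(len(self.failures), len(self.errors)))
    def display_report_failures(self):
        for failure in self.failures:
            print(failure)
    def display_report_errors(self):
        for scenario, error in self.errors:
            print("{}: {}".format(scenario, error))

if __name__ == "__main__":
    output = PlainOutput()
    output.parse_args()  # reads the story directory from argv
    output.load()        # exits with status 1 on a syntax error
    output.run()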
1c3f60c2bbe712b82221105cad6428fa3a7f9b19
| 23
|
py
|
Python
|
proclist/scripts/scythe/proclist/__init__.py
|
scythe-io/community-modules
|
bc0d16e30a928f3ba11aecfa6cbac760b1de529b
|
[
"MIT"
] | 2
|
2022-03-14T18:48:04.000Z
|
2022-03-25T14:37:23.000Z
|
proclist/scripts/scythe/proclist/__init__.py
|
scythe-io/community-modules
|
bc0d16e30a928f3ba11aecfa6cbac760b1de529b
|
[
"MIT"
] | null | null | null |
proclist/scripts/scythe/proclist/__init__.py
|
scythe-io/community-modules
|
bc0d16e30a928f3ba11aecfa6cbac760b1de529b
|
[
"MIT"
] | null | null | null |
from .proclist import *
| 23
| 23
| 0.782609
|
from .proclist import *
| true
| true
|
1c3f61758a2bea75a69023c5d49c913db0bf27eb
| 4,747
|
py
|
Python
|
PyOpenGL/GLUT/ex10 - ProjectionMatrix - A MESS/main.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
PyOpenGL/GLUT/ex10 - ProjectionMatrix - A MESS/main.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
PyOpenGL/GLUT/ex10 - ProjectionMatrix - A MESS/main.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
import utils_engine, utils_math, utils_resource
import OpenGL.GL as GL
import OpenGL.GL.shaders as GL_shaders
import numpy as np
import ctypes as c
class MyApp(utils_engine.GameEngine):
def __init__(self, name, width, height):
utils_engine.GameEngine.__init__(self, name, width, height)
def setup(self): # Initialize this session
GL.glClearColor(0.4, 0.4, 0.4, 0.0) # Set background color
self.shader = utils_resource.loadShader("vertexshader.glsl", "fragmentshader.glsl")
self.UNIFORMS = {
'my_ModelMatrix':GL.glGetUniformLocation(self.shader, 'my_ModelMatrix'),
'my_ViewMatrix':GL.glGetUniformLocation(self.shader, 'my_ViewMatrix'),
'my_ProjectionMatrix':GL.glGetUniformLocation(self.shader, 'my_ProjectionMatrix')}
#Define geometry:
self.vertex_data = np.array([
[-1.0, 0.0,-1.0, 0.0, 0.0, 1.0],
[ 1.0, 0.0,-1.0, -1.0, 0.0,-1.0],
[-1.0, 0.0, 1.0, 1.0, 0.0,-1.0]
], dtype=np.float32)
self.index_data = np.array([
[0, 1, 2]
], dtype=np.uint32)
self.vbo = GL.glGenBuffers(1)
self.ibo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo) # Select self.vbo
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vertex_data, GL.GL_STATIC_DRAW) # Assign data to selected buffer
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo) # Select self.ibo
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, self.index_data, GL.GL_STATIC_DRAW) # Assign data to selected buffer
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0) # Deselect buffers
#Set model in world coordinates:
self.ModelMatrix = utils_math.ModelMatrix()
#self.ModelMatrix.rotate(0, (1, 0, 0))
self.ModelMatrix.setDirection(forward=(0,-1, 0), up=(0, 0, 1))
#self.ModelMatrix.translate((0, 0, 0))
self.ModelMatrix.setScale((0.5, 0.5, 0.5))
#Set camera in world coordinates:
self.ViewMatrix = utils_math.ViewMatrix()
self.ViewMatrix.setPosition((0, -5, 0))
self.ViewMatrix.setDirection(forward=(0, 1, 0), up=(0, 0, 1))
#self.ViewMatrix.rotate(0.01, (0, 0, 1))
        # Set up frustum properties: viewport width/height, near plane, far plane, field of view (in turns)
self.ProjectionMatrix = utils_math.ProjectionMatrix(self.width, self.height, 0.1, 100.0, 0.3)
def mainLoop(self): # Run this session
try:
GL_shaders.glUseProgram(self.shader)
GL.glUniformMatrix4fv(
self.UNIFORMS['my_ModelMatrix'],1, GL.GL_TRUE,
self.ModelMatrix.get()
)
GL.glUniformMatrix4fv(
self.UNIFORMS['my_ViewMatrix'],1, GL.GL_TRUE,
self.ViewMatrix.get()
)
GL.glUniformMatrix4fv(
self.UNIFORMS['my_ProjectionMatrix'],1, GL.GL_TRUE,
self.ProjectionMatrix.get()
)
try:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glVertexAttribPointer( # Vertex data
0, # Attribute 0 in this attribute array
3, # This attribute uses 3 elements
GL.GL_FLOAT, # These values are of type "GL_FLOAT"
False, # Normalize values? No!
                self.vertex_data.shape[1]*c.sizeof(c.c_float), # stride: bytes per row; 4 bytes per float, 6 floats per row (doubles would be 8 bytes each)
                c.c_void_p(0)) # byte offset where this attribute starts within each row
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer( # Extra vertex data
1,
3,
GL.GL_FLOAT,
False,
self.vertex_data.shape[1]*c.sizeof(c.c_float),
c.c_void_p(3*4))
GL.glEnableVertexAttribArray(1)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)
GL.glDrawElements(
GL.GL_TRIANGLES,
self.index_data.size,
GL.GL_UNSIGNED_INT, c.c_void_p(0))
finally:
GL.glDisableVertexAttribArray(0)
GL.glDisableVertexAttribArray(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
finally:
GL_shaders.glUseProgram(0)
if __name__ == "__main__":
app = MyApp("OpenGL!", 800, 600)
app.run()
| 43.154545
| 139
| 0.552138
|
import utils_engine, utils_math, utils_resource
import OpenGL.GL as GL
import OpenGL.GL.shaders as GL_shaders
import numpy as np
import ctypes as c
class MyApp(utils_engine.GameEngine):
def __init__(self, name, width, height):
utils_engine.GameEngine.__init__(self, name, width, height)
def setup(self):
GL.glClearColor(0.4, 0.4, 0.4, 0.0)
self.shader = utils_resource.loadShader("vertexshader.glsl", "fragmentshader.glsl")
self.UNIFORMS = {
'my_ModelMatrix':GL.glGetUniformLocation(self.shader, 'my_ModelMatrix'),
'my_ViewMatrix':GL.glGetUniformLocation(self.shader, 'my_ViewMatrix'),
'my_ProjectionMatrix':GL.glGetUniformLocation(self.shader, 'my_ProjectionMatrix')}
self.vertex_data = np.array([
[-1.0, 0.0,-1.0, 0.0, 0.0, 1.0],
[ 1.0, 0.0,-1.0, -1.0, 0.0,-1.0],
[-1.0, 0.0, 1.0, 1.0, 0.0,-1.0]
], dtype=np.float32)
self.index_data = np.array([
[0, 1, 2]
], dtype=np.uint32)
self.vbo = GL.glGenBuffers(1)
self.ibo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, self.vertex_data, GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, self.index_data, GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
self.ModelMatrix = utils_math.ModelMatrix()
self.ModelMatrix.setDirection(forward=(0,-1, 0), up=(0, 0, 1))
self.ModelMatrix.setScale((0.5, 0.5, 0.5))
self.ViewMatrix = utils_math.ViewMatrix()
self.ViewMatrix.setPosition((0, -5, 0))
self.ViewMatrix.setDirection(forward=(0, 1, 0), up=(0, 0, 1))
self.ProjectionMatrix = utils_math.ProjectionMatrix(self.width, self.height, 0.1, 100.0, 0.3)
def mainLoop(self):
try:
GL_shaders.glUseProgram(self.shader)
GL.glUniformMatrix4fv(
self.UNIFORMS['my_ModelMatrix'],1, GL.GL_TRUE,
self.ModelMatrix.get()
)
GL.glUniformMatrix4fv(
self.UNIFORMS['my_ViewMatrix'],1, GL.GL_TRUE,
self.ViewMatrix.get()
)
GL.glUniformMatrix4fv(
self.UNIFORMS['my_ProjectionMatrix'],1, GL.GL_TRUE,
self.ProjectionMatrix.get()
)
try:
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
GL.glVertexAttribPointer(
0,
3,
GL.GL_FLOAT,
False,
self.vertex_data.shape[1]*c.sizeof(c.c_float),
c.c_void_p(0))
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(
1,
3,
GL.GL_FLOAT,
False,
self.vertex_data.shape[1]*c.sizeof(c.c_float),
c.c_void_p(3*4))
GL.glEnableVertexAttribArray(1)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.ibo)
GL.glDrawElements(
GL.GL_TRIANGLES,
self.index_data.size,
GL.GL_UNSIGNED_INT, c.c_void_p(0))
finally:
GL.glDisableVertexAttribArray(0)
GL.glDisableVertexAttribArray(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
finally:
GL_shaders.glUseProgram(0)
if __name__ == "__main__":
app = MyApp("OpenGL!", 800, 600)
app.run()
| true
| true
|
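The stride/offset arithmetic fed to glVertexAttribPointer in the record above is easy to sanity-check in isolation; a standalone sketch whose array shape mirrors the vertex_data above:
import ctypes as c
import numpy as np

# Same layout as vertex_data above: 3 vertices, 6 float32 values each
vertex_data = np.zeros((3, 6), dtype=np.float32)

stride = vertex_data.shape[1] * c.sizeof(c.c_float)
assert stride == 24  # 6 floats per row * 4 bytes per float

# The second attribute (elements 3-5) starts 3 floats = 12 bytes into each row
assert c.c_void_p(3 * 4).value == 12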
1c3f645e30bcabf29bc1edf77a2662965cc28fd6
| 655
|
py
|
Python
|
functional_tests/factory/__init__.py
|
vindeolal/pari
|
8c69d15101480c3e803d6d74f8007cefee20c350
|
[
"BSD-3-Clause"
] | null | null | null |
functional_tests/factory/__init__.py
|
vindeolal/pari
|
8c69d15101480c3e803d6d74f8007cefee20c350
|
[
"BSD-3-Clause"
] | null | null | null |
functional_tests/factory/__init__.py
|
vindeolal/pari
|
8c69d15101480c3e803d6d74f8007cefee20c350
|
[
"BSD-3-Clause"
] | null | null | null |
from author_factory import AuthorFactory
from page_factory import PageFactory
from page_factory import ContentTypeFactory
from home_page_factory import HomePageFactory
from article_factory import ArticleFactory
from category_factory import CategoryFactory
from location_factory import LocationFactory
from album_factory import TalkingAlbumSlideFactory
from album_factory import AlbumFactory
from album_factory import PhotoAlbumSlideFactory
from image_factory import ImageFactory
from gallery_home_page_factory import GalleryHomePageFactory
from guidelines_page_factory import GuidelinesPageFactory
from face_factory import *
from resource_factory import *
| 43.666667
| 60
| 0.90687
|
from author_factory import AuthorFactory
from page_factory import PageFactory
from page_factory import ContentTypeFactory
from home_page_factory import HomePageFactory
from article_factory import ArticleFactory
from category_factory import CategoryFactory
from location_factory import LocationFactory
from album_factory import TalkingAlbumSlideFactory
from album_factory import AlbumFactory
from album_factory import PhotoAlbumSlideFactory
from image_factory import ImageFactory
from gallery_home_page_factory import GalleryHomePageFactory
from guidelines_page_factory import GuidelinesPageFactory
from face_factory import *
from resource_factory import *
| true
| true
|
1c3f66e91094f62aa9f4d2795bd6a5c95d2780dd
| 381
|
py
|
Python
|
1f_http_server/python/fastapi_srv.py
|
pjuangph/python2rust
|
cc99abe8738e5d1d7d9a34debb2892186ff77965
|
[
"CC0-1.0"
] | 24
|
2021-07-09T13:56:45.000Z
|
2022-03-26T19:44:00.000Z
|
1f_http_server/python/fastapi_srv.py
|
pjuangph/python2rust
|
cc99abe8738e5d1d7d9a34debb2892186ff77965
|
[
"CC0-1.0"
] | null | null | null |
1f_http_server/python/fastapi_srv.py
|
pjuangph/python2rust
|
cc99abe8738e5d1d7d9a34debb2892186ff77965
|
[
"CC0-1.0"
] | 3
|
2021-07-09T17:16:31.000Z
|
2022-03-24T15:44:44.000Z
|
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
app = FastAPI()
app.mount("/static", StaticFiles(directory="../static"), name="static")
@app.get("/")
@app.get("/hello/{name}")
async def root(name: str = "World"):
return {"message": f"Hello, {name}!"}
@app.get("/items/{item_id}")
async def read_item(item_id: int):
return {"item_id": item_id}
| 21.166667
| 71
| 0.671916
|
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
app = FastAPI()
app.mount("/static", StaticFiles(directory="../static"), name="static")
@app.get("/")
@app.get("/hello/{name}")
async def root(name: str = "World"):
return {"message": f"Hello, {name}!"}
@app.get("/items/{item_id}")
async def read_item(item_id: int):
return {"item_id": item_id}
| true
| true
|
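A quick way to exercise the FastAPI app in the record above without launching a server is FastAPI's bundled test client; a minimal sketch, assuming the file is importable as fastapi_srv (name taken from the file path), that the test client's HTTP dependency (requests or httpx, depending on the FastAPI version) is installed, and that the ../static directory exists so the StaticFiles mount does not fail at import:
from fastapi.testclient import TestClient

from fastapi_srv import app  # the module shown above

client = TestClient(app)

# The path parameter defaults to "World" on the bare route
assert client.get("/").json() == {"message": "Hello, World!"}
assert client.get("/hello/Ada").json() == {"message": "Hello, Ada!"}

# FastAPI converts the path segment to int before calling the handler
assert client.get("/items/42").json() == {"item_id": 42}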
1c3f6817d7d24807136393dde05bea0604f29c9c
| 18,102
|
py
|
Python
|
tests/test_isoparser.py
|
cccntu/dateutil
|
86ab39008e6eddce2b0837553d9ff42aee25c783
|
[
"Apache-2.0"
] | null | null | null |
tests/test_isoparser.py
|
cccntu/dateutil
|
86ab39008e6eddce2b0837553d9ff42aee25c783
|
[
"Apache-2.0"
] | null | null | null |
tests/test_isoparser.py
|
cccntu/dateutil
|
86ab39008e6eddce2b0837553d9ff42aee25c783
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, timedelta, date, time
import itertools as it
from bs_dateutil import tz
from bs_dateutil.tz import UTC
from bs_dateutil.parser import isoparser, isoparse
import pytest
import six
def _generate_tzoffsets(limited):
def _mkoffset(hmtuple, fmt):
h, m = hmtuple
m_td = (-1 if h < 0 else 1) * m
tzo = tz.tzoffset(None, timedelta(hours=h, minutes=m_td))
return tzo, fmt.format(h, m)
out = []
if not limited:
# The subset that's just hours
hm_out_h = [(h, 0) for h in (-23, -5, 0, 5, 23)]
out.extend([_mkoffset(hm, "{:+03d}") for hm in hm_out_h])
# Ones that have hours and minutes
hm_out = [] + hm_out_h
hm_out += [(-12, 15), (11, 30), (10, 2), (5, 15), (-5, 30)]
else:
hm_out = [(-5, -0)]
fmts = ["{:+03d}:{:02d}", "{:+03d}{:02d}"]
out += [_mkoffset(hm, fmt) for hm in hm_out for fmt in fmts]
# Also add in UTC and naive
out.append((UTC, "Z"))
out.append((None, ""))
return out
FULL_TZOFFSETS = _generate_tzoffsets(False)
FULL_TZOFFSETS_AWARE = [x for x in FULL_TZOFFSETS if x[1]]
TZOFFSETS = _generate_tzoffsets(True)
DATES = [datetime(1996, 1, 1), datetime(2017, 1, 1)]
@pytest.mark.parametrize("dt", tuple(DATES))
def test_year_only(dt):
dtstr = dt.strftime("%Y")
assert isoparse(dtstr) == dt
DATES += [datetime(2000, 2, 1), datetime(2017, 4, 1)]
@pytest.mark.parametrize("dt", tuple(DATES))
def test_year_month(dt):
fmt = "%Y-%m"
dtstr = dt.strftime(fmt)
assert isoparse(dtstr) == dt
DATES += [datetime(2016, 2, 29), datetime(2018, 3, 15)]
YMD_FMTS = ("%Y%m%d", "%Y-%m-%d")
@pytest.mark.parametrize("dt", tuple(DATES))
@pytest.mark.parametrize("fmt", YMD_FMTS)
def test_year_month_day(dt, fmt):
dtstr = dt.strftime(fmt)
assert isoparse(dtstr) == dt
def _isoparse_date_and_time(
dt, date_fmt, time_fmt, tzoffset, microsecond_precision=None
):
tzi, offset_str = tzoffset
fmt = date_fmt + "T" + time_fmt
dt = dt.replace(tzinfo=tzi)
dtstr = dt.strftime(fmt)
if microsecond_precision is not None:
if not fmt.endswith("%f"): # pragma: nocover
raise ValueError("Time format has no microseconds!")
if microsecond_precision != 6:
dtstr = dtstr[: -(6 - microsecond_precision)]
elif microsecond_precision > 6: # pragma: nocover
raise ValueError("Precision must be 1-6")
dtstr += offset_str
assert isoparse(dtstr) == dt
DATETIMES = [
datetime(1998, 4, 16, 12),
datetime(2019, 11, 18, 23),
datetime(2014, 12, 16, 4),
]
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
def test_ymd_h(dt, date_fmt, tzoffset):
_isoparse_date_and_time(dt, date_fmt, "%H", tzoffset)
DATETIMES = [datetime(2012, 1, 6, 9, 37)]
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("time_fmt", ("%H%M", "%H:%M"))
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
def test_ymd_hm(dt, date_fmt, time_fmt, tzoffset):
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
DATETIMES = [
datetime(2003, 9, 2, 22, 14, 2),
datetime(2003, 8, 8, 14, 9, 14),
datetime(2003, 4, 7, 6, 14, 59),
]
HMS_FMTS = ("%H%M%S", "%H:%M:%S")
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("time_fmt", HMS_FMTS)
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
def test_ymd_hms(dt, date_fmt, time_fmt, tzoffset):
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
DATETIMES = [datetime(2017, 11, 27, 6, 14, 30, 123456)]
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("time_fmt", (x + sep + "%f" for x in HMS_FMTS for sep in ".,"))
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
@pytest.mark.parametrize("precision", list(range(3, 7)))
def test_ymd_hms_micro(dt, date_fmt, time_fmt, tzoffset, precision):
# Truncate the microseconds to the desired precision for the representation
dt = dt.replace(microsecond=int(round(dt.microsecond, precision - 6)))
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset, precision)
###
# Truncation of extra digits beyond microsecond precision
@pytest.mark.parametrize(
"dt_str",
[
"2018-07-03T14:07:00.123456000001",
"2018-07-03T14:07:00.123456999999",
],
)
def test_extra_subsecond_digits(dt_str):
assert isoparse(dt_str) == datetime(2018, 7, 3, 14, 7, 0, 123456)
@pytest.mark.parametrize("tzoffset", FULL_TZOFFSETS)
def test_full_tzoffsets(tzoffset):
dt = datetime(2017, 11, 27, 6, 14, 30, 123456)
date_fmt = "%Y-%m-%d"
time_fmt = "%H:%M:%S.%f"
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
@pytest.mark.parametrize(
"dt_str",
[
"2014-04-11T00",
"2014-04-10T24",
"2014-04-11T00:00",
"2014-04-10T24:00",
"2014-04-11T00:00:00",
"2014-04-10T24:00:00",
"2014-04-11T00:00:00.000",
"2014-04-10T24:00:00.000",
"2014-04-11T00:00:00.000000",
"2014-04-10T24:00:00.000000",
],
)
def test_datetime_midnight(dt_str):
assert isoparse(dt_str) == datetime(2014, 4, 11, 0, 0, 0, 0)
@pytest.mark.parametrize(
"datestr",
[
"2014-01-01",
"20140101",
],
)
@pytest.mark.parametrize("sep", [" ", "a", "T", "_", "-"])
def test_isoparse_sep_none(datestr, sep):
isostr = datestr + sep + "14:33:09"
assert isoparse(isostr) == datetime(2014, 1, 1, 14, 33, 9)
##
# Uncommon date formats
TIME_ARGS = (
"time_args",
((None, time(0), None),)
+ tuple(
("%H:%M:%S.%f", _t, _tz)
for _t, _tz in it.product([time(0), time(9, 30), time(14, 47)], TZOFFSETS)
),
)
@pytest.mark.parametrize(
"isocal,dt_expected",
[
((2017, 10), datetime(2017, 3, 6)),
((2020, 1), datetime(2019, 12, 30)), # ISO year != Cal year
        ((2004, 53), datetime(2004, 12, 27)), # Only half the week is in 2004
],
)
def test_isoweek(isocal, dt_expected):
# TODO: Figure out how to parametrize this on formats, too
for fmt in ("{:04d}-W{:02d}", "{:04d}W{:02d}"):
dtstr = fmt.format(*isocal)
assert isoparse(dtstr) == dt_expected
@pytest.mark.parametrize(
"isocal,dt_expected",
[
((2016, 13, 7), datetime(2016, 4, 3)),
((2004, 53, 7), datetime(2005, 1, 2)), # ISO year != Cal year
((2009, 1, 2), datetime(2008, 12, 30)), # ISO year < Cal year
((2009, 53, 6), datetime(2010, 1, 2)), # ISO year > Cal year
],
)
def test_isoweek_day(isocal, dt_expected):
# TODO: Figure out how to parametrize this on formats, too
for fmt in ("{:04d}-W{:02d}-{:d}", "{:04d}W{:02d}{:d}"):
dtstr = fmt.format(*isocal)
assert isoparse(dtstr) == dt_expected
@pytest.mark.parametrize(
"isoord,dt_expected",
[
((2004, 1), datetime(2004, 1, 1)),
((2016, 60), datetime(2016, 2, 29)),
((2017, 60), datetime(2017, 3, 1)),
((2016, 366), datetime(2016, 12, 31)),
((2017, 365), datetime(2017, 12, 31)),
],
)
def test_iso_ordinal(isoord, dt_expected):
for fmt in ("{:04d}-{:03d}", "{:04d}{:03d}"):
dtstr = fmt.format(*isoord)
assert isoparse(dtstr) == dt_expected
###
# Acceptance of bytes
@pytest.mark.parametrize(
"isostr,dt",
[
(b"2014", datetime(2014, 1, 1)),
(b"20140204", datetime(2014, 2, 4)),
(b"2014-02-04", datetime(2014, 2, 4)),
(b"2014-02-04T12", datetime(2014, 2, 4, 12)),
(b"2014-02-04T12:30", datetime(2014, 2, 4, 12, 30)),
(b"2014-02-04T12:30:15", datetime(2014, 2, 4, 12, 30, 15)),
(b"2014-02-04T12:30:15.224", datetime(2014, 2, 4, 12, 30, 15, 224000)),
(b"20140204T123015.224", datetime(2014, 2, 4, 12, 30, 15, 224000)),
(b"2014-02-04T12:30:15.224Z", datetime(2014, 2, 4, 12, 30, 15, 224000, UTC)),
(b"2014-02-04T12:30:15.224z", datetime(2014, 2, 4, 12, 30, 15, 224000, UTC)),
(
b"2014-02-04T12:30:15.224+05:00",
datetime(
2014,
2,
4,
12,
30,
15,
224000,
tzinfo=tz.tzoffset(None, timedelta(hours=5)),
),
),
],
)
def test_bytes(isostr, dt):
assert isoparse(isostr) == dt
###
# Invalid ISO strings
@pytest.mark.parametrize(
"isostr,exception",
[
("201", ValueError), # ISO string too short
("2012-0425", ValueError), # Inconsistent date separators
("201204-25", ValueError), # Inconsistent date separators
("20120425T0120:00", ValueError), # Inconsistent time separators
("20120425T01:2000", ValueError), # Inconsistent time separators
("14:3015", ValueError), # Inconsistent time separator
("20120425T012500-334", ValueError), # Wrong microsecond separator
("2001-1", ValueError), # YYYY-M not valid
("2012-04-9", ValueError), # YYYY-MM-D not valid
("201204", ValueError), # YYYYMM not valid
("20120411T03:30+", ValueError), # Time zone too short
("20120411T03:30+1234567", ValueError), # Time zone too long
("20120411T03:30-25:40", ValueError), # Time zone invalid
("2012-1a", ValueError), # Invalid month
("20120411T03:30+00:60", ValueError), # Time zone invalid minutes
("20120411T03:30+00:61", ValueError), # Time zone invalid minutes
("20120411T033030.123456012:00", ValueError), # No sign in time zone
("2012-W00", ValueError), # Invalid ISO week
("2012-W55", ValueError), # Invalid ISO week
("2012-W01-0", ValueError), # Invalid ISO week day
("2012-W01-8", ValueError), # Invalid ISO week day
("2013-000", ValueError), # Invalid ordinal day
("2013-366", ValueError), # Invalid ordinal day
("2013366", ValueError), # Invalid ordinal day
("2014-03-12Т12:30:14", ValueError), # Cyrillic T
("2014-04-21T24:00:01", ValueError), # Invalid use of 24 for midnight
("2014_W01-1", ValueError), # Invalid separator
("2014W01-1", ValueError), # Inconsistent use of dashes
("2014-W011", ValueError), # Inconsistent use of dashes
],
)
def test_iso_raises(isostr, exception):
with pytest.raises(exception):
isoparse(isostr)
@pytest.mark.parametrize(
"sep_act, valid_sep, exception",
[
("T", "C", ValueError),
("C", "T", ValueError),
],
)
def test_iso_with_sep_raises(sep_act, valid_sep, exception):
parser = isoparser(sep=valid_sep)
isostr = "2012-04-25" + sep_act + "01:25:00"
with pytest.raises(exception):
parser.isoparse(isostr)
###
# Test ISOParser constructor
@pytest.mark.parametrize("sep", [" ", "9", "🍛"])
def test_isoparser_invalid_sep(sep):
with pytest.raises(ValueError):
isoparser(sep=sep)
# This only fails on Python 3
@pytest.mark.xfail(not six.PY2, reason="Fails on Python 3 only")
def test_isoparser_byte_sep():
dt = datetime(2017, 12, 6, 12, 30, 45)
dt_str = dt.isoformat(sep=str("T"))
dt_rt = isoparser(sep=b"T").isoparse(dt_str)
assert dt == dt_rt
###
# Test parse_tzstr
@pytest.mark.parametrize("tzoffset", FULL_TZOFFSETS)
def test_parse_tzstr(tzoffset):
dt = datetime(2017, 11, 27, 6, 14, 30, 123456)
date_fmt = "%Y-%m-%d"
time_fmt = "%H:%M:%S.%f"
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
@pytest.mark.parametrize("tzstr", ["-00:00", "+00:00", "+00", "-00", "+0000", "-0000"])
@pytest.mark.parametrize("zero_as_utc", [True, False])
def test_parse_tzstr_zero_as_utc(tzstr, zero_as_utc):
tzi = isoparser().parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
assert tzi == UTC
assert (type(tzi) == tz.tzutc) == zero_as_utc
@pytest.mark.parametrize(
"tzstr,exception",
[
("00:00", ValueError), # No sign
("05:00", ValueError), # No sign
("_00:00", ValueError), # Invalid sign
("+25:00", ValueError), # Offset too large
("00:0000", ValueError), # String too long
],
)
def test_parse_tzstr_fails(tzstr, exception):
with pytest.raises(exception):
isoparser().parse_tzstr(tzstr)
###
# Test parse_isodate
def __make_date_examples():
dates_no_day = [date(1999, 12, 1), date(2016, 2, 1)]
if not six.PY2:
# strftime does not support dates before 1900 in Python 2
dates_no_day.append(date(1000, 11, 1))
# Only one supported format for dates with no day
o = zip(dates_no_day, it.repeat("%Y-%m"))
dates_w_day = [
date(1969, 12, 31),
date(1900, 1, 1),
date(2016, 2, 29),
date(2017, 11, 14),
]
dates_w_day_fmts = ("%Y%m%d", "%Y-%m-%d")
o = it.chain(o, it.product(dates_w_day, dates_w_day_fmts))
return list(o)
@pytest.mark.parametrize("d,dt_fmt", __make_date_examples())
@pytest.mark.parametrize("as_bytes", [True, False])
def test_parse_isodate(d, dt_fmt, as_bytes):
d_str = d.strftime(dt_fmt)
if isinstance(d_str, six.text_type) and as_bytes:
d_str = d_str.encode("ascii")
elif isinstance(d_str, bytes) and not as_bytes:
d_str = d_str.decode("ascii")
iparser = isoparser()
assert iparser.parse_isodate(d_str) == d
@pytest.mark.parametrize(
"isostr,exception",
[
("243", ValueError), # ISO string too short
("2014-0423", ValueError), # Inconsistent date separators
("201404-23", ValueError), # Inconsistent date separators
("2014日03月14", ValueError), # Not ASCII
("2013-02-29", ValueError), # Not a leap year
("2014/12/03", ValueError), # Wrong separators
("2014-04-19T", ValueError), # Unknown components
("201202", ValueError), # Invalid format
],
)
def test_isodate_raises(isostr, exception):
with pytest.raises(exception):
isoparser().parse_isodate(isostr)
def test_parse_isodate_error_text():
with pytest.raises(ValueError) as excinfo:
isoparser().parse_isodate("2014-0423")
# ensure the error message does not contain b' prefixes
if six.PY2:
expected_error = "String contains unknown ISO components: u'2014-0423'"
else:
expected_error = "String contains unknown ISO components: '2014-0423'"
assert expected_error == str(excinfo.value)
###
# Test parse_isotime
def __make_time_examples():
outputs = []
# HH
time_h = [time(0), time(8), time(22)]
time_h_fmts = ["%H"]
outputs.append(it.product(time_h, time_h_fmts))
# HHMM / HH:MM
time_hm = [time(0, 0), time(0, 30), time(8, 47), time(16, 1)]
time_hm_fmts = ["%H%M", "%H:%M"]
outputs.append(it.product(time_hm, time_hm_fmts))
# HHMMSS / HH:MM:SS
time_hms = [
time(0, 0, 0),
time(0, 15, 30),
time(8, 2, 16),
time(12, 0),
time(16, 2),
time(20, 45),
]
time_hms_fmts = ["%H%M%S", "%H:%M:%S"]
outputs.append(it.product(time_hms, time_hms_fmts))
# HHMMSS.ffffff / HH:MM:SS.ffffff
time_hmsu = [
time(0, 0, 0, 0),
time(4, 15, 3, 247993),
time(14, 21, 59, 948730),
time(23, 59, 59, 999999),
]
time_hmsu_fmts = ["%H%M%S.%f", "%H:%M:%S.%f"]
outputs.append(it.product(time_hmsu, time_hmsu_fmts))
outputs = list(map(list, outputs))
# Time zones
ex_naive = list(it.chain.from_iterable(x[0:2] for x in outputs))
o = it.product(ex_naive, TZOFFSETS) # ((time, fmt), (tzinfo, offsetstr))
o = ((t.replace(tzinfo=tzi), fmt + off_str) for (t, fmt), (tzi, off_str) in o)
outputs.append(o)
return list(it.chain.from_iterable(outputs))
@pytest.mark.parametrize("time_val,time_fmt", __make_time_examples())
@pytest.mark.parametrize("as_bytes", [True, False])
def test_isotime(time_val, time_fmt, as_bytes):
tstr = time_val.strftime(time_fmt)
if isinstance(tstr, six.text_type) and as_bytes:
tstr = tstr.encode("ascii")
elif isinstance(tstr, bytes) and not as_bytes:
tstr = tstr.decode("ascii")
iparser = isoparser()
assert iparser.parse_isotime(tstr) == time_val
@pytest.mark.parametrize(
"isostr",
[
"24:00",
"2400",
"24:00:00",
"240000",
"24:00:00.000",
"24:00:00,000",
"24:00:00.000000",
"24:00:00,000000",
],
)
def test_isotime_midnight(isostr):
iparser = isoparser()
assert iparser.parse_isotime(isostr) == time(0, 0, 0, 0)
@pytest.mark.parametrize(
"isostr,exception",
[
("3", ValueError), # ISO string too short
("14時30分15秒", ValueError), # Not ASCII
("14_30_15", ValueError), # Invalid separators
("1430:15", ValueError), # Inconsistent separator use
("25", ValueError), # Invalid hours
("25:15", ValueError), # Invalid hours
("14:60", ValueError), # Invalid minutes
("14:59:61", ValueError), # Invalid seconds
("14:30:15.34468305:00", ValueError), # No sign in time zone
("14:30:15+", ValueError), # Time zone too short
("14:30:15+1234567", ValueError), # Time zone invalid
("14:59:59+25:00", ValueError), # Invalid tz hours
("14:59:59+12:62", ValueError), # Invalid tz minutes
("14:59:30_344583", ValueError), # Invalid microsecond separator
("24:01", ValueError), # 24 used for non-midnight time
("24:00:01", ValueError), # 24 used for non-midnight time
("24:00:00.001", ValueError), # 24 used for non-midnight time
("24:00:00.000001", ValueError), # 24 used for non-midnight time
],
)
def test_isotime_raises(isostr, exception):
iparser = isoparser()
with pytest.raises(exception):
iparser.parse_isotime(isostr)
| 30.270903
| 88
| 0.610872
|
from __future__ import unicode_literals
from datetime import datetime, timedelta, date, time
import itertools as it
from bs_dateutil import tz
from bs_dateutil.tz import UTC
from bs_dateutil.parser import isoparser, isoparse
import pytest
import six
def _generate_tzoffsets(limited):
def _mkoffset(hmtuple, fmt):
h, m = hmtuple
m_td = (-1 if h < 0 else 1) * m
tzo = tz.tzoffset(None, timedelta(hours=h, minutes=m_td))
return tzo, fmt.format(h, m)
out = []
if not limited:
hm_out_h = [(h, 0) for h in (-23, -5, 0, 5, 23)]
out.extend([_mkoffset(hm, "{:+03d}") for hm in hm_out_h])
# Ones that have hours and minutes
hm_out = [] + hm_out_h
hm_out += [(-12, 15), (11, 30), (10, 2), (5, 15), (-5, 30)]
else:
hm_out = [(-5, -0)]
fmts = ["{:+03d}:{:02d}", "{:+03d}{:02d}"]
out += [_mkoffset(hm, fmt) for hm in hm_out for fmt in fmts]
# Also add in UTC and naive
out.append((UTC, "Z"))
out.append((None, ""))
return out
FULL_TZOFFSETS = _generate_tzoffsets(False)
FULL_TZOFFSETS_AWARE = [x for x in FULL_TZOFFSETS if x[1]]
TZOFFSETS = _generate_tzoffsets(True)
DATES = [datetime(1996, 1, 1), datetime(2017, 1, 1)]
@pytest.mark.parametrize("dt", tuple(DATES))
def test_year_only(dt):
dtstr = dt.strftime("%Y")
assert isoparse(dtstr) == dt
DATES += [datetime(2000, 2, 1), datetime(2017, 4, 1)]
@pytest.mark.parametrize("dt", tuple(DATES))
def test_year_month(dt):
fmt = "%Y-%m"
dtstr = dt.strftime(fmt)
assert isoparse(dtstr) == dt
DATES += [datetime(2016, 2, 29), datetime(2018, 3, 15)]
YMD_FMTS = ("%Y%m%d", "%Y-%m-%d")
@pytest.mark.parametrize("dt", tuple(DATES))
@pytest.mark.parametrize("fmt", YMD_FMTS)
def test_year_month_day(dt, fmt):
dtstr = dt.strftime(fmt)
assert isoparse(dtstr) == dt
def _isoparse_date_and_time(
dt, date_fmt, time_fmt, tzoffset, microsecond_precision=None
):
tzi, offset_str = tzoffset
fmt = date_fmt + "T" + time_fmt
dt = dt.replace(tzinfo=tzi)
dtstr = dt.strftime(fmt)
if microsecond_precision is not None:
if not fmt.endswith("%f"): # pragma: nocover
raise ValueError("Time format has no microseconds!")
if microsecond_precision != 6:
dtstr = dtstr[: -(6 - microsecond_precision)]
elif microsecond_precision > 6: # pragma: nocover
raise ValueError("Precision must be 1-6")
dtstr += offset_str
assert isoparse(dtstr) == dt
DATETIMES = [
datetime(1998, 4, 16, 12),
datetime(2019, 11, 18, 23),
datetime(2014, 12, 16, 4),
]
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
def test_ymd_h(dt, date_fmt, tzoffset):
_isoparse_date_and_time(dt, date_fmt, "%H", tzoffset)
DATETIMES = [datetime(2012, 1, 6, 9, 37)]
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("time_fmt", ("%H%M", "%H:%M"))
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
def test_ymd_hm(dt, date_fmt, time_fmt, tzoffset):
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
DATETIMES = [
datetime(2003, 9, 2, 22, 14, 2),
datetime(2003, 8, 8, 14, 9, 14),
datetime(2003, 4, 7, 6, 14, 59),
]
HMS_FMTS = ("%H%M%S", "%H:%M:%S")
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("time_fmt", HMS_FMTS)
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
def test_ymd_hms(dt, date_fmt, time_fmt, tzoffset):
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
DATETIMES = [datetime(2017, 11, 27, 6, 14, 30, 123456)]
@pytest.mark.parametrize("dt", tuple(DATETIMES))
@pytest.mark.parametrize("date_fmt", YMD_FMTS)
@pytest.mark.parametrize("time_fmt", (x + sep + "%f" for x in HMS_FMTS for sep in ".,"))
@pytest.mark.parametrize("tzoffset", TZOFFSETS)
@pytest.mark.parametrize("precision", list(range(3, 7)))
def test_ymd_hms_micro(dt, date_fmt, time_fmt, tzoffset, precision):
# Truncate the microseconds to the desired precision for the representation
dt = dt.replace(microsecond=int(round(dt.microsecond, precision - 6)))
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset, precision)
###
# Truncation of extra digits beyond microsecond precision
@pytest.mark.parametrize(
"dt_str",
[
"2018-07-03T14:07:00.123456000001",
"2018-07-03T14:07:00.123456999999",
],
)
def test_extra_subsecond_digits(dt_str):
assert isoparse(dt_str) == datetime(2018, 7, 3, 14, 7, 0, 123456)
@pytest.mark.parametrize("tzoffset", FULL_TZOFFSETS)
def test_full_tzoffsets(tzoffset):
dt = datetime(2017, 11, 27, 6, 14, 30, 123456)
date_fmt = "%Y-%m-%d"
time_fmt = "%H:%M:%S.%f"
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
@pytest.mark.parametrize(
"dt_str",
[
"2014-04-11T00",
"2014-04-10T24",
"2014-04-11T00:00",
"2014-04-10T24:00",
"2014-04-11T00:00:00",
"2014-04-10T24:00:00",
"2014-04-11T00:00:00.000",
"2014-04-10T24:00:00.000",
"2014-04-11T00:00:00.000000",
"2014-04-10T24:00:00.000000",
],
)
def test_datetime_midnight(dt_str):
assert isoparse(dt_str) == datetime(2014, 4, 11, 0, 0, 0, 0)
@pytest.mark.parametrize(
"datestr",
[
"2014-01-01",
"20140101",
],
)
@pytest.mark.parametrize("sep", [" ", "a", "T", "_", "-"])
def test_isoparse_sep_none(datestr, sep):
isostr = datestr + sep + "14:33:09"
assert isoparse(isostr) == datetime(2014, 1, 1, 14, 33, 9)
##
# Uncommon date formats
TIME_ARGS = (
"time_args",
((None, time(0), None),)
+ tuple(
("%H:%M:%S.%f", _t, _tz)
for _t, _tz in it.product([time(0), time(9, 30), time(14, 47)], TZOFFSETS)
),
)
@pytest.mark.parametrize(
"isocal,dt_expected",
[
((2017, 10), datetime(2017, 3, 6)),
((2020, 1), datetime(2019, 12, 30)), # ISO year != Cal year
        ((2004, 53), datetime(2004, 12, 27)), # Only half the week is in 2004
],
)
def test_isoweek(isocal, dt_expected):
# TODO: Figure out how to parametrize this on formats, too
for fmt in ("{:04d}-W{:02d}", "{:04d}W{:02d}"):
dtstr = fmt.format(*isocal)
assert isoparse(dtstr) == dt_expected
@pytest.mark.parametrize(
"isocal,dt_expected",
[
((2016, 13, 7), datetime(2016, 4, 3)),
((2004, 53, 7), datetime(2005, 1, 2)), # ISO year != Cal year
((2009, 1, 2), datetime(2008, 12, 30)), # ISO year < Cal year
((2009, 53, 6), datetime(2010, 1, 2)), # ISO year > Cal year
],
)
def test_isoweek_day(isocal, dt_expected):
# TODO: Figure out how to parametrize this on formats, too
for fmt in ("{:04d}-W{:02d}-{:d}", "{:04d}W{:02d}{:d}"):
dtstr = fmt.format(*isocal)
assert isoparse(dtstr) == dt_expected
@pytest.mark.parametrize(
"isoord,dt_expected",
[
((2004, 1), datetime(2004, 1, 1)),
((2016, 60), datetime(2016, 2, 29)),
((2017, 60), datetime(2017, 3, 1)),
((2016, 366), datetime(2016, 12, 31)),
((2017, 365), datetime(2017, 12, 31)),
],
)
def test_iso_ordinal(isoord, dt_expected):
for fmt in ("{:04d}-{:03d}", "{:04d}{:03d}"):
dtstr = fmt.format(*isoord)
assert isoparse(dtstr) == dt_expected
###
# Acceptance of bytes
@pytest.mark.parametrize(
"isostr,dt",
[
(b"2014", datetime(2014, 1, 1)),
(b"20140204", datetime(2014, 2, 4)),
(b"2014-02-04", datetime(2014, 2, 4)),
(b"2014-02-04T12", datetime(2014, 2, 4, 12)),
(b"2014-02-04T12:30", datetime(2014, 2, 4, 12, 30)),
(b"2014-02-04T12:30:15", datetime(2014, 2, 4, 12, 30, 15)),
(b"2014-02-04T12:30:15.224", datetime(2014, 2, 4, 12, 30, 15, 224000)),
(b"20140204T123015.224", datetime(2014, 2, 4, 12, 30, 15, 224000)),
(b"2014-02-04T12:30:15.224Z", datetime(2014, 2, 4, 12, 30, 15, 224000, UTC)),
(b"2014-02-04T12:30:15.224z", datetime(2014, 2, 4, 12, 30, 15, 224000, UTC)),
(
b"2014-02-04T12:30:15.224+05:00",
datetime(
2014,
2,
4,
12,
30,
15,
224000,
tzinfo=tz.tzoffset(None, timedelta(hours=5)),
),
),
],
)
def test_bytes(isostr, dt):
assert isoparse(isostr) == dt
###
# Invalid ISO strings
@pytest.mark.parametrize(
"isostr,exception",
[
("201", ValueError), # ISO string too short
("2012-0425", ValueError), # Inconsistent date separators
("201204-25", ValueError), # Inconsistent date separators
("20120425T0120:00", ValueError), # Inconsistent time separators
("20120425T01:2000", ValueError), # Inconsistent time separators
("14:3015", ValueError), # Inconsistent time separator
("20120425T012500-334", ValueError), # Wrong microsecond separator
("2001-1", ValueError), # YYYY-M not valid
("2012-04-9", ValueError), # YYYY-MM-D not valid
("201204", ValueError), # YYYYMM not valid
("20120411T03:30+", ValueError), # Time zone too short
("20120411T03:30+1234567", ValueError), # Time zone too long
("20120411T03:30-25:40", ValueError), # Time zone invalid
("2012-1a", ValueError), # Invalid month
("20120411T03:30+00:60", ValueError), # Time zone invalid minutes
("20120411T03:30+00:61", ValueError), # Time zone invalid minutes
("20120411T033030.123456012:00", ValueError), # No sign in time zone
("2012-W00", ValueError), # Invalid ISO week
("2012-W55", ValueError), # Invalid ISO week
("2012-W01-0", ValueError), # Invalid ISO week day
("2012-W01-8", ValueError), # Invalid ISO week day
("2013-000", ValueError), # Invalid ordinal day
("2013-366", ValueError), # Invalid ordinal day
("2013366", ValueError), # Invalid ordinal day
("2014-03-12Т12:30:14", ValueError), # Cyrillic T
("2014-04-21T24:00:01", ValueError), # Invalid use of 24 for midnight
("2014_W01-1", ValueError), # Invalid separator
("2014W01-1", ValueError), # Inconsistent use of dashes
("2014-W011", ValueError), # Inconsistent use of dashes
],
)
def test_iso_raises(isostr, exception):
with pytest.raises(exception):
isoparse(isostr)
@pytest.mark.parametrize(
"sep_act, valid_sep, exception",
[
("T", "C", ValueError),
("C", "T", ValueError),
],
)
def test_iso_with_sep_raises(sep_act, valid_sep, exception):
parser = isoparser(sep=valid_sep)
isostr = "2012-04-25" + sep_act + "01:25:00"
with pytest.raises(exception):
parser.isoparse(isostr)
###
# Test ISOParser constructor
@pytest.mark.parametrize("sep", [" ", "9", "🍛"])
def test_isoparser_invalid_sep(sep):
with pytest.raises(ValueError):
isoparser(sep=sep)
# This only fails on Python 3
@pytest.mark.xfail(not six.PY2, reason="Fails on Python 3 only")
def test_isoparser_byte_sep():
dt = datetime(2017, 12, 6, 12, 30, 45)
dt_str = dt.isoformat(sep=str("T"))
dt_rt = isoparser(sep=b"T").isoparse(dt_str)
assert dt == dt_rt
###
# Test parse_tzstr
@pytest.mark.parametrize("tzoffset", FULL_TZOFFSETS)
def test_parse_tzstr(tzoffset):
dt = datetime(2017, 11, 27, 6, 14, 30, 123456)
date_fmt = "%Y-%m-%d"
time_fmt = "%H:%M:%S.%f"
_isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
@pytest.mark.parametrize("tzstr", ["-00:00", "+00:00", "+00", "-00", "+0000", "-0000"])
@pytest.mark.parametrize("zero_as_utc", [True, False])
def test_parse_tzstr_zero_as_utc(tzstr, zero_as_utc):
tzi = isoparser().parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
assert tzi == UTC
assert (type(tzi) == tz.tzutc) == zero_as_utc
@pytest.mark.parametrize(
"tzstr,exception",
[
("00:00", ValueError), # No sign
("05:00", ValueError), # No sign
("_00:00", ValueError), # Invalid sign
("+25:00", ValueError), # Offset too large
("00:0000", ValueError), # String too long
],
)
def test_parse_tzstr_fails(tzstr, exception):
with pytest.raises(exception):
isoparser().parse_tzstr(tzstr)
###
# Test parse_isodate
def __make_date_examples():
dates_no_day = [date(1999, 12, 1), date(2016, 2, 1)]
if not six.PY2:
# strftime does not support dates before 1900 in Python 2
dates_no_day.append(date(1000, 11, 1))
# Only one supported format for dates with no day
o = zip(dates_no_day, it.repeat("%Y-%m"))
dates_w_day = [
date(1969, 12, 31),
date(1900, 1, 1),
date(2016, 2, 29),
date(2017, 11, 14),
]
dates_w_day_fmts = ("%Y%m%d", "%Y-%m-%d")
o = it.chain(o, it.product(dates_w_day, dates_w_day_fmts))
return list(o)
@pytest.mark.parametrize("d,dt_fmt", __make_date_examples())
@pytest.mark.parametrize("as_bytes", [True, False])
def test_parse_isodate(d, dt_fmt, as_bytes):
d_str = d.strftime(dt_fmt)
if isinstance(d_str, six.text_type) and as_bytes:
d_str = d_str.encode("ascii")
elif isinstance(d_str, bytes) and not as_bytes:
d_str = d_str.decode("ascii")
iparser = isoparser()
assert iparser.parse_isodate(d_str) == d
@pytest.mark.parametrize(
"isostr,exception",
[
("243", ValueError), # ISO string too short
("2014-0423", ValueError), # Inconsistent date separators
("201404-23", ValueError), # Inconsistent date separators
("2014日03月14", ValueError), # Not ASCII
("2013-02-29", ValueError), # Not a leap year
("2014/12/03", ValueError), # Wrong separators
("2014-04-19T", ValueError), # Unknown components
("201202", ValueError), # Invalid format
],
)
def test_isodate_raises(isostr, exception):
with pytest.raises(exception):
isoparser().parse_isodate(isostr)
def test_parse_isodate_error_text():
with pytest.raises(ValueError) as excinfo:
isoparser().parse_isodate("2014-0423")
# ensure the error message does not contain b' prefixes
if six.PY2:
expected_error = "String contains unknown ISO components: u'2014-0423'"
else:
expected_error = "String contains unknown ISO components: '2014-0423'"
assert expected_error == str(excinfo.value)
def __make_time_examples():
outputs = []
time_h = [time(0), time(8), time(22)]
time_h_fmts = ["%H"]
outputs.append(it.product(time_h, time_h_fmts))
time_hm = [time(0, 0), time(0, 30), time(8, 47), time(16, 1)]
time_hm_fmts = ["%H%M", "%H:%M"]
outputs.append(it.product(time_hm, time_hm_fmts))
time_hms = [
time(0, 0, 0),
time(0, 15, 30),
time(8, 2, 16),
time(12, 0),
time(16, 2),
time(20, 45),
]
time_hms_fmts = ["%H%M%S", "%H:%M:%S"]
outputs.append(it.product(time_hms, time_hms_fmts))
time_hmsu = [
time(0, 0, 0, 0),
time(4, 15, 3, 247993),
time(14, 21, 59, 948730),
time(23, 59, 59, 999999),
]
time_hmsu_fmts = ["%H%M%S.%f", "%H:%M:%S.%f"]
outputs.append(it.product(time_hmsu, time_hmsu_fmts))
outputs = list(map(list, outputs))
ex_naive = list(it.chain.from_iterable(x[0:2] for x in outputs))
o = it.product(ex_naive, TZOFFSETS)
o = ((t.replace(tzinfo=tzi), fmt + off_str) for (t, fmt), (tzi, off_str) in o)
outputs.append(o)
return list(it.chain.from_iterable(outputs))
@pytest.mark.parametrize("time_val,time_fmt", __make_time_examples())
@pytest.mark.parametrize("as_bytes", [True, False])
def test_isotime(time_val, time_fmt, as_bytes):
tstr = time_val.strftime(time_fmt)
if isinstance(tstr, six.text_type) and as_bytes:
tstr = tstr.encode("ascii")
elif isinstance(tstr, bytes) and not as_bytes:
tstr = tstr.decode("ascii")
iparser = isoparser()
assert iparser.parse_isotime(tstr) == time_val
@pytest.mark.parametrize(
"isostr",
[
"24:00",
"2400",
"24:00:00",
"240000",
"24:00:00.000",
"24:00:00,000",
"24:00:00.000000",
"24:00:00,000000",
],
)
def test_isotime_midnight(isostr):
iparser = isoparser()
assert iparser.parse_isotime(isostr) == time(0, 0, 0, 0)
@pytest.mark.parametrize(
"isostr,exception",
[
("3", ValueError),
("14時30分15秒", ValueError),
("14_30_15", ValueError),
("1430:15", ValueError),
("25", ValueError),
("25:15", ValueError),
("14:60", ValueError),
("14:59:61", ValueError),
("14:30:15.34468305:00", ValueError),
("14:30:15+", ValueError),
("14:30:15+1234567", ValueError),
("14:59:59+25:00", ValueError),
("14:59:59+12:62", ValueError),
("14:59:30_344583", ValueError),
("24:01", ValueError),
("24:00:01", ValueError),
("24:00:00.001", ValueError),
("24:00:00.000001", ValueError),
],
)
def test_isotime_raises(isostr, exception):
iparser = isoparser()
with pytest.raises(exception):
iparser.parse_isotime(isostr)
| true
| true
|
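The test module above drives bs_dateutil's ISO-8601 parser (apparently a vendored copy of dateutil, so the API matches dateutil.parser.isoparse); a minimal usage sketch, with expected values taken directly from the parametrized cases above:
from datetime import datetime
from bs_dateutil.parser import isoparse

# A full timestamp with a fixed UTC offset parses to an aware datetime
isoparse("2014-02-04T12:30:15.224+05:00")

# Truncated representations are accepted: omitted fields default to their minimums
assert isoparse("2014") == datetime(2014, 1, 1)

# ISO week dates and ordinal dates work too (values from the test tables above)
assert isoparse("2016-W13-7") == datetime(2016, 4, 3)
assert isoparse("2016-060") == datetime(2016, 2, 29)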
1c3f6935c2fe92c6a0e7e7de5c115dad5830b9da
| 1,414
|
py
|
Python
|
examples/resnet_app.py
|
yaoxuefeng6/FleetX
|
4e1a77789b76eccc20154a7e2ad2b120ffc512c4
|
[
"Apache-2.0"
] | null | null | null |
examples/resnet_app.py
|
yaoxuefeng6/FleetX
|
4e1a77789b76eccc20154a7e2ad2b120ffc512c4
|
[
"Apache-2.0"
] | null | null | null |
examples/resnet_app.py
|
yaoxuefeng6/FleetX
|
4e1a77789b76eccc20154a7e2ad2b120ffc512c4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fleetx as X
import paddle
import paddle.distributed.fleet as fleet
configs = X.parse_train_configs()
fleet.init(is_collective=True)
model = X.applications.Resnet50()
imagenet_downloader = X.utils.ImageNetDownloader()
local_path = imagenet_downloader.download_from_bos(local_path='./data')
loader = model.load_imagenet_from_file(
"{}/train.txt".format(local_path), batch_size=32)
dist_strategy = fleet.DistributedStrategy()
dist_strategy.amp = True
optimizer = paddle.fluid.optimizer.Momentum(
learning_rate=configs.lr,
momentum=configs.momentum,
regularization=paddle.fluid.regularizer.L2Decay(0.0001))
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
optimizer.minimize(model.loss)
trainer = X.MultiGPUTrainer()
trainer.fit(model, loader, epoch=10)
| 36.25641
| 74
| 0.7843
|
import fleetx as X
import paddle
import paddle.distributed.fleet as fleet
configs = X.parse_train_configs()
fleet.init(is_collective=True)
model = X.applications.Resnet50()
imagenet_downloader = X.utils.ImageNetDownloader()
local_path = imagenet_downloader.download_from_bos(local_path='./data')
loader = model.load_imagenet_from_file(
"{}/train.txt".format(local_path), batch_size=32)
dist_strategy = fleet.DistributedStrategy()
dist_strategy.amp = True
optimizer = paddle.fluid.optimizer.Momentum(
learning_rate=configs.lr,
momentum=configs.momentum,
regularization=paddle.fluid.regularizer.L2Decay(0.0001))
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
optimizer.minimize(model.loss)
trainer = X.MultiGPUTrainer()
trainer.fit(model, loader, epoch=10)
| true
| true
|
1c3f693af9e7d5604a693ebf041c5169a43922a3
| 12,733
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content.py
|
detienne20/azure-sdk-for-python
|
f3522cd897ca6adf113b1a1204a2627d8ba76a6b
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content.py
|
detienne20/azure-sdk-for-python
|
f3522cd897ca6adf113b1a1204a2627d8ba76a6b
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content.py
|
detienne20/azure-sdk-for-python
|
f3522cd897ca6adf113b1a1204a2627d8ba76a6b
|
[
"MIT"
] | 1
|
2020-07-05T21:13:37.000Z
|
2020-07-05T21:13:37.000Z
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from io import BytesIO
from azure.core.exceptions import ServiceRequestError, ClientAuthenticationError, HttpResponseError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_content_result
from azure.ai.formrecognizer import FormRecognizerClient, FormContentType
from testcase import FormRecognizerTest, GlobalFormRecognizerAccountPreparer
from testcase import GlobalClientPreparer as _GlobalClientPreparer
GlobalClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestContentFromStream(FormRecognizerTest):
@GlobalFormRecognizerAccountPreparer()
def test_content_bad_endpoint(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
with open(self.invoice_pdf, "rb") as fd:
myfile = fd.read()
with self.assertRaises(ServiceRequestError):
client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(form_recognizer_account_key))
poller = client.begin_recognize_content(myfile)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_authentication_successful_key(self, client):
with open(self.invoice_pdf, "rb") as fd:
myfile = fd.read()
poller = client.begin_recognize_content(myfile)
result = poller.result()
@GlobalFormRecognizerAccountPreparer()
def test_content_authentication_bad_key(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential("xxxx"))
with self.assertRaises(ClientAuthenticationError):
poller = client.begin_recognize_content(b"xx", content_type="application/pdf")
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_passing_enum_content_type(self, client):
with open(self.invoice_pdf, "rb") as fd:
myfile = fd.read()
poller = client.begin_recognize_content(
myfile,
content_type=FormContentType.application_pdf
)
result = poller.result()
self.assertIsNotNone(result)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_damaged_file_passed_as_bytes(self, client):
damaged_pdf = b"\x25\x50\x44\x46\x55\x55\x55" # still has correct bytes to be recognized as PDF
with self.assertRaises(HttpResponseError):
poller = client.begin_recognize_content(
damaged_pdf,
)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_damaged_file_bytes_fails_autodetect_content_type(self, client):
damaged_pdf = b"\x50\x44\x46\x55\x55\x55" # doesn't match any magic file numbers
with self.assertRaises(ValueError):
poller = client.begin_recognize_content(
damaged_pdf,
)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_damaged_file_passed_as_bytes_io(self, client):
damaged_pdf = BytesIO(b"\x25\x50\x44\x46\x55\x55\x55") # still has correct bytes to be recognized as PDF
with self.assertRaises(HttpResponseError):
poller = client.begin_recognize_content(
damaged_pdf,
)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_damaged_file_bytes_io_fails_autodetect(self, client):
damaged_pdf = BytesIO(b"\x50\x44\x46\x55\x55\x55") # doesn't match any magic file numbers
with self.assertRaises(ValueError):
poller = client.begin_recognize_content(
damaged_pdf,
)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_blank_page(self, client):
with open(self.blank_pdf, "rb") as stream:
poller = client.begin_recognize_content(
stream,
)
result = poller.result()
self.assertIsNotNone(result)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_passing_bad_content_type_param_passed(self, client):
with open(self.invoice_pdf, "rb") as fd:
myfile = fd.read()
with self.assertRaises(ValueError):
poller = client.begin_recognize_content(
myfile,
content_type="application/jpeg"
)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_stream_passing_url(self, client):
with self.assertRaises(TypeError):
poller = client.begin_recognize_content("https://badurl.jpg", content_type="application/json")
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_auto_detect_unsupported_stream_content(self, client):
with open(self.unsupported_content_py, "rb") as fd:
myfile = fd.read()
with self.assertRaises(ValueError):
poller = client.begin_recognize_content(
myfile
)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_stream_transform_pdf(self, client):
with open(self.invoice_pdf, "rb") as fd:
myform = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._client._deserialize(AnalyzeOperationResult, raw_response)
extracted_layout = prepare_content_result(analyze_result)
responses.append(analyze_result)
responses.append(extracted_layout)
poller = client.begin_recognize_content(myform, cls=callback)
result = poller.result()
raw_response = responses[0]
layout = responses[1]
page_results = raw_response.analyze_result.page_results
read_results = raw_response.analyze_result.read_results
# Check form pages
self.assertFormPagesTransformCorrect(layout, read_results, page_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_stream_pdf(self, client):
with open(self.invoice_pdf, "rb") as fd:
myform = fd.read()
poller = client.begin_recognize_content(myform)
result = poller.result()
self.assertEqual(len(result), 1)
layout = result[0]
self.assertEqual(layout.page_number, 1)
self.assertFormPagesHasValues(result)
self.assertEqual(layout.tables[0].row_count, 2)
self.assertEqual(layout.tables[0].column_count, 6)
self.assertEqual(layout.tables[0].page_number, 1)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_stream_transform_jpg(self, client):
with open(self.form_jpg, "rb") as fd:
myform = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._client._deserialize(AnalyzeOperationResult, raw_response)
extracted_layout = prepare_content_result(analyze_result)
responses.append(analyze_result)
responses.append(extracted_layout)
poller = client.begin_recognize_content(myform, cls=callback)
result = poller.result()
raw_response = responses[0]
layout = responses[1]
page_results = raw_response.analyze_result.page_results
read_results = raw_response.analyze_result.read_results
# Check form pages
self.assertFormPagesTransformCorrect(layout, read_results, page_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_stream_jpg(self, client):
with open(self.form_jpg, "rb") as stream:
poller = client.begin_recognize_content(stream)
result = poller.result()
self.assertEqual(len(result), 1)
layout = result[0]
self.assertEqual(layout.page_number, 1)
self.assertFormPagesHasValues(result)
self.assertEqual(layout.tables[0].row_count, 4)
self.assertEqual(layout.tables[0].column_count, 3)
self.assertEqual(layout.tables[1].row_count, 6)
self.assertEqual(layout.tables[1].column_count, 4)
self.assertEqual(layout.tables[0].page_number, 1)
self.assertEqual(layout.tables[1].page_number, 1)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_multipage(self, client):
with open(self.multipage_invoice_pdf, "rb") as fd:
invoice = fd.read()
poller = client.begin_recognize_content(invoice)
result = poller.result()
self.assertEqual(len(result), 3)
self.assertFormPagesHasValues(result)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_multipage_transform(self, client):
with open(self.multipage_invoice_pdf, "rb") as fd:
myform = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._client._deserialize(AnalyzeOperationResult, raw_response)
extracted_layout = prepare_content_result(analyze_result)
responses.append(analyze_result)
responses.append(extracted_layout)
poller = client.begin_recognize_content(myform, cls=callback)
result = poller.result()
raw_response = responses[0]
layout = responses[1]
page_results = raw_response.analyze_result.page_results
read_results = raw_response.analyze_result.read_results
# Check form pages
self.assertFormPagesTransformCorrect(layout, read_results, page_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
@pytest.mark.live_test_only
def test_content_continuation_token(self, client):
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
initial_poller = client.begin_recognize_content(myfile)
cont_token = initial_poller.continuation_token()
poller = client.begin_recognize_content(myfile, continuation_token=cont_token)
result = poller.result()
self.assertIsNotNone(result)
initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_multipage_table_span_pdf(self, client):
with open(self.multipage_table_pdf, "rb") as stream:
poller = client.begin_recognize_content(stream)
result = poller.result()
self.assertEqual(len(result), 2)
layout = result[0]
self.assertEqual(layout.page_number, 1)
self.assertEqual(len(layout.tables), 2)
self.assertEqual(layout.tables[0].row_count, 30)
self.assertEqual(layout.tables[0].column_count, 5)
self.assertEqual(layout.tables[0].page_number, 1)
self.assertEqual(layout.tables[1].row_count, 6)
self.assertEqual(layout.tables[1].column_count, 5)
self.assertEqual(layout.tables[1].page_number, 1)
layout = result[1]
self.assertEqual(len(layout.tables), 1)
self.assertEqual(layout.page_number, 2)
self.assertEqual(layout.tables[0].row_count, 24)
self.assertEqual(layout.tables[0].column_count, 5)
self.assertEqual(layout.tables[0].page_number, 2)
self.assertFormPagesHasValues(result)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
def test_content_multipage_table_span_transform(self, client):
with open(self.multipage_table_pdf, "rb") as fd:
myform = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = client._client._deserialize(AnalyzeOperationResult, raw_response)
extracted_layout = prepare_content_result(analyze_result)
responses.append(analyze_result)
responses.append(extracted_layout)
poller = client.begin_recognize_content(myform, cls=callback)
result = poller.result()
raw_response = responses[0]
layout = responses[1]
page_results = raw_response.analyze_result.page_results
read_results = raw_response.analyze_result.read_results
# Check form pages
self.assertFormPagesTransformCorrect(layout, read_results, page_results)
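    # --- Illustrative sketch (editor addition, not part of the original test file).
    # It condenses the raw-response callback pattern the transform tests above
    # rely on: the service payload is deserialized into AnalyzeOperationResult
    # and the user-facing layout is produced by prepare_content_result. The
    # endpoint, key and path arguments are placeholders; _client._deserialize is
    # a private hook, used here only because the tests above use it the same way.
    def _content_transform_example(endpoint, key, path):
        client = FormRecognizerClient(endpoint, AzureKeyCredential(key))
        captured = {}
        def callback(raw_response, _, headers):
            analyze_result = client._client._deserialize(AnalyzeOperationResult, raw_response)
            captured["raw"] = analyze_result                              # generated model
            captured["layout"] = prepare_content_result(analyze_result)   # FormPage list
        with open(path, "rb") as fd:
            poller = client.begin_recognize_content(fd.read(), cls=callback)
        pages = poller.result()
        return pages, captured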
| 41.074194
| 130
| 0.693317
|
| true
| true
|
1c3f6a5f08afd75c67f920ff05c9b729750be194
| 35
|
py
|
Python
|
TechAdoption/__init__.py
|
Epeiffer1/TechAdoption
|
54dcdf95ba19699f6fc901e92551fb71557d2f23
|
[
"MIT"
] | 15
|
2021-08-05T17:58:38.000Z
|
2022-03-29T22:16:37.000Z
|
TechAdoption/__init__.py
|
Epeiffer1/TechAdoption
|
54dcdf95ba19699f6fc901e92551fb71557d2f23
|
[
"MIT"
] | null | null | null |
TechAdoption/__init__.py
|
Epeiffer1/TechAdoption
|
54dcdf95ba19699f6fc901e92551fb71557d2f23
|
[
"MIT"
] | 1
|
2020-06-12T18:07:52.000Z
|
2020-06-12T18:07:52.000Z
|
# Dummy file to make this a package
| 35
| 35
| 0.771429
| true
| true
|
|
1c3f6ad824ec4cf4650b2b94fd15be8fa9305a23
| 21,915
|
py
|
Python
|
dist_filter_torch.py
|
ShansanChu/filter_pruning_fpgm
|
ea24a5a8aaa2642937a7655eddb5b0c8c8328d3f
|
[
"MIT"
] | 4
|
2021-02-01T15:08:09.000Z
|
2021-07-15T08:47:33.000Z
|
dist_filter_torch.py
|
ShansanChu/filter_pruning_fpgm
|
ea24a5a8aaa2642937a7655eddb5b0c8c8328d3f
|
[
"MIT"
] | null | null | null |
dist_filter_torch.py
|
ShansanChu/filter_pruning_fpgm
|
ea24a5a8aaa2642937a7655eddb5b0c8c8328d3f
|
[
"MIT"
] | 2
|
2021-01-11T12:54:44.000Z
|
2021-01-17T13:01:32.000Z
|
'''
filter pruners with FPGM
'''
import argparse
import os
import json
import torch
import sys
import numpy as np
import torch.nn.parallel
import torch.utils.data.distributed
from torch.optim.lr_scheduler import StepLR, MultiStepLR
from torchvision import datasets, transforms
import time
from models.mnist.lenet import LeNet
from models.cifar10.vgg import VGG
from nni.compression.torch.utils.config_validation import CompressorSchema
from schema import And, Optional, SchemaError
import torchvision
from utils.loggers import *
from utils.dist import *
from nni.compression.torch import L1FilterPruner, L2FilterPruner, FPGMPruner
from nni.compression.torch.utils.counter import count_flops_params
import logging
_logger = logging.getLogger('FPGM_Pruner')
_logger.setLevel(logging.INFO)
#/data/shan_4GPU/model_optimization/vision/references/classification/
sys.path.append("/data/shan_4GPU/model_optimization/vision/references/classification/")
from train import evaluate, train_one_epoch, load_data
def _setattr(model, name, module):
name_list = name.split(".")
for name in name_list[:-1]:
model = getattr(model, name)
setattr(model, name_list[-1], module)
def get_dummy_input_img(device):
dummy_input=torch.randn([1,3,224,224]).to(device)
return dummy_input
class BNWrapper(torch.nn.Module):
def __init__(self, module, module_name, module_type, config, pruner, prune_idx):
"""
Wrap a module to enable data parallelism, forward-method customization and buffer registration.
Parameters
----------
module : pytorch module
the module user wants to compress
config : dict
the configurations that users specify for compression
module_name : str
the name of the module to compress, wrapper module shares same name
module_type : str
the type of the module to compress
pruner : Pruner
the pruner used to calculate mask
"""
super().__init__()
# origin layer information
self.module = module
self.name = module_name
self.type = module_type
# config and pruner
self.config = config
self.pruner = pruner
# register buffer for mask
self.register_buffer("weight_mask", torch.ones(self.module.weight.shape))
if hasattr(self.module, 'bias') and self.module.bias is not None:
self.register_buffer("bias_mask", torch.ones(self.module.bias.shape))
else:
self.register_buffer("bias_mask", None)
#update the bias mask
self.update_mask(prune_idx)
def update_mask(self,prune_idx):
for idx in prune_idx:
self.bias_mask[idx]=0
self.weight_mask[idx]=0 # add pruning after BN layers also
def forward(self, *inputs):
# apply mask to weight, bias
self.module.weight.data = self.module.weight.data.mul_(self.weight_mask)
if hasattr(self.module, 'bias') and self.module.bias is not None:
self.module.bias.data = self.module.bias.data.mul_(self.bias_mask)
return self.module(*inputs)
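# --- Illustrative sketch (editor addition, defined but never called by the
# script). It shows the effect of BNWrapper: channels listed in prune_idx get
# a zeroed affine scale/shift, so their feature maps come out as all zeros.
def _bn_wrapper_demo():
    bn = torch.nn.BatchNorm2d(4)
    wrapped = BNWrapper(bn, 'demo.bn', 'BatchNorm2d', None, None, prune_idx=[1, 3])
    x = torch.randn(2, 4, 8, 8)
    y = wrapped(x)
    # pruned channels are exactly zero: 0 * normalized + 0
    assert torch.all(y[:, 1] == 0) and torch.all(y[:, 3] == 0)
    return y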
class MyPruner(FPGMPruner):
def __init__(self,model,config_list,dependency_aware=False):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
super().__init__(model, config_list, dependency_aware=False,dummy_input=get_dummy_input_img(device))
def update_bn(self):
"""
apply mask to the corresponding bn layer
"""
self.update_mask()
masked={}
def prune_idx(array):
N=len(array)
pruned_id = [i for i in range(N) if not np.all(array[i] == 1)]  # channels whose mask is not all ones
return pruned_id
for module in self.bound_model.named_modules():
if isinstance(module[1],PrunerModuleWrapper):
masked[module[0]]=module[1]
if isinstance(module[1], torch.nn.BatchNorm2d) and 'bn3' not in module[0]:  # for resnet, do not prune the residual layers
to_mask=module[0].replace('bn','conv')
print(to_mask,module[0],masked)
if to_mask in masked:
mask=masked[to_mask].state_dict()['weight_mask']
pruned_idx=prune_idx(mask.cpu().numpy())
module_type=type(module[1]).__name__
wrapper=BNWrapper(module[1],module[0], module_type, None, self, pruned_idx)
print(wrapper)
#wrapper = PrunerModuleWrapper(layer.module, layer.name, layer.type, config, self)
assert hasattr(module[1], 'weight'), "module %s does not have 'weight' attribute" % module[0]
# move newly registered buffers to the same device of weight
wrapper.to(module[1].weight.device)
_setattr(self.bound_model, wrapper.name, wrapper)
self.modules_wrapper.append(wrapper)
else:
continue
def compress(self):
print(self.config_list)
self.update_bn()
return self.bound_model
def select_config(self, layer):
"""
overwite schema
"""
ret = None
for config in self.config_list:
config = config.copy()
# expand config if key `default` is in config['op_types']
if 'op_types' in config and 'default' in config['op_types']:
expanded_op_types = []
for op_type in config['op_types']:
if op_type == 'default':
expanded_op_types.extend(default_layers.weighted_modules)
else:
expanded_op_types.append(op_type)
config['op_types'] = expanded_op_types
# check if condition is satisfied
if 'exclude_names' in config and config['exclude_names'] in layer.name:
continue
if 'op_types' in config and layer.type not in config['op_types']:
continue
if 'op_names' in config and layer.name not in config['op_names']:
continue
ret = config
if ret is None or 'exclude' in ret:
return None
#print('============',ret)
#print(config['exclude_names'],'-----',layer.name)
return ret
def validate_config(self, model, config_list):
schema = CompressorSchema([{
Optional('sparsity'): And(float, lambda n: 0 < n < 1),
Optional('op_types'): ['Conv2d'],
Optional('op_names'): [str],
Optional('exclude_names'):str,
Optional('exclude'): bool
}], model, _logger)
schema.validate(config_list)
for config in config_list:
if 'exclude' not in config and 'sparsity' not in config:
raise SchemaError('Either sparsity or exclude must be specified!')
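# Example config accepted by validate_config above (editor illustration,
# mirroring the config_list built later in main()):
# config_list = [{
#     'sparsity': 0.3,               # fraction of filters to prune
#     'op_types': ['Conv2d'],
#     'exclude_names': 'downsample'  # skip layers whose name contains this
# }]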
def get_data(dataset, data_dir, batch_size, test_batch_size):
'''
get data for imagenet
'''
nThread=4
pin=True # for cuda device
traindir = os.path.join(data_dir, 'train')
valdir = os.path.join(data_dir, 'validation')
print('train_dir is ',traindir)
dataset, dataset_test, train_sampler, test_sampler = load_data(traindir, valdir, False,True)
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size,
sampler=train_sampler, num_workers=nThread, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
dataset_test, batch_size=test_batch_size,
sampler=test_sampler, num_workers=nThread, pin_memory=True)
criterion = torch.nn.CrossEntropyLoss()
return train_loader, val_loader, criterion
from nni.compression.torch.compressor import *
def train(args, model, device, train_loader, criterion, optimizer, epoch,logger, callback=None):
model.train()
paral=get_world_size()
print(len(train_loader.dataset))
Nstep=len(train_loader.dataset)//paral
loss_per_batch=AverageMeter()
overall_time=AverageMeter()
print('current device is {}'.format(device))
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
#print(data.shape)
stime=time.time()
output = model(data)
#if batch_idx%args.log_interval==0:
# print('The performance of training is {} fps'.format(args.batch_size/(etime-stime)))
loss = criterion(output, target)
loss.backward()
loss_per_batch.update(loss)
# callback should be inserted between loss.backward() and optimizer.step()
if callback:
callback()
optimizer.step()
etime=time.time()
overall_time.update(etime-stime)
if batch_idx%args.log_interval==0:
print('The performance of training is {} fps'.format(args.batch_size/(etime-stime)))
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
args.rank = get_rank()
#if args.rank==0:
tensorboard_log = []
tensorboard_train_loss=[]
tensorboard_lr=[]
wrap_mask=[(module[0],module[1].state_dict()['weight_mask'])
for module in model.named_modules() if isinstance(module[1],PrunerModuleWrapper)]
bn_mask=[(module[0],module[1].state_dict()['bias_mask'])
for module in model.named_modules() if isinstance(module[1],BNWrapper)]
wrap_mask+=bn_mask
masks=[(mask[0],mask[1].cpu().numpy()) for mask in wrap_mask]
def ratio(array):
N=len(array)
remain=sum([np.all(array[i]==1) for i in range(N)])
return (remain,N)
mask_remain=[(mask[0],ratio(mask[1])) for mask in masks]
for i, (name,ratios) in enumerate(mask_remain):
tensorboard_log += [(f"{name}_num_filters", ratios[1])]
tensorboard_log += [(f"{name}_num_filters_remain", ratios[0])]
tensorboard_train_loss += [("loss", loss.item())]
tensorboard_lr += [("lr", optimizer.param_groups[0]['lr'])]
logger.list_of_scalars_summary('train', tensorboard_log,
args.batch_size*batch_idx+(epoch)*Nstep)
logger.list_of_scalars_summary('train_loss', tensorboard_train_loss,
args.batch_size*batch_idx+(epoch)*Nstep)
logger.list_of_scalars_summary('learning_rate', tensorboard_lr,
args.batch_size*batch_idx+(epoch)*Nstep)
#bn_weights = gather_bn_weights(model.module_list, prune_idx)
#logger.writer.add_histogram('bn_weights/hist', bn_weights.numpy(), epoch, bins='doane')
overall_time.reduce('mean')
print('overall per-card average time is', overall_time.avg)
def test(model, device, criterion, val_loader,step,logger):
paral=get_world_size()
model.eval()
test_loss = 0
correct_curr = 0
correct=AverageMeter()
print('current device is {}'.format(device))
with torch.no_grad():
for idx,(data, target) in enumerate(val_loader):
data, target = data.to(device), target.to(device)
stime=time.time()
output = model(data)
etime=time.time()
if idx%args.log_interval==0:
print('Performance for inference is {} seconds'.format(etime-stime))
# sum up batch loss
test_loss += criterion(output, target).item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct_curr += pred.eq(target.view_as(pred)).sum().item()
correct.update(pred.eq(target.view_as(pred)).sum().item())
if idx % args.log_interval == 0:
print('Evaluation: [{}/{} ({:.0f}%)]\tcorrect: {:.6f}'.format(
idx * len(data), len(val_loader.dataset),
100. * idx / len(val_loader), correct_curr))
#logger.list_of_scalars_summary('valid', test_loss, idx)
print('Done for the validation dataset')
test_loss /= (len(val_loader.dataset)/paral)
correct.reduce('sum')
accuracy = correct.sum/ len(val_loader.dataset)
print('correct (avg per card) is {} and accuracy is {}'.format(correct.avg, accuracy))
curr_rank=get_rank()
logger.list_of_scalars_summary('valid_loss',[('loss',test_loss)],step)
logger.list_of_scalars_summary('valid_accuracy',[('accuracy',accuracy)],step)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct.avg, len(val_loader.dataset), 100. * accuracy))
return accuracy
def get_dummy_input(args, device):
if args.dataset=='imagenet':
dummy_input=torch.randn([args.test_batch_size,3,224,224]).to(device)
return dummy_input
def get_input_size(dataset):
if dataset == 'mnist':
input_size = (1, 1, 28, 28)
elif dataset == 'cifar10':
input_size = (1, 3, 32, 32)
elif dataset == 'imagenet':
input_size = (1, 3, 256, 256)
return input_size
def update_model(model,pruner):
# add by shan, update model at every epoch
pruner.bound_model=model
pruner.update_mask()  # fixed: the original referenced the method without calling it
return pruner.bound_model
def main(args):
# prepare dataset
torch.manual_seed(0)
#device = torch.device('cuda',args.local_rank) if distributed else torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = set_device(args.cuda, args.local_rank)
inited=init_distributed(True) #use nccl fro communication
print('all cudas numbers are ',get_world_size())
distributed=(get_world_size()>1) and inited
paral=get_world_size()
args.rank = get_rank()
#write to tensorboard
logger = Logger("logs/"+str(args.rank))
print(distributed)
#device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device is',device)
print('rank is {} local rank is {}'.format(args.rank,args.local_rank))
train_loader, val_loader, criterion = get_data(args.dataset, args.data_dir, args.batch_size, args.test_batch_size)
model=torchvision.models.resnet50(pretrained=True)
model=model.cuda()
print('to distribute ',distributed)
if distributed:
model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank)
#model = torch.nn.DataParallel(model).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = MultiStepLR(
optimizer, milestones=[int(args.pretrain_epochs*0.5), int(args.pretrain_epochs*0.75)], gamma=0.1)
criterion=criterion.cuda()
#model, optimizer = get_trained_model_optimizer(args, device, train_loader, val_loader, criterion)
def short_term_fine_tuner(model, epochs=1):
for epoch in range(epochs):
train(args, model, device, train_loader, criterion, optimizer, epoch,logger)
def trainer(model, optimizer, criterion, epoch, callback):
return train(args, model, device, train_loader, criterion, optimizer, epoch=epoch, logger=logger, callback=callback)
def evaluator(model,step):
return test(model, device, criterion, val_loader,step,logger)
# used to save the performance of the original & pruned & finetuned models
result = {'flops': {}, 'params': {}, 'performance':{}}
flops, params = count_flops_params(model, get_input_size(args.dataset))
result['flops']['original'] = flops
result['params']['original'] = params
evaluation_result = evaluator(model,0)
print('Evaluation result (original model): %s' % evaluation_result)
result['performance']['original'] = evaluation_result
# module types to prune, only "Conv2d" supported for channel pruning
if args.base_algo in ['l1', 'l2']:
op_types = ['Conv2d']
elif args.base_algo == 'level':
op_types = ['default']
config_list = [{
'sparsity': args.sparsity,
'op_types': op_types,
'exclude_names':'downsample'
}]
dummy_input = get_dummy_input(args, device)
if args.pruner == 'FPGMPruner':
pruner=MyPruner(model,config_list)
else:
raise ValueError(
"Pruner not supported.")
# Pruner.compress() returns the masked model
model = pruner.compress()
evaluation_result = evaluator(model,0)
print('Evaluation result (masked model): %s' % evaluation_result)
result['performance']['pruned'] = evaluation_result
if args.rank==0 and args.save_model:
pruner.export_model(
os.path.join(args.experiment_data_dir, 'model_masked.pth'), os.path.join(args.experiment_data_dir, 'mask.pth'))
print('Masked model saved to %s' % args.experiment_data_dir)
def wrapped(module):
return isinstance(module,BNWrapper) or isinstance(module,PrunerModuleWrapper)
wrap_mask=[module for module in model.named_modules() if wrapped(module[1])]
for mm in wrap_mask:
print('====****'*10)
print(mm[0])
print(mm[1].state_dict().keys())
print('weight mask is ',mm[1].state_dict()['weight_mask'])
if 'bias_mask' in mm[1].state_dict():
print('bias mask is ',mm[1].state_dict()['bias_mask'])
if args.fine_tune:
if args.dataset in ['imagenet'] and args.model == 'resnet50':
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
scheduler = MultiStepLR(
optimizer, milestones=[int(args.fine_tune_epochs*0.3), int(args.fine_tune_epochs*0.6),int(args.fine_tune_epochs*0.8)], gamma=0.1)
else:
raise ValueError("Only imagenet + resnet50 is supported for fine-tuning.")
best_acc = 0
for epoch in range(args.fine_tune_epochs):
print('start fine tune for epoch {}/{}'.format(epoch,args.fine_tune_epochs))
stime=time.time()
train(args, model, device, train_loader, criterion, optimizer, epoch,logger)
scheduler.step()
acc = evaluator(model,epoch)
print('end fine tune for epoch {}/{} for {} seconds'.format(epoch,
args.fine_tune_epochs,time.time()-stime))
if acc > best_acc and args.rank==0:
best_acc = acc
torch.save(model,os.path.join(args.experiment_data_dir,args.model,'finetune_model.pt'))
torch.save(model.state_dict(), os.path.join(args.experiment_data_dir, 'model_fine_tuned.pth'))
print('Evaluation result (fine tuned): %s' % best_acc)
print('Fine-tuned model saved to %s' % args.experiment_data_dir)
result['performance']['finetuned'] = best_acc
if args.rank==0:
with open(os.path.join(args.experiment_data_dir, 'result.json'), 'w+') as f:
json.dump(result, f)
if __name__ == '__main__':
def str2bool(s):
if isinstance(s, bool):
return s
if s.lower() in ('yes', 'true', 't', 'y', '1'):
return True
if s.lower() in ('no', 'false', 'f', 'n', '0'):
return False
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='PyTorch Example for SimulatedAnnealingPruner')
# dataset and model
parser.add_argument('--dataset', type=str, default='imagenet',
help='dataset to use; currently only imagenet is supported')
parser.add_argument('--data-dir', type=str, default='./data/',
help='dataset directory')
parser.add_argument('--model', type=str, default='resnet50',
help='model to use, only resnet50')
parser.add_argument('--cuda',type=str2bool,default=True,
help='whether use cuda')
parser.add_argument('--load-pretrained-model', type=str2bool, default=False,
help='whether to load pretrained model')
parser.add_argument('--pretrained-model-dir', type=str, default='./',
help='path to pretrained model')
parser.add_argument('--pretrain-epochs', type=int, default=100,
help='number of epochs to pretrain the model')
parser.add_argument("--local_rank",type=int,help='Local rank. Necessary for distributed train')
parser.add_argument('--batch-size', type=int, default=256,
help='input batch size for training (default: 256)')
parser.add_argument('--test-batch-size', type=int, default=256,
help='input batch size for testing (default: 256)')
parser.add_argument('--fine-tune', type=str2bool, default=True,
help='whether to fine-tune the pruned model')
parser.add_argument('--fine-tune-epochs', type=int, default=100,
help='epochs to fine tune')
parser.add_argument('--experiment-data-dir', type=str, default='./experiment_data/resnet_bn',
help='For saving experiment data')
# pruner
parser.add_argument('--pruner', type=str, default='FPGMPruner',
help='pruner to use')
# fix: args.base_algo is read in main() but was never registered; 'l1' is an assumed default
parser.add_argument('--base-algo', type=str, default='l1',
help='base pruning algorithm: l1, l2 or level')
parser.add_argument('--sparsity', type=float, default=0.3,
help='overall target sparsity')
# others
parser.add_argument('--log-interval', type=int, default=50,
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', type=str2bool, default=True,
help='For Saving the current Model')
args = parser.parse_args()
if not os.path.exists(args.experiment_data_dir):
os.makedirs(args.experiment_data_dir)
main(args)
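# --- Example launch (editor addition; the dataset path is a placeholder).
# --local_rank is supplied automatically by torch.distributed.launch:
# python -m torch.distributed.launch --nproc_per_node=4 dist_filter_torch.py \
#     --data-dir /path/to/imagenet --sparsity 0.3 --fine-tune-epochs 30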
| 43.310277
| 145
| 0.630116
|
| true
| true
|
1c3f6b75a0f5defb105eb5e2aee525e5912bf9b8
| 35,124
|
py
|
Python
|
pytrx/async_tron.py
|
Connor-Holmes/PyTRX
|
7d129f27a6e87c1f1bdad93a7ae9503390b2ec70
|
[
"MIT"
] | null | null | null |
pytrx/async_tron.py
|
Connor-Holmes/PyTRX
|
7d129f27a6e87c1f1bdad93a7ae9503390b2ec70
|
[
"MIT"
] | null | null | null |
pytrx/async_tron.py
|
Connor-Holmes/PyTRX
|
7d129f27a6e87c1f1bdad93a7ae9503390b2ec70
|
[
"MIT"
] | null | null | null |
from typing import Union, Tuple, Optional
import asyncio
import time
from pprint import pprint
import json
from decimal import Decimal
from pytrx import keys
from pytrx.async_contract import AsyncContract, ShieldedTRC20, AsyncContractMethod
from pytrx.keys import PrivateKey
from pytrx.abi import tron_abi
from pytrx.defaults import conf_for_name
from pytrx.providers.async_http import AsyncHTTPProvider
from pytrx.exceptions import (
BadSignature,
BadKey,
BadHash,
BlockNotFound,
AssetNotFound,
TaposError,
UnknownError,
TransactionError,
ValidationError,
ApiError,
AddressNotFound,
TransactionNotFound,
TvmError,
BugInJavaTron,
)
TAddress = str
DEFAULT_CONF = {
'fee_limit': 10_000_000,
'timeout': 10.0,  # in seconds
}
def current_timestamp() -> int:
return int(time.time() * 1000)
# noinspection PyBroadException
class AsyncTransactionRet(dict):
def __init__(self, iterable, client: "AsyncTron", method: AsyncContractMethod = None):
super().__init__(iterable)
self._client = client
self._txid = self["txid"]
self._method = method
@property
def txid(self):
"""The transaction id in hex."""
return self._txid
async def wait(self, timeout=30, interval=1.6, solid=False) -> dict:
"""Wait the transaction to be on chain.
:returns: TransactionInfo
"""
get_transaction_info = self._client.get_transaction_info
if solid:
get_transaction_info = self._client.get_solid_transaction_info
end_time = time.time() + timeout
while time.time() < end_time:
try:
return await get_transaction_info(self._txid)
except TransactionNotFound:
await asyncio.sleep(interval)
raise TransactionNotFound("timeout and can not find the transaction")
async def result(self, timeout=30, interval=1.6, solid=False) -> dict:
"""Wait the contract calling result.
:returns: Result of contract method
"""
if self._method is None:
raise TypeError("Not a smart contract call")
receipt = await self.wait(timeout, interval, solid)
if receipt.get('result', None) == 'FAILED':
msg = receipt.get('resMessage', receipt['result'])
if receipt['receipt']['result'] == 'REVERT':
try:
result = receipt.get('contractResult', [])
if result and len(result[0]) > (4 + 32) * 2:
error_msg = tron_abi.decode_single('string', bytes.fromhex(result[0])[4 + 32 :])
msg = "{}: {}".format(msg, error_msg)
except Exception:
pass
raise TvmError(msg)
return self._method.parse_output(receipt['contractResult'][0])
# noinspection PyBroadException,PyProtectedMember
class AsyncTransaction(object):
"""The Transaction object, signed or unsigned."""
def __init__(self,
raw_data: dict,
client: "AsyncTron" = None,
method: AsyncContractMethod = None,
txid: str = "",
permission: dict = None,
signature: list = None):
self._raw_data: dict = raw_data
self._signature: list = signature or []
self._client = client
self._method = method
self.txid: str = txid
"""The transaction id in hex."""
self._permission: Optional[dict] = permission
# IMPORTANT must use "Transaction.create" to create a new Transaction
@classmethod
async def create(cls, *args, **kwargs) -> Optional["AsyncTransaction"]:
_tx = cls(*args, **kwargs)
if not _tx.txid or not _tx._permission:
await _tx.check_sign_weight()
return _tx
async def check_sign_weight(self):
sign_weight = await self._client.get_sign_weight(self)
if "transaction" not in sign_weight:
self._client._handle_api_error(sign_weight)
raise TransactionError('transaction not in sign_weight')
self.txid = sign_weight["transaction"]["transaction"]["txID"]
# when the account does not exist on-chain
self._permission = sign_weight.get("permission", None)
def to_json(self) -> dict:
return {
"txID": self.txid, "raw_data": self._raw_data,
"signature": self._signature, "permission": self._permission
}
@classmethod
async def from_json(cls, data: Union[str, dict], client: "AsyncTron" = None) -> "AsyncTransaction":
if isinstance(data, str):  # fix: the original tested the json module instead of the payload
data = json.loads(data)
return await cls.create(
client=client,
txid=data['txID'], permission=data['permission'],
raw_data=data['raw_data'], signature=data['signature']
)
def inspect(self) -> "AsyncTransaction":
pprint(self.to_json())
return self
def sign(self, priv_key: PrivateKey) -> "AsyncTransaction":
"""Sign the transaction with a private key."""
assert self.txid, "txID not calculated"
assert self.is_expired is False, 'expired'
if self._permission is not None:
addr_of_key = priv_key.public_key.to_hex_address()
for key in self._permission["keys"]:
if key["address"] == addr_of_key:
break
else:
raise BadKey(
"provided private key is not in the permission list",
"provided {}".format(priv_key.public_key.to_base58check_address()),
"required {}".format(self._permission),
)
sig = priv_key.sign_msg_hash(bytes.fromhex(self.txid))
self._signature.append(sig.hex())
return self
async def broadcast(self) -> AsyncTransactionRet:
"""Broadcast the transaction to TRON network."""
return AsyncTransactionRet(await self._client.broadcast(self), client=self._client, method=self._method)
@property
def is_expired(self) -> bool:
return current_timestamp() >= self._raw_data['expiration']
async def update(self):
"""update Transaction, change ref_block and txID, remove all signature"""
self._raw_data["timestamp"] = current_timestamp()
self._raw_data["expiration"] = self._raw_data["timestamp"] + 60_000
ref_block_id = await self._client.get_latest_solid_block_id()
# last 2 byte of block number part
self._raw_data["ref_block_bytes"] = ref_block_id[12:16]
# last half part of block hash
self._raw_data["ref_block_hash"] = ref_block_id[16:32]
self.txid = ""
self._permission = None
self._signature = []
sign_weight = await self._client.get_sign_weight(self)
if "transaction" not in sign_weight:
self._client._handle_api_error(sign_weight)
return # unreachable
self.txid = sign_weight["transaction"]["transaction"]["txID"]
# when the account does not exist on-chain
self._permission = sign_weight.get("permission", None)
# remove all _signature
self._signature = []
def __str__(self):
return json.dumps(self.to_json(), indent=2)
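# Round-trip note (editor illustration): a signed transaction can be persisted
# and restored without re-querying sign weight, because create() skips
# check_sign_weight when both txid and permission are present in the payload:
#     saved = txn.to_json()
#     restored = await AsyncTransaction.from_json(saved, client=client)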
# noinspection PyBroadException
class AsyncTransactionBuilder(object):
"""TransactionBuilder, to build a :class:`~Transaction` object."""
def __init__(self, inner: dict, client: "AsyncTron", method: AsyncContractMethod = None):
self._client = client
self._raw_data = {
"contract": [inner],
"timestamp": current_timestamp(),
"expiration": current_timestamp() + 60_000,
"ref_block_bytes": None,
"ref_block_hash": None,
}
if inner.get('type', None) in ['TriggerSmartContract', 'CreateSmartContract']:
self._raw_data["fee_limit"] = self._client.conf['fee_limit']
self._method = method
def with_owner(self, addr: TAddress) -> "AsyncTransactionBuilder":
"""Set owner of the transaction."""
if "owner_address" in self._raw_data["contract"][0]["parameter"]["value"]:
self._raw_data["contract"][0]["parameter"]["value"]["owner_address"] = keys.to_hex_address(addr)
else:
raise TypeError("can not set owner")
return self
def permission_id(self, perm_id: int) -> "AsyncTransactionBuilder":
"""Set permission_id of the transaction."""
self._raw_data["contract"][0]["Permission_id"] = perm_id
return self
def memo(self, memo: Union[str, bytes]) -> "AsyncTransactionBuilder":
"""Set memo of the transaction."""
data = memo.encode() if isinstance(memo, (str,)) else memo
self._raw_data["data"] = data.hex()
return self
def fee_limit(self, value: int) -> "AsyncTransactionBuilder":
"""Set fee_limit of the transaction, in `SUN`."""
self._raw_data["fee_limit"] = value
return self
async def build(self, options=None, **kwargs) -> AsyncTransaction:
"""Build the transaction."""
ref_block_id = await self._client.get_latest_solid_block_id()
# last 2 byte of block number part
self._raw_data["ref_block_bytes"] = ref_block_id[12:16]
# last half part of block hash
self._raw_data["ref_block_hash"] = ref_block_id[16:32]
if self._method:
return await AsyncTransaction.create(self._raw_data, client=self._client, method=self._method)
return await AsyncTransaction.create(self._raw_data, client=self._client)
# noinspection PyBroadException
class AsyncTrx(object):
"""The Trx(transaction) API."""
def __init__(self, tron):
self._tron = tron
@property
def client(self) -> "AsyncTron":
return self._tron
def _build_transaction(
self, type_: str, obj: dict, *, method: AsyncContractMethod = None
) -> AsyncTransactionBuilder:
inner = {
"parameter": {"value": obj, "type_url": "type.googleapis.com/protocol.{}".format(type_)},
"type": type_,
}
if method:
return AsyncTransactionBuilder(inner, client=self.client, method=method)
return AsyncTransactionBuilder(inner, client=self.client)
def transfer(self, from_: TAddress, to: TAddress, amount: int) -> AsyncTransactionBuilder:
"""Transfer TRX. ``amount`` in `SUN`."""
return self._build_transaction(
"TransferContract",
{"owner_address": keys.to_hex_address(from_), "to_address": keys.to_hex_address(to), "amount": amount},
)
# TRC10 asset
def asset_transfer(self, from_: TAddress, to: TAddress, amount: int, token_id: int) -> AsyncTransactionBuilder:
"""Transfer TRC10 tokens."""
return self._build_transaction(
"TransferAssetContract",
{
"owner_address": keys.to_hex_address(from_),
"to_address": keys.to_hex_address(to),
"amount": amount,
"asset_name": str(token_id).encode().hex(),
},
)
def asset_issue(
self,
owner: TAddress,
abbr: str,
total_supply: int,
*,
url: str,
name: str = None,
description: str = "",
start_time: int = None,
end_time: int = None,
precision: int = 6,
frozen_supply: list = None,
trx_num: int = 1,
num: int = 1,
) -> AsyncTransactionBuilder:
"""Issue a TRC10 token.
Almost all parameters have reasonable defaults.
"""
if name is None:
name = abbr
if start_time is None:
# use default expiration
start_time = current_timestamp() + 60_000
if end_time is None:
# use default expiration
end_time = current_timestamp() + 60_000 + 1
if frozen_supply is None:
frozen_supply = []
return self._build_transaction(
"AssetIssueContract",
{
"owner_address": keys.to_hex_address(owner),
"abbr": abbr.encode().hex(),
"name": name.encode().hex(),
"total_supply": total_supply,
"precision": precision,
"url": url.encode().hex(),
"description": description.encode().hex(),
"start_time": start_time,
"end_time": end_time,
"frozen_supply": frozen_supply,
"trx_num": trx_num,
"num": num,
"public_free_asset_net_limit": 0,
"free_asset_net_limit": 0,
},
)
# Account
def account_permission_update(self, owner: TAddress, perm: dict) -> "AsyncTransactionBuilder":
"""Update account permission.
:param owner: Address of owner
:param perm: Permission dict from :meth:`~pytrx.Tron.get_account_permission`
"""
if 'owner' in perm:
for key in perm['owner']['keys']:
key['address'] = keys.to_hex_address(key['address'])
if 'actives' in perm:
for act in perm['actives']:
for key in act['keys']:
key['address'] = keys.to_hex_address(key['address'])
if perm.get('witness', None):
for key in perm['witness']['keys']:
key['address'] = keys.to_hex_address(key['address'])
return self._build_transaction(
"AccountPermissionUpdateContract", dict(owner_address=keys.to_hex_address(owner), **perm),
)
def account_update(self, owner: TAddress, name: str) -> "AsyncTransactionBuilder":
"""Update account name. An account can only set name once."""
return self._build_transaction(
"UpdateAccountContract", {"owner_address": keys.to_hex_address(owner), "account_name": name.encode().hex()},
)
def freeze_balance(
self, owner: TAddress, amount: int, resource: str = "ENERGY", *, receiver: TAddress = None
) -> "AsyncTransactionBuilder":
"""Freeze balance to get energy or bandwidth, for 3 days.
:param resource: Resource type, can be ``"ENERGY"`` or ``"BANDWIDTH"``
"""
payload = {
"owner_address": keys.to_hex_address(owner),
"frozen_balance": amount,
"frozen_duration": 3,
"resource": resource,
}
if receiver is not None:
payload["receiver_address"] = keys.to_hex_address(receiver)
return self._build_transaction("FreezeBalanceContract", payload)
def unfreeze_balance(
self, owner: TAddress, resource: str = "ENERGY", receiver: TAddress = None
) -> "AsyncTransactionBuilder":
"""Unfreeze balance to get TRX back.
:param resource: Resource type, can be ``"ENERGY"`` or ``"BANDWIDTH"``
"""
payload = {
"owner_address": keys.to_hex_address(owner),
"resource": resource,
}
if receiver is not None:
payload["receiver_address"] = keys.to_hex_address(receiver)
return self._build_transaction("UnfreezeBalanceContract", payload)
# Witness
def create_witness(self, owner: TAddress, url: str) -> "AsyncTransactionBuilder":
"""Create a new witness, will consume 1_000 TRX."""
payload = {"owner_address": keys.to_hex_address(owner), "url": url.encode().hex()}
return self._build_transaction("WitnessCreateContract", payload)
def vote_witness(self, owner: TAddress, *votes: Tuple[TAddress, int]) -> "AsyncTransactionBuilder":
"""Vote for witnesses. Empty ``votes`` to clean voted."""
votes = [dict(vote_address=keys.to_hex_address(addr), vote_count=count) for addr, count in votes]
payload = {"owner_address": keys.to_hex_address(owner), "votes": votes}
return self._build_transaction("VoteWitnessContract", payload)
# Contract
def deploy_contract(self, owner: TAddress, contract: AsyncContract) -> "AsyncTransactionBuilder":
"""Deploy a new contract on chain."""
contract._client = self.client
contract.owner_address = owner
contract.origin_address = owner
contract.contract_address = None
return contract.deploy()
# noinspection PyBroadException
class AsyncTron(object):
"""The Async TRON API Client.
:param provider: An :class:`~pytrx.providers.HTTPProvider` object, can be configured to use private node
:param network: Which network to connect, one of ``"mainnet"``, ``"shasta"``, ``"nile"``, or ``"tronex"``
"""
# Address API
is_address = staticmethod(keys.is_address)
"""Is object a TRON address, both hex format and base58check format."""
is_base58check_address = staticmethod(keys.is_base58check_address)
"""Is object an address in base58check format."""
is_hex_address = staticmethod(keys.is_hex_address)
"""Is object an address in hex str format."""
to_base58check_address = staticmethod(keys.to_base58check_address)
"""Convert address of any format to a base58check format."""
to_hex_address = staticmethod(keys.to_hex_address)
"""Convert address of any format to a hex format."""
to_canonical_address = staticmethod(keys.to_base58check_address)
def __init__(self, provider: AsyncHTTPProvider = None, *, network: str = "mainnet", conf: dict = None):
self.conf = DEFAULT_CONF
"""The config dict."""
if conf is not None:
self.conf = dict(DEFAULT_CONF, **conf)
if provider is None:
self.provider = AsyncHTTPProvider(conf_for_name(network), self.conf['timeout'])
elif isinstance(provider, (AsyncHTTPProvider,)):
self.provider = provider
else:
raise TypeError("provider is not a HTTPProvider")
self._trx = AsyncTrx(self)
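    # Construction sketch: the client is also an async context manager, so the
    # underlying HTTP session is closed automatically on exit:
    #
    #     async with AsyncTron(network="nile") as client:
    #         print(await client.get_latest_block_number())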
@property
def trx(self) -> AsyncTrx:
"""
Helper object to send various transactions.
:type: Trx
"""
return self._trx
def _handle_api_error(self, payload: dict):
if payload.get("result", None) is True:
return
if "Error" in payload:
# class java.lang.NullPointerException : null
raise ApiError(payload["Error"])
if "code" in payload:
try:
msg = bytes.fromhex(payload["message"]).decode()
except Exception:
msg = payload.get("message", str(payload))
if payload["code"] == "SIGERROR":
raise BadSignature(msg)
elif payload["code"] == "TAPOS_ERROR":
raise TaposError(msg)
elif payload["code"] in ["TRANSACTION_EXPIRATION_ERROR", "TOO_BIG_TRANSACTION_ERROR"]:
raise TransactionError(msg)
elif payload["code"] == "CONTRACT_VALIDATE_ERROR":
raise ValidationError(msg)
raise UnknownError(msg, payload["code"])
if "result" in payload and isinstance(payload["result"], (dict,)):
return self._handle_api_error(payload["result"])
# Address utilities
def generate_address(self, priv_key=None) -> dict:
"""Generate a random address."""
if priv_key is None:
priv_key = PrivateKey.random()
return {
"base58check_address": priv_key.public_key.to_base58check_address(),
"hex_address": priv_key.public_key.to_hex_address(),
"private_key": priv_key.hex(),
"public_key": priv_key.public_key.hex(),
}
def get_address_from_passphrase(self, passphrase: str) -> dict:
"""Get an address from a passphrase, compatiable with `wallet/createaddress`."""
priv_key = PrivateKey.from_passphrase(passphrase.encode())
return self.generate_address(priv_key)
async def generate_zkey(self) -> dict:
"""Generate a random shielded address."""
return await self.provider.make_request("wallet/getnewshieldedaddress")
async def get_zkey_from_sk(self, sk: str, d: str = None) -> dict:
"""Get the shielded address from sk(spending key) and d(diversifier)."""
if len(sk) != 64:
raise BadKey("32 byte sk required")
if d and len(d) != 22:
raise BadKey("11 byte d required")
esk = await self.provider.make_request("wallet/getexpandedspendingkey", {"value": sk})
ask = esk["ask"]
nsk = esk["nsk"]
ovk = esk["ovk"]
ak = (await self.provider.make_request("wallet/getakfromask", {"value": ask}))["value"]
nk = (await self.provider.make_request("wallet/getnkfromnsk", {"value": nsk}))["value"]
ivk = (await self.provider.make_request("wallet/getincomingviewingkey", {"ak": ak, "nk": nk}))["ivk"]
if d is None:
d = (await self.provider.make_request("wallet/getdiversifier"))["d"]
ret = await self.provider.make_request("wallet/getzenpaymentaddress", {"ivk": ivk, "d": d})
pkD = ret["pkD"]
payment_address = ret["payment_address"]
return dict(
sk=sk, ask=ask, nsk=nsk, ovk=ovk, ak=ak, nk=nk, ivk=ivk, d=d, pkD=pkD, payment_address=payment_address,
)
# Account query
async def get_account(self, addr: TAddress) -> dict:
"""Get account info from an address."""
ret = await self.provider.make_request(
"wallet/getaccount", {"address": keys.to_base58check_address(addr), "visible": True}
)
if ret:
return ret
else:
raise AddressNotFound("account not found on-chain")
async def get_account_resource(self, addr: TAddress) -> dict:
"""Get resource info of an account."""
ret = await self.provider.make_request(
"wallet/getaccountresource", {"address": keys.to_base58check_address(addr), "visible": True},
)
if ret:
return ret
else:
raise AddressNotFound("account not found on-chain")
async def get_account_balance(self, addr: TAddress) -> Decimal:
"""Get TRX balance of an account. Result in `TRX`."""
info = await self.get_account(addr)
return Decimal(info.get("balance", 0)) / 1_000_000
async def get_account_asset_balances(self, addr: TAddress) -> dict:
"""Get all TRC10 token balances of an account."""
info = await self.get_account(addr)
return {p['key']: p['value'] for p in info.get("assetV2", {}) if p['value'] > 0}
async def get_account_asset_balance(self, addr: TAddress, token_id: Union[int, str]) -> int:
"""Get TRC10 token balance of an account. Result is in raw amount."""
if int(token_id) < 1000000 or int(token_id) > 1999999:
raise ValueError("invalid token_id range")
balances = await self.get_account_asset_balances(addr)
return balances.get(str(token_id), 0)
async def get_account_permission(self, addr: TAddress) -> dict:
"""Get account's permission info from an address. Can be used in `account_permission_update`."""
addr = keys.to_base58check_address(addr)
# will check account existence
info = await self.get_account(addr)
# For old accounts prior to AccountPermissionUpdate, these fields are not set.
# So default permission is for backward compatibility.
default_witness = None
if info.get("is_witness", None):
default_witness = {
"type": "Witness",
"id": 1,
"permission_name": "witness",
"threshold": 1,
"keys": [{"address": addr, "weight": 1}],
}
return {
"owner": info.get(
"owner_permission",
{"permission_name": "owner", "threshold": 1, "keys": [{"address": addr, "weight": 1}]},
),
"actives": info.get(
"active_permission",
[
{
"type": "Active",
"id": 2,
"permission_name": "active",
"threshold": 1,
"operations": "7fff1fc0033e0100000000000000000000000000000000000000000000000000",
"keys": [{"address": addr, "weight": 1}],
}
],
),
"witness": info.get("witness_permission", default_witness),
}
# Block query
async def get_latest_solid_block(self) -> dict:
return await self.provider.make_request("walletsolidity/getnowblock")
async def get_latest_solid_block_id(self) -> str:
"""Get latest solid block id in hex."""
info = await self.provider.make_request("wallet/getnodeinfo")
return info["solidityBlock"].split(",ID:", 1)[-1]
async def get_latest_solid_block_number(self) -> int:
"""Get latest solid block number. Implemented via `wallet/getnodeinfo`,
which is faster than `walletsolidity/getnowblock`."""
info = await self.provider.make_request("wallet/getnodeinfo")
return int(info["solidityBlock"].split(",ID:", 1)[0].replace("Num:", "", 1))
async def get_latest_block(self) -> dict:
"""Get latest block."""
return await self.provider.make_request("wallet/getnowblock", {"visible": True})
async def get_latest_block_id(self) -> str:
"""Get latest block id in hex."""
info = await self.provider.make_request("wallet/getnodeinfo")
return info["block"].split(",ID:", 1)[-1]
async def get_latest_block_number(self) -> int:
"""Get latest block number. Implemented via `wallet/getnodeinfo`, which is faster than `wallet/getnowblock`."""
info = await self.provider.make_request("wallet/getnodeinfo")
return int(info["block"].split(",ID:", 1)[0].replace("Num:", "", 1))
async def get_block(self, id_or_num: Union[None, str, int] = None, *, visible: bool = True) -> dict:
"""Get block from a block id or block number.
:param id_or_num: Block number, or Block hash(id), or ``None`` (default) to get the latest block.
:param visible: Use ``visible=False`` to get non-base58check addresses and strings instead of hex strings.
"""
if isinstance(id_or_num, (int,)):
block = await self.provider.make_request("wallet/getblockbynum", {"num": id_or_num, "visible": visible})
elif isinstance(id_or_num, (str,)):
block = await self.provider.make_request("wallet/getblockbyid", {"value": id_or_num, "visible": visible})
elif id_or_num is None:
block = await self.provider.make_request("wallet/getnowblock", {"visible": visible})
else:
raise TypeError("can not infer type of {}".format(id_or_num))
if 'Error' in (block or {}):
raise BugInJavaTron(block)
elif block:
return block
else:
raise BlockNotFound
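    # The three accepted query forms (the id value is a placeholder):
    #
    #     await client.get_block(100)                # by block number
    #     await client.get_block("0000000001ab...")  # by block id (hash)
    #     await client.get_block()                   # latest block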
async def get_transaction(self, txn_id: str) -> dict:
"""Get transaction from a transaction id."""
if len(txn_id) != 64:
raise BadHash("wrong transaction hash length")
ret = await self.provider.make_request("wallet/gettransactionbyid", {"value": txn_id, "visible": True})
self._handle_api_error(ret)
if ret:
return ret
raise TransactionNotFound
async def get_solid_transaction(self, txn_id: str) -> dict:
"""Get transaction from a transaction id, must be in solid block."""
if len(txn_id) != 64:
raise BadHash("wrong transaction hash length")
ret = await self.provider.make_request("walletsolidity/gettransactionbyid", {"value": txn_id, "visible": True})
self._handle_api_error(ret)
if ret:
return ret
raise TransactionNotFound
async def get_transaction_info(self, txn_id: str) -> dict:
"""Get transaction receipt info from a transaction id."""
if len(txn_id) != 64:
raise BadHash("wrong transaction hash length")
ret = await self.provider.make_request("wallet/gettransactioninfobyid", {"value": txn_id, "visible": True})
self._handle_api_error(ret)
if ret:
return ret
raise TransactionNotFound
async def get_solid_transaction_info(self, txn_id: str) -> dict:
"""Get transaction receipt info from a transaction id, must be in solid block."""
if len(txn_id) != 64:
raise BadHash("wrong transaction hash length")
ret = await self.provider.make_request(
"walletsolidity/gettransactioninfobyid", {"value": txn_id, "visible": True}
)
self._handle_api_error(ret)
if ret:
return ret
raise TransactionNotFound
# Chain parameters
async def list_witnesses(self) -> list:
"""List all witnesses, including SR, SRP, and SRC."""
# NOTE: visible parameter is ignored
ret = await self.provider.make_request("wallet/listwitnesses", {"visible": True})
witnesses = ret.get("witnesses", [])
for witness in witnesses:
witness["address"] = keys.to_base58check_address(witness["address"])
return witnesses
async def list_nodes(self) -> list:
"""List all nodes that current API node is connected to."""
# NOTE: visible parameter is ignored
ret = await self.provider.make_request("wallet/listnodes", {"visible": True})
nodes = ret.get("nodes", [])
for node in nodes:
node["address"]["host"] = bytes.fromhex(node["address"]["host"]).decode()
return nodes
async def get_node_info(self) -> dict:
"""Get current API node' info."""
return await self.provider.make_request("wallet/getnodeinfo", {"visible": True})
async def get_chain_parameters(self) -> dict:
"""List all chain parameters, values that can be changed via proposal."""
params = await self.provider.make_request("wallet/getchainparameters", {"visible": True})
return params.get("chainParameter", [])
# Asset (TRC10)
async def get_asset(self, id: int = None, issuer: TAddress = None) -> dict:
"""Get TRC10(asset) info by asset's id or issuer."""
if id and issuer:
raise ValueError("either query by id or issuer")
if id:
return await self.provider.make_request("wallet/getassetissuebyid", {"value": id, "visible": True})
else:
return await self.provider.make_request(
"wallet/getassetissuebyaccount", {"address": keys.to_base58check_address(issuer), "visible": True},
)
async def get_asset_from_name(self, name: str) -> dict:
"""Get asset info from its abbr name, might fail if there're duplicates."""
assets = [asset for asset in await self.list_assets() if asset['abbr'] == name]
if assets:
if len(assets) == 1:
return assets[0]
raise ValueError("duplicated assets with the same name", [asset['id'] for asset in assets])
raise AssetNotFound
async def list_assets(self) -> list:
"""List all TRC10 tokens(assets)."""
ret = await self.provider.make_request("wallet/getassetissuelist", {"visible": True})
assets = ret["assetIssue"]
for asset in assets:
asset["id"] = int(asset["id"])
asset["owner_address"] = keys.to_base58check_address(asset["owner_address"])
asset["name"] = bytes.fromhex(asset["name"]).decode()
if "abbr" in asset:
asset["abbr"] = bytes.fromhex(asset["abbr"]).decode()
else:
asset["abbr"] = ""
asset["description"] = bytes.fromhex(asset["description"]).decode("utf8", "replace")
asset["url"] = bytes.fromhex(asset["url"]).decode()
return assets
# Smart contract
async def get_contract(self, addr: TAddress) -> AsyncContract:
"""Get a contract object."""
addr = keys.to_base58check_address(addr)
info = await self.provider.make_request("wallet/getcontract", {"value": addr, "visible": True})
try:
self._handle_api_error(info)
except ApiError:
            # java-tron responds with a NullPointerException error for unknown contracts
raise AddressNotFound("contract address not found")
cntr = AsyncContract(
addr=addr,
bytecode=info.get("bytecode", ''),
name=info.get("name", ""),
abi=info.get("abi", {}).get("entrys", []),
origin_energy_limit=info.get("origin_energy_limit", 0),
user_resource_percent=info.get("consume_user_resource_percent", 100),
client=self,
)
return cntr
async def get_contract_as_shielded_trc20(self, addr: TAddress) -> ShieldedTRC20:
"""Get a Shielded TRC20 Contract object."""
contract = await self.get_contract(addr)
return ShieldedTRC20(contract)
async def trigger_const_smart_contract_function(
self, owner_address: TAddress, contract_address: TAddress, function_selector: str, parameter: str,
) -> str:
ret = await self.provider.make_request(
"wallet/triggerconstantcontract",
{
"owner_address": keys.to_base58check_address(owner_address),
"contract_address": keys.to_base58check_address(contract_address),
"function_selector": function_selector,
"parameter": parameter,
"visible": True,
},
)
self._handle_api_error(ret)
if 'message' in ret.get('result', {}):
msg = ret['result']['message']
result = ret.get('constant_result', [])
try:
if result and len(result[0]) > (4 + 32) * 2:
error_msg = tron_abi.decode_single('string', bytes.fromhex(result[0])[4 + 32 :])
msg = "{}: {}".format(msg, error_msg)
except Exception:
pass
raise TvmError(msg)
return ret["constant_result"][0]
# Transaction handling
async def broadcast(self, txn: AsyncTransaction) -> dict:
payload = await self.provider.make_request("/wallet/broadcasttransaction", txn.to_json())
self._handle_api_error(payload)
return payload
async def get_sign_weight(self, txn: AsyncTransaction) -> dict:
return await self.provider.make_request("wallet/getsignweight", txn.to_json())
async def close(self):
if not self.provider.client.is_closed:
await self.provider.client.aclose()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.provider.client.aclose()
| 38.428884
| 120
| 0.607875
| true
| true
|
1c3f6d28293ce3580e61d89bb0ac676e5899d742
| 1,247
|
py
|
Python
|
conanfile.py
|
jkleinecke/util
|
8dab223de686e15165b11e56deabdc21d0f063ab
|
[
"MIT"
] | null | null | null |
conanfile.py
|
jkleinecke/util
|
8dab223de686e15165b11e56deabdc21d0f063ab
|
[
"MIT"
] | null | null | null |
conanfile.py
|
jkleinecke/util
|
8dab223de686e15165b11e56deabdc21d0f063ab
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
import os
class UtilConan(ConanFile):
name = "util"
version = "0.1"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "txt", "env"
exports = "src/*", "CMakeLists.txt"
options = {"shared": [True, False]}
default_options = "shared=False"
def imports(self):
self.copy("*.dll", dst="bin", src="bin") # From bin to bin
self.copy("*.dylib*", dst="bin", src="lib") # From lib to bin
def build(self):
shared = "-DBUILD_SHARED_LIBS=ON" if self.options.shared else ""
cmake = CMake(self.settings)
self.run('cmake %s %s %s' % (self.conanfile_directory, cmake.command_line, shared))
self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include/util", src="src")
self.copy("*.lib", dst="lib", src="lib")
self.copy("*.dylib", dst="lib", src="lib")
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.a", dst="lib", src="lib")
def package_info(self):
self.cpp_info.libs = ["util"]
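# Local workflow sketch for this legacy (pre-Conan-2) recipe; the user/channel
# pair below is a placeholder:
#
#     conan export . user/testing
#     conan install util/0.1@user/testing --build=util -o util:shared=True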
| 34.638889
| 91
| 0.588613
| true
| true
|
1c3f6f00a36362626b78c0c97b85523d99e1c6cc
| 3,042
|
py
|
Python
|
corehq/messaging/smsbackends/apposit/models.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
corehq/messaging/smsbackends/apposit/models.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 94
|
2020-12-11T06:57:31.000Z
|
2022-03-15T10:24:06.000Z
|
corehq/messaging/smsbackends/apposit/models.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import requests
from corehq.apps.sms.models import SMS, SQLSMSBackend
from corehq.apps.sms.util import strip_plus
from corehq.messaging.smsbackends.apposit.forms import AppositBackendForm
from django.conf import settings
ETHIOPIA_COUNTRY_CODE = '251'
class AppositException(Exception):
pass
class SQLAppositBackend(SQLSMSBackend):
class Meta(object):
app_label = 'sms'
proxy = True
@classmethod
def get_opt_in_keywords(cls):
return ['START']
@classmethod
def get_opt_out_keywords(cls):
return ['STOP']
@classmethod
def get_available_extra_fields(cls):
return [
# the username used in basic auth on http requests
'application_id',
# the password used in basic auth on http requests
'application_token',
'from_number',
'host',
]
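    # Backend configuration sketch (all values are placeholders):
    #
    #     {
    #         "application_id": "my-app-id",        # basic-auth username
    #         "application_token": "my-app-token",  # basic-auth password
    #         "from_number": "8111",
    #         "host": "api.apposit.example.com",
    #     }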
@property
def url(self):
return 'https://%s/mmp/api/v2/json/sms/send' % self.config.host
@classmethod
def get_api_id(cls):
return 'APPOSIT'
@classmethod
def get_generic_name(cls):
return 'Apposit'
@classmethod
def get_form_class(cls):
return AppositBackendForm
def response_is_error(self, response_json):
return response_json.get('statusCode') not in ('0', 0)
def is_ethiopia_number(self, msg):
phone = strip_plus(msg.phone_number)
return phone.startswith(ETHIOPIA_COUNTRY_CODE)
def handle_error(self, response, response_json, msg):
exception_message = "Error with the Apposit backend. Http response code: %s; Apposit status: %s %s"
exception_params = (
response.status_code,
response_json.get('statusCode'),
response_json.get('statusMessage'),
)
raise AppositException(exception_message % exception_params)
def handle_success(self, response, response_json, msg):
msg.backend_message_id = response_json.get('messageId')
def send(self, msg, *args, **kwargs):
if not self.is_ethiopia_number(msg):
msg.set_system_error(SMS.ERROR_INVALID_DESTINATION_NUMBER)
return
config = self.config
data = {
'from': config.from_number,
'to': msg.phone_number,
'message': msg.text,
}
json_payload = json.dumps(data)
response = requests.post(
self.url,
auth=(config.application_id, config.application_token),
data=json_payload,
headers={'content-type': 'application/json'},
timeout=settings.SMS_GATEWAY_TIMEOUT,
)
try:
response_json = response.json()
        except Exception:
raise AppositException("Could not parse json response. HTTP response code: %s" % response.status_code)
if self.response_is_error(response_json):
self.handle_error(response, response_json, msg)
else:
self.handle_success(response, response_json, msg)
| 28.971429
| 114
| 0.636752
| true
| true
|
1c3f6f55e0cfc68d9078c041efbc80f1919db067
| 924
|
py
|
Python
|
apis/nb/clients/network_plug_and_play_client/models/ZtdDefaultImageListResult.py
|
CiscoDevNet/APIC-EM-Generic-Scripts-
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 45
|
2016-06-09T15:41:25.000Z
|
2019-08-06T17:13:11.000Z
|
apis/nb/clients/network_plug_and_play_client/models/ZtdDefaultImageListResult.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2016-06-12T03:03:56.000Z
|
2017-03-13T18:20:11.000Z
|
apis/nb/clients/network_plug_and_play_client/models/ZtdDefaultImageListResult.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 15
|
2016-06-22T03:51:37.000Z
|
2019-07-10T10:06:02.000Z
|
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
class ZtdDefaultImageListResult(object):
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'version': 'str',
'response': 'list[ZtdDefaultImage]'
}
self.attributeMap = {
'version': 'version',
'response': 'response'
}
self.version = None # str
self.response = None # list[ZtdDefaultImage]
| 23.1
| 97
| 0.50974
| true
| true
|
1c3f6f7dd3a5fc91947f13b25f2556617e624240
| 2,433
|
py
|
Python
|
linky_note/adapters/markdown/marko_ext/marko_builder.py
|
jb-delafosse/linky-note
|
51249414531bab98e45465aab695285698d59d80
|
[
"MIT"
] | 14
|
2021-02-26T16:21:02.000Z
|
2022-01-17T20:30:40.000Z
|
linky_note/adapters/markdown/marko_ext/marko_builder.py
|
jb-delafosse/linky-note
|
51249414531bab98e45465aab695285698d59d80
|
[
"MIT"
] | 76
|
2021-02-25T06:34:58.000Z
|
2022-03-30T19:06:26.000Z
|
linky_note/adapters/markdown/marko_ext/marko_builder.py
|
jb-delafosse/marko-backlinks
|
e7798ed49874a784b71e2a2a0ce0a1e7a1204389
|
[
"MIT"
] | null | null | null |
from typing import List, Optional, Union
from marko.block import BlankLine, Heading
from marko.block import List as MdList
from marko.block import ListItem, Paragraph
from marko.inline import Link, RawText
class MarkoBuilder:
@staticmethod
def build_raw_element(label: str) -> RawText:
raw_text = object.__new__(RawText)
raw_text.children = label
return raw_text
@staticmethod
def build_blank_line() -> BlankLine:
blank_line = object.__new__(BlankLine)
blank_line.inline_children = False
blank_line.override = False
blank_line.priority = 5
blank_line.virtual = False
return blank_line
@staticmethod
def build_heading(level: int, label: str) -> Heading:
raw_text = MarkoBuilder.build_raw_element(label)
heading = object.__new__(Heading)
heading.inline_children = True
heading.level = level
heading.override = True
heading.priority = 6
heading.virtual = False
heading.children = [raw_text]
return heading
@staticmethod
def build_list(items: List[ListItem], bullet: str = "*") -> MdList:
md_list = object.__new__(MdList)
md_list.bullet = bullet
md_list.inline_children = False
md_list.ordered = False
md_list.override = False
md_list.priority = 6
md_list.start = 1
md_list.children = items
return md_list
@staticmethod
def build_list_item(children: List[Union[Paragraph, MdList]]) -> ListItem:
item = object.__new__(ListItem)
item.inline_children = False
item.override = False
item.priority = 5
item.virtual = True
item.children = children
return item
@staticmethod
def build_link(dest: str, label: str, title: Optional[str]) -> Link:
link = object.__new__(Link)
link.label = label
link.title = title
link.dest = dest
link.children = [MarkoBuilder.build_raw_element(label)]
link.override = True
return link
@staticmethod
def build_paragraph(children: List[Union[Link, RawText]]) -> Paragraph:
paragraph = object.__new__(Paragraph)
paragraph.inline_children = True
paragraph.override = False
paragraph.priority = 1
paragraph.virtual = False
paragraph.children = children
return paragraph
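# Usage sketch (names are illustrative): assemble a "Backlinks" section as
# marko AST nodes that a markdown renderer can serialize back out:
#
#     heading = MarkoBuilder.build_heading(2, "Backlinks")
#     link = MarkoBuilder.build_link("note.md", "My Note", None)
#     item = MarkoBuilder.build_list_item([MarkoBuilder.build_paragraph([link])])
#     section = [heading, MarkoBuilder.build_blank_line(), MarkoBuilder.build_list([item])]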
| 31.192308
| 78
| 0.64776
| true
| true
|
1c3f70d650130584622f48491aba3dd16d3a1053
| 3,873
|
py
|
Python
|
paper/notebooks/util/kfac.py
|
npshub/last_layer_laplace
|
31ec9b70a292e9783e9005425793199a8830bf01
|
[
"BSD-3-Clause"
] | 60
|
2020-07-05T05:31:19.000Z
|
2022-02-07T05:05:40.000Z
|
paper/notebooks/util/kfac.py
|
npshub/last_layer_laplace
|
31ec9b70a292e9783e9005425793199a8830bf01
|
[
"BSD-3-Clause"
] | 8
|
2020-07-15T18:27:53.000Z
|
2021-10-18T07:53:22.000Z
|
paper/notebooks/util/kfac.py
|
npshub/last_layer_laplace
|
31ec9b70a292e9783e9005425793199a8830bf01
|
[
"BSD-3-Clause"
] | 12
|
2020-07-14T11:44:10.000Z
|
2021-11-06T03:10:15.000Z
|
##########################################################################
#
# Taken with modifications from
# https://github.com/wjmaddox/swa_gaussian/
#
##########################################################################
import torch
import torch.nn.functional as F
from torch.optim.optimizer import Optimizer
class KFAC(Optimizer):
def __init__(self, net, alpha=0.95):
""" K-FAC Preconditionner for Linear and Conv2d layers.
Computes the K-FAC of the second moment of the gradients.
It works for Linear and Conv2d layers and silently skip other layers.
Args:
net (torch.nn.Module): Network to precondition.
alpha (float): Running average parameter (if == 1, no r. ave.).
"""
self.alpha = alpha
self.params = []
self._fwd_handles = []
self._bwd_handles = []
self._iteration_counter = 0
for mod in net.modules():
mod_class = mod.__class__.__name__
if mod_class in ['Linear', 'Conv2d']:
handle = mod.register_forward_pre_hook(self._save_input)
self._fwd_handles.append(handle)
handle = mod.register_backward_hook(self._save_grad_output)
self._bwd_handles.append(handle)
params = [mod.weight]
if mod.bias is not None:
params.append(mod.bias)
d = {'params': params, 'mod': mod, 'layer_type': mod_class}
self.params.append(d)
super(KFAC, self).__init__(self.params, {})
def step(self):
for group in self.param_groups:
# Getting parameters
if len(group['params']) == 2:
weight, bias = group['params']
else:
weight = group['params'][0]
bias = None
state = self.state[group['mod']]
self._compute_covs(group, state)
self._iteration_counter += 1
def _save_input(self, mod, i):
"""Saves input of layer to compute covariance."""
self.state[mod]['x'] = i[0]
def _save_grad_output(self, mod, grad_input, grad_output):
"""Saves grad on output of layer to compute covariance."""
self.state[mod]['gy'] = grad_output[0] * grad_output[0].size(0)
def _compute_covs(self, group, state):
"""Computes the covariances."""
mod = group['mod']
x = self.state[group['mod']]['x']
gy = self.state[group['mod']]['gy']
# Computation of xxt
if group['layer_type'] == 'Conv2d':
x = F.unfold(x, mod.kernel_size, padding=mod.padding,
stride=mod.stride)
x = x.data.permute(1, 0, 2).contiguous().view(x.shape[1], -1)
else:
x = x.data.t()
# if mod.bias is not None:
# ones = torch.ones_like(x[:1])
# x = torch.cat([x, ones], dim=0)
if self._iteration_counter == 0:
state['xxt'] = torch.mm(x, x.t()) / float(x.shape[1])
else:
state['xxt'].addmm_(mat1=x, mat2=x.t(),
beta=(1. - self.alpha),
alpha=self.alpha / float(x.shape[1]))
# Computation of ggt
if group['layer_type'] == 'Conv2d':
gy = gy.data.permute(1, 0, 2, 3)
state['num_locations'] = gy.shape[2] * gy.shape[3]
gy = gy.contiguous().view(gy.shape[0], -1)
else:
gy = gy.data.t()
state['num_locations'] = 1
if self._iteration_counter == 0:
state['ggt'] = torch.mm(gy, gy.t()) / float(gy.shape[1])
else:
state['ggt'].addmm_(mat1=gy, mat2=gy.t(),
beta=(1. - self.alpha),
alpha=self.alpha / float(gy.shape[1]))
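# --- Editor's note: a minimal usage sketch (not part of the original file),
# built on a toy one-layer classifier. Note that this KFAC variant only
# accumulates the Kronecker factors 'xxt'/'ggt' per layer; its step() does
# not modify any weights.
net = torch.nn.Sequential(torch.nn.Linear(10, 5))
kfac = KFAC(net, alpha=0.95)
x, y = torch.randn(4, 10), torch.randint(0, 5, (4,))
loss = F.cross_entropy(net(x), y)
loss.backward()
kfac.step()  # fills state[mod]['xxt'] (10x10) and state[mod]['ggt'] (5x5)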
| 33.678261
| 77
| 0.506584
| true
| true
|
|
1c3f7124803e580cd7296463519e07f704c99418
| 1,728
|
py
|
Python
|
anchore_engine/subsys/discovery.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/subsys/discovery.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/subsys/discovery.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
"""
Utilities for service discovery. Basic methods are lookup and return of a list of endpoints given a service name.
Will use the local config as authority if entry is found or defer to the database for a lookup if necessary.
"""
import re
from anchore_engine.configuration import localconfig
from anchore_engine.db import db_services, session_scope
from anchore_engine.subsys import logger
def get_endpoints(service_name):
"""
Return a list of endpoint urls for the given service name.
:param service_name:
:return: list of url strings
"""
local_conf = localconfig.get_config()
urls = []
try:
if service_name + '_endpoint' in local_conf:
urls = [re.sub("/+$", "", local_conf[service_name + '_endpoint'])]
else:
with session_scope() as dbsession:
service_reports = db_services.get_byname(service_name, session=dbsession)
if service_reports:
for service in service_reports:
base_url = service.get('base_url')
if base_url:
apiversion = service.get('version', '')
urls.append('/'.join([base_url, apiversion]))
else:
raise Exception("cannot load valid endpoint from DB for service {}".format(service_name))
if not urls:
raise Exception("cannot locate registered service in DB: " + service_name)
except Exception as err:
logger.exception('Error during endpoint lookup for service {}'.format(service_name))
raise Exception("could not find valid endpoint - exception: " + str(err))
return urls
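# --- Editor's note: an illustrative sketch (not part of the original file)
# of the local-config branch above, using a hypothetical config dict; the
# real lookup goes through anchore_engine's localconfig and the database.
_local_conf = {'catalog_endpoint': 'http://catalog:8082/'}
_service = 'catalog'
_urls = [re.sub("/+$", "", _local_conf[_service + '_endpoint'])]
assert _urls == ['http://catalog:8082']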
| 38.4
| 117
| 0.624421
|
import re
from anchore_engine.configuration import localconfig
from anchore_engine.db import db_services, session_scope
from anchore_engine.subsys import logger
def get_endpoints(service_name):
local_conf = localconfig.get_config()
urls = []
try:
if service_name + '_endpoint' in local_conf:
urls = [re.sub("/+$", "", local_conf[service_name + '_endpoint'])]
else:
with session_scope() as dbsession:
service_reports = db_services.get_byname(service_name, session=dbsession)
if service_reports:
for service in service_reports:
base_url = service.get('base_url')
if base_url:
apiversion = service.get('version', '')
urls.append('/'.join([base_url, apiversion]))
else:
raise Exception("cannot load valid endpoint from DB for service {}".format(service_name))
if not urls:
raise Exception("cannot locate registered service in DB: " + service_name)
except Exception as err:
logger.exception('Error during endpoint lookup for service {}'.format(service_name))
raise Exception("could not find valid endpoint - exception: " + str(err))
return urls
| true
| true
|
1c3f7196de24de8454c0187dbe0a0fbd829d34ed
| 630
|
py
|
Python
|
setup.py
|
hsph/micropython-ili9341
|
1924159a32cf685812bd372d02b4ec2cb9c8e381
|
[
"MIT"
] | null | null | null |
setup.py
|
hsph/micropython-ili9341
|
1924159a32cf685812bd372d02b4ec2cb9c8e381
|
[
"MIT"
] | null | null | null |
setup.py
|
hsph/micropython-ili9341
|
1924159a32cf685812bd372d02b4ec2cb9c8e381
|
[
"MIT"
] | null | null | null |
import sys
sys.path.pop(0)
from setuptools import setup
setup(
name="micropython-ili9341",
py_modules=["ili934xnew"],
version="0.1.0",
description="Micropython Driver for ILI9341 display",
long_description="",
keywords="micropython tft lcd",
url="https://github.com/jeffmer/micropython-ili9341",
author="jeffmer",
author_email="",
maintainer="",
maintainer_email="",
license="MIT",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: Implementation :: MicroPython",
"License :: OSI Approved :: MIT License",
],
)
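# --- Editor's note: an illustrative sketch (not part of the original file).
# On a network-enabled MicroPython board this package would typically be
# installed with upip, using the name declared in setup() above.
import upip
upip.install('micropython-ili9341')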
| 26.25
| 74
| 0.644444
|
import sys
sys.path.pop(0)
from setuptools import setup
setup(
name="micropython-ili9341",
py_modules=["ili934xnew"],
version="0.1.0",
description="Micropython Driver for ILI9341 display",
long_description="",
keywords="micropython tft lcd",
url="https://github.com/jeffmer/micropython-ili9341",
author="jeffmer",
author_email="",
maintainer="",
maintainer_email="",
license="MIT",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: Implementation :: MicroPython",
"License :: OSI Approved :: MIT License",
],
)
| true
| true
|
1c3f7306637cd8a61fde0fb2b8d72c8f272b725a
| 4,680
|
py
|
Python
|
tasks/cron.py
|
jeeysie/site
|
f136050635cac9cc0174387ea60249f5e26e45a3
|
[
"MIT"
] | 66
|
2019-05-13T11:45:14.000Z
|
2020-11-02T11:58:52.000Z
|
tasks/cron.py
|
jeeysie/site
|
f136050635cac9cc0174387ea60249f5e26e45a3
|
[
"MIT"
] | 9
|
2019-04-26T02:05:13.000Z
|
2020-10-06T03:49:29.000Z
|
tasks/cron.py
|
jeeyshe/site
|
f136050635cac9cc0174387ea60249f5e26e45a3
|
[
"MIT"
] | 20
|
2019-12-30T06:23:17.000Z
|
2020-10-06T01:48:58.000Z
|
# coding: utf-8
"""
Created by Jeyrce.Lu 2020/4/4 5:16 PM, contact with jeyrce@gmail.com or website https://www.lujianxin.com
---------------------------------------------------------------------------------------------------------
>>> Scheduled (cron) tasks used in this project
"""
import logging
import datetime
import time
from django.core.mail import send_mail
from django.core.cache import caches
from django.contrib.auth import get_user_model
from django.template import loader
from tasks import app
from blog.models import Expand, Blog, Link
from tasks.mail import add_prefix, supervisor_receivers, webmaster_receivers
from ishare.settings import SERVER_EMAIL, SITE
logger = logging.getLogger(__name__)
UserAccount = get_user_model()
@app.task(name='cron.update_visit_count')
def update_visit_count(*args, **kwargs):
"""
每小时从redis更新网站总浏览量到mysql
"""
logger.info("Start update visit count")
cache = caches['four']
obj, is_created = Expand.objects.get_or_create(key='VISIT_CNT', defaults={'key': 'VISIT_CNT', 'value': '1'})
if not is_created:
real = cache.get('total', 0)
obj.value = str(int(obj.value) + real)
obj.save(update_fields=('value',))
cache.set('total', 0, 60 * 60 + 60)
logger.info("更新网站浏览量完成")
@app.task(name='cron.notify_new_link')
def notify_new_link(*args, **kwargs):
"""
每天定时提醒今日新增友链
"""
logger.info("Start notify new link today")
now = datetime.datetime.now()
start = now - datetime.timedelta(days=1, minutes=3)
links = Link.objects.filter(is_active=False, add__gte=start, add__lt=now)
total = links.count()
logger.info("Find %s new links" % total)
if total > 0:
html_email = loader.render_to_string(
'mail/new_link_today.html',
context={
'title': 'New Link Today',
'total': total,
'public': links.filter(cat=0),
'personal': links.filter(cat=1),
'business': links.filter(cat=2),
'SITE': SITE,
}
)
send_mail(
subject=add_prefix("新增友链通知"),
message='',
from_email=SERVER_EMAIL,
recipient_list=supervisor_receivers(),
html_message=html_email,
)
logger.info("今日新增的待通过友链")
@app.task(name='cron.notify_new_article')
def notify_new_article(*args, **kwargs):
"""
每天定时提醒今日新增待审核文章
"""
logger.info("Start notify new article today")
now = datetime.datetime.now()
start = now - datetime.timedelta(days=1, minutes=3)
blogs = Blog.objects.filter(is_active=False, add__lt=now, add__gte=start)
total = blogs.count()
logger.info("Find %s new blog" % total)
if total > 0:
html_email = loader.render_to_string(
'mail/new_blog_today.html',
context={
'title': 'New Blog Today',
'total': total,
'blogs': blogs,
'SITE': SITE,
}
)
send_mail(
subject=add_prefix("新增文章通知"),
message="",
from_email=SERVER_EMAIL,
recipient_list=supervisor_receivers(),
html_message=html_email,
)
print("今日新增待审核文章")
@app.task(name='cron.recommend_month')
def recommend_month(*args, **kwargs):
"""
每月向友链发送阅读推荐
"""
logger.info("Start recommend to friend-links")
now = datetime.datetime.now()
start = now - datetime.timedelta(days=30, hours=1)
blogs = Blog.objects.filter(is_active=True, add__lt=now, add__gte=start)
total = blogs.count()
logger.info("Find %s new blog" % total)
links = Link.objects.filter(is_active=True, cat__gt=0, email__isnull=False)
if total > 0:
for link in links:
html_email = loader.render_to_string(
'mail/new_blog_this_month.html',
context={
'title': 'New Blog This Month',
'total': total,
'link': link,
'blogs': blogs,
'days': (now - link.add).days,
'SITE': SITE,
}
)
try:
send_mail(
subject=add_prefix("友链推荐阅读通知"),
message="",
from_email=SERVER_EMAIL,
recipient_list=[link.email],
html_message=html_email,
)
except Exception as e:
logger.error("通知[{}]站长失败: {}".format(link.link_name, e))
else:
logger.info("通知[{}]站长成功!".format(link.link_name))
time.sleep(5)
print("每月总结完毕")
| 32.5
| 112
| 0.562179
|
import logging
import datetime
import time
from django.core.mail import send_mail
from django.core.cache import caches
from django.contrib.auth import get_user_model
from django.template import loader
from tasks import app
from blog.models import Expand, Blog, Link
from tasks.mail import add_prefix, supervisor_receivers, webmaster_receivers
from ishare.settings import SERVER_EMAIL, SITE
logger = logging.getLogger(__name__)
UserAccount = get_user_model()
@app.task(name='cron.update_visit_count')
def update_visit_count(*args, **kwargs):
logger.info("Start update visit count")
cache = caches['four']
obj, is_created = Expand.objects.get_or_create(key='VISIT_CNT', defaults={'key': 'VISIT_CNT', 'value': '1'})
if not is_created:
real = cache.get('total', 0)
obj.value = str(int(obj.value) + real)
obj.save(update_fields=('value',))
cache.set('total', 0, 60 * 60 + 60)
logger.info("更新网站浏览量完成")
@app.task(name='cron.notify_new_link')
def notify_new_link(*args, **kwargs):
logger.info("Start notify new link today")
now = datetime.datetime.now()
start = now - datetime.timedelta(days=1, minutes=3)
links = Link.objects.filter(is_active=False, add__gte=start, add__lt=now)
total = links.count()
logger.info("Find %s new links" % total)
if total > 0:
html_email = loader.render_to_string(
'mail/new_link_today.html',
context={
'title': 'New Link Today',
'total': total,
'public': links.filter(cat=0),
'personal': links.filter(cat=1),
'business': links.filter(cat=2),
'SITE': SITE,
}
)
send_mail(
subject=add_prefix("新增友链通知"),
message='',
from_email=SERVER_EMAIL,
recipient_list=supervisor_receivers(),
html_message=html_email,
)
logger.info("今日新增的待通过友链")
@app.task(name='cron.notify_new_article')
def notify_new_article(*args, **kwargs):
logger.info("Start notify new article today")
now = datetime.datetime.now()
start = now - datetime.timedelta(days=1, minutes=3)
blogs = Blog.objects.filter(is_active=False, add__lt=now, add__gte=start)
total = blogs.count()
logger.info("Find %s new blog" % total)
if total > 0:
html_email = loader.render_to_string(
'mail/new_blog_today.html',
context={
'title': 'New Blog Today',
'total': total,
'blogs': blogs,
'SITE': SITE,
}
)
send_mail(
subject=add_prefix("新增文章通知"),
message="",
from_email=SERVER_EMAIL,
recipient_list=supervisor_receivers(),
html_message=html_email,
)
print("今日新增待审核文章")
@app.task(name='cron.recommend_month')
def recommend_month(*args, **kwargs):
logger.info("Start recommend to friend-links")
now = datetime.datetime.now()
start = now - datetime.timedelta(days=30, hours=1)
blogs = Blog.objects.filter(is_active=True, add__lt=now, add__gte=start)
total = blogs.count()
logger.info("Find %s new blog" % total)
links = Link.objects.filter(is_active=True, cat__gt=0, email__isnull=False)
if total > 0:
for link in links:
html_email = loader.render_to_string(
'mail/new_blog_this_month.html',
context={
'title': 'New Blog This Month',
'total': total,
'link': link,
'blogs': blogs,
'days': (now - link.add).days,
'SITE': SITE,
}
)
try:
send_mail(
subject=add_prefix("友链推荐阅读通知"),
message="",
from_email=SERVER_EMAIL,
recipient_list=[link.email],
html_message=html_email,
)
except Exception as e:
logger.error("通知[{}]站长失败: {}".format(link.link_name, e))
else:
logger.info("通知[{}]站长成功!".format(link.link_name))
time.sleep(5)
print("每月总结完毕")
| true
| true
|
1c3f74c3c64a04ce73e6cd9f4b7d8a0af43690df
| 3,035
|
py
|
Python
|
18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/form/form/settings.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 3
|
2020-06-01T04:17:18.000Z
|
2020-12-18T03:05:55.000Z
|
18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/form/form/settings.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 1
|
2020-04-25T08:01:59.000Z
|
2020-04-25T08:01:59.000Z
|
18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/form/form/settings.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 7
|
2020-04-26T10:02:36.000Z
|
2021-06-08T05:12:46.000Z
|
# Scrapy settings for form project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'form'
SPIDER_MODULES = ['form.spiders']
NEWSPIDER_MODULE = 'form.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'form (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'form.middlewares.FormSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'form.middlewares.FormDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'form.pipelines.FormPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
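# --- Editor's note: an illustrative sketch (not part of the original file)
# showing how a few of the commented-out knobs above are typically enabled
# for polite crawling; these values are hypothetical, not project settings.
DOWNLOAD_DELAY = 1.0
CONCURRENT_REQUESTS_PER_DOMAIN = 4
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0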
| 34.101124
| 103
| 0.774629
|
BOT_NAME = 'form'
SPIDER_MODULES = ['form.spiders']
NEWSPIDER_MODULE = 'form.spiders'
ROBOTSTXT_OBEY = True
| true
| true
|
1c3f76c215b4fd57915de3ea39d046dee571fc8d
| 859
|
py
|
Python
|
LeetCode/0695. Max Area of Island/solution.py
|
InnoFang/oh-my-algorithms
|
f559dba371ce725a926725ad28d5e1c2facd0ab2
|
[
"Apache-2.0"
] | 1
|
2017-03-31T15:24:01.000Z
|
2017-03-31T15:24:01.000Z
|
LeetCode/0695. Max Area of Island/solution.py
|
InnoFang/Algorithm-Library
|
1896b9d8b1fa4cd73879aaecf97bc32d13ae0169
|
[
"Apache-2.0"
] | null | null | null |
LeetCode/0695. Max Area of Island/solution.py
|
InnoFang/Algorithm-Library
|
1896b9d8b1fa4cd73879aaecf97bc32d13ae0169
|
[
"Apache-2.0"
] | null | null | null |
"""
728 / 728 test cases passed.
Runtime: 64 ms
Memory Usage: 15.3 MB
"""
import collections
from typing import List


class Solution:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
ans, m, n = 0, len(grid), len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
que = collections.deque([(i, j)])
grid[i][j] = 0
count = 1
while que:
x, y = que.pop()
for xx, yy in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]:
if 0 <= xx < m and 0 <= yy < n and grid[xx][yy] == 1:
count += 1
grid[xx][yy] = 0
que.append([xx, yy])
ans = max(ans, count)
return ans
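# --- Editor's note: a minimal usage sketch (not part of the original file).
# Note the solution mutates the input grid (visited cells are zeroed).
grid = [[0, 1, 1, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1]]
assert Solution().maxAreaOfIsland(grid) == 3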
| 35.791667
| 87
| 0.351572
|
import collections
from typing import List


class Solution:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
ans, m, n = 0, len(grid), len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
que = collections.deque([(i, j)])
grid[i][j] = 0
count = 1
while que:
x, y = que.pop()
for xx, yy in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]:
if 0 <= xx < m and 0 <= yy < n and grid[xx][yy] == 1:
count += 1
grid[xx][yy] = 0
que.append([xx, yy])
ans = max(ans, count)
return ans
| true
| true
|
1c3f784e25e1d1d8e3ba8a230dd11355ebb201ce
| 52,836
|
py
|
Python
|
second/pytorch/models/middle.py
|
robertkarklinsh/faster-more-furious
|
2a5e34f829488acfa6fec84d800f409d590bd03c
|
[
"MIT"
] | 1
|
2019-05-20T03:43:18.000Z
|
2019-05-20T03:43:18.000Z
|
second/pytorch/models/middle.py
|
pflab-ut/second.pytorch
|
c7bb659a7937ee62aef8049aeb055a457fcd8fa7
|
[
"MIT"
] | null | null | null |
second/pytorch/models/middle.py
|
pflab-ut/second.pytorch
|
c7bb659a7937ee62aef8049aeb055a457fcd8fa7
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import spconv
import sparseconvnet as scn  # assumed import for SparseMiddleExtractor's scn.InputLayer (missing in the original file)
import torch
from torch import nn
from torch.nn import functional as F
from second.pytorch.models.resnet import SparseBasicBlock
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.ops.array_ops import gather_nd, scatter_nd
from torchplus.tools import change_default_args
class SparseMiddleExtractor(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SparseMiddleExtractor'):
super(SparseMiddleExtractor, self).__init__()
self.name = name
if use_norm:
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Linear = change_default_args(bias=False)(nn.Linear)
else:
BatchNorm1d = Empty
Linear = change_default_args(bias=True)(nn.Linear)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.scn_input = scn.InputLayer(3, sparse_shape.tolist())
self.voxel_output_shape = output_shape
middle_layers = []
num_filters = [num_input_features] + num_filters_down1
# num_filters = [64] + num_filters_down1
filters_pairs_d1 = [[num_filters[i], num_filters[i + 1]]
for i in range(len(num_filters) - 1)]
for i, o in filters_pairs_d1:
middle_layers.append(
spconv.SubMConv3d(i, o, 3, bias=False, indice_key="subm0"))
middle_layers.append(BatchNorm1d(o))
middle_layers.append(nn.ReLU())
middle_layers.append(
spconv.SparseConv3d(
num_filters[-1],
num_filters[-1], (3, 1, 1), (2, 1, 1),
bias=False))
middle_layers.append(BatchNorm1d(num_filters[-1]))
middle_layers.append(nn.ReLU())
# assert len(num_filters_down2) > 0
if len(num_filters_down1) == 0:
num_filters = [num_filters[-1]] + num_filters_down2
else:
num_filters = [num_filters_down1[-1]] + num_filters_down2
filters_pairs_d2 = [[num_filters[i], num_filters[i + 1]]
for i in range(len(num_filters) - 1)]
for i, o in filters_pairs_d2:
middle_layers.append(
spconv.SubMConv3d(i, o, 3, bias=False, indice_key="subm1"))
middle_layers.append(BatchNorm1d(o))
middle_layers.append(nn.ReLU())
middle_layers.append(
spconv.SparseConv3d(
num_filters[-1],
num_filters[-1], (3, 1, 1), (2, 1, 1),
bias=False))
middle_layers.append(BatchNorm1d(num_filters[-1]))
middle_layers.append(nn.ReLU())
self.middle_conv = spconv.SparseSequential(*middle_layers)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleD4HD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleD4HD'):
super(SpMiddleD4HD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# num_input_features = 4
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 32, 3, indice_key="subm0"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm0"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm1"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm1"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm1"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpResNetD4HD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpResNetD4HD'):
super(SpResNetD4HD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# num_input_features = 4
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 32, 3, indice_key="res0"),
BatchNorm1d(32),
nn.ReLU(),
SparseBasicBlock(32, 32, indice_key="res0"),
SparseBasicBlock(32, 32, indice_key="res0"),
SpConv3d(32, 64, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(64),
nn.ReLU(),
SparseBasicBlock(64, 64, indice_key="res1"),
SparseBasicBlock(64, 64, indice_key="res1"),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SparseBasicBlock(64, 64, indice_key="res2"),
SparseBasicBlock(64, 64, indice_key="res2"),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleD4HDLite(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleD4HDLite'):
super(SpMiddleD4HDLite, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# num_input_features = 4
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleD8HD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleD8HD'):
super(SpMiddleD8HD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [800, 600, 41] -> [400, 300, 21]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1), # [400, 300, 21] -> [200, 150, 11]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]), # [200, 150, 11] -> [100, 75, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [100, 75, 5] -> [100, 75, 2]
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHD'):
super(SpMiddleFHD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 6
# self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDPeople(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHD'):
super(SpMiddleFHDPeople, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [1600, 1200, 21] -> [800, 600, 11]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]), # [800, 600, 11] -> [400, 300, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [400, 300, 5] -> [400, 300, 2]
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 6
# self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddle2KPeople(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHD'):
super(SpMiddle2KPeople, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 8, 3, indice_key="subm0"),
BatchNorm1d(8),
nn.ReLU(),
SubMConv3d(8, 8, 3, indice_key="subm0"),
BatchNorm1d(8),
nn.ReLU(),
SpConv3d(8, 16, 3, 2,
padding=1), # [1600, 1200, 21] -> [800, 600, 11]
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [800, 600, 11] -> [400, 300, 5]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]), # [800, 600, 11] -> [400, 300, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [400, 300, 5] -> [400, 300, 2]
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 6
# self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDV2(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDV2'):
super(SpMiddleFHDV2, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
spconv.SparseMaxPool3d([2, 1, 1]),
)
self.max_batch_size = 6
self.grid = torch.full([self.max_batch_size, *sparse_shape],
-1,
dtype=torch.int32).cuda()
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size, self.grid)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddle2K(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddle2K'):
super(SpMiddle2K, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SubMConv3d(
num_input_features, 8, 3,
indice_key="subm0"), # [3200, 2400, 81] -> [1600, 1200, 41]
BatchNorm1d(8),
nn.ReLU(),
SubMConv3d(8, 8, 3, indice_key="subm0"),
BatchNorm1d(8),
nn.ReLU(),
SpConv3d(8, 16, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm4"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm4"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm4"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 3
self.grid = torch.full([self.max_batch_size, *sparse_shape],
-1,
dtype=torch.int32).cuda()
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size, self.grid)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDLite(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDLite'):
super(SpMiddleFHDLite, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SpConv3d(num_input_features, 16, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDLiteNoNorm(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDLite'):
super(SpMiddleFHDLiteNoNorm, self).__init__()
self.name = name
use_norm = False
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SpConv3d(num_input_features, 16, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleHDLite(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleHDLite'):
super(SpMiddleHDLite, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SpConv3d(num_input_features, 16, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleResNetFHD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleResNetFHD'):
super(SpMiddleResNetFHD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="res0"),
BatchNorm1d(16),
nn.ReLU(),
SparseBasicBlock(16, 16, indice_key="res0"),
SparseBasicBlock(16, 16, indice_key="res0"),
SpConv3d(16, 32, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(32),
nn.ReLU(),
SparseBasicBlock(32, 32, indice_key="res1"),
SparseBasicBlock(32, 32, indice_key="res1"),
SpConv3d(32, 64, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(64),
nn.ReLU(),
SparseBasicBlock(64, 64, indice_key="res2"),
SparseBasicBlock(64, 64, indice_key="res2"),
SpConv3d(64, 128, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
BatchNorm1d(128),
nn.ReLU(),
SparseBasicBlock(128, 128, indice_key="res3"),
SparseBasicBlock(128, 128, indice_key="res3"),
SpConv3d(128, 128, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(128),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDLarge(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDLarge'):
super(SpMiddleFHDLarge, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
# input: # [1600, 1200, 41]
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1), # [1600, 1200, 41] -> [800, 600, 21]
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1), # [800, 600, 21] -> [400, 300, 11]
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 128, 3, 2,
padding=[0, 1, 1]), # [400, 300, 11] -> [200, 150, 5]
            BatchNorm1d(128),  # 128 channels, matching the preceding SpConv3d output
nn.ReLU(),
SubMConv3d(128, 128, 3, indice_key="subm3"),
BatchNorm1d(128),
nn.ReLU(),
SubMConv3d(128, 128, 3, indice_key="subm3"),
BatchNorm1d(128),
nn.ReLU(),
SubMConv3d(128, 128, 3, indice_key="subm3"),
BatchNorm1d(128),
nn.ReLU(),
SpConv3d(128, 128, (3, 1, 1),
(2, 1, 1)), # [200, 150, 5] -> [200, 150, 2]
BatchNorm1d(128),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
# coors[:, 1] += 1
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
# t = time.time()
# torch.cuda.synchronize()
ret = self.middle_conv(ret)
# torch.cuda.synchronize()
# print("spconv forward time", time.time() - t)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
| 39.577528
| 100
| 0.521557
|
import time
import numpy as np
import spconv
import torch
from torch import nn
from torch.nn import functional as F
from second.pytorch.models.resnet import SparseBasicBlock
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.ops.array_ops import gather_nd, scatter_nd
from torchplus.tools import change_default_args
import sparseconvnet as scn  # assumed dependency: SparseMiddleExtractor below calls scn.InputLayer
class SparseMiddleExtractor(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SparseMiddleExtractor'):
super(SparseMiddleExtractor, self).__init__()
self.name = name
if use_norm:
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Linear = change_default_args(bias=False)(nn.Linear)
else:
BatchNorm1d = Empty
Linear = change_default_args(bias=True)(nn.Linear)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.scn_input = scn.InputLayer(3, sparse_shape.tolist())
self.voxel_output_shape = output_shape
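# Build the middle layers from (in, out) channel pairs: each pair becomes a
# submanifold conv + BN + ReLU block, and each group ends with a strided
# sparse conv that downsamples only the depth axis.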
middle_layers = []
num_filters = [num_input_features] + num_filters_down1
filters_pairs_d1 = [[num_filters[i], num_filters[i + 1]]
for i in range(len(num_filters) - 1)]
for i, o in filters_pairs_d1:
middle_layers.append(
spconv.SubMConv3d(i, o, 3, bias=False, indice_key="subm0"))
middle_layers.append(BatchNorm1d(o))
middle_layers.append(nn.ReLU())
middle_layers.append(
spconv.SparseConv3d(
num_filters[-1],
num_filters[-1], (3, 1, 1), (2, 1, 1),
bias=False))
middle_layers.append(BatchNorm1d(num_filters[-1]))
middle_layers.append(nn.ReLU())
if len(num_filters_down1) == 0:
num_filters = [num_filters[-1]] + num_filters_down2
else:
num_filters = [num_filters_down1[-1]] + num_filters_down2
filters_pairs_d2 = [[num_filters[i], num_filters[i + 1]]
for i in range(len(num_filters) - 1)]
for i, o in filters_pairs_d2:
middle_layers.append(
spconv.SubMConv3d(i, o, 3, bias=False, indice_key="subm1"))
middle_layers.append(BatchNorm1d(o))
middle_layers.append(nn.ReLU())
middle_layers.append(
spconv.SparseConv3d(
num_filters[-1],
num_filters[-1], (3, 1, 1), (2, 1, 1),
bias=False))
middle_layers.append(BatchNorm1d(num_filters[-1]))
middle_layers.append(nn.ReLU())
self.middle_conv = spconv.SparseSequential(*middle_layers)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
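# Illustrative usage sketch (not part of the original file; shapes assumed):
#   extractor = SparseMiddleExtractor(output_shape=[1, 40, 1600, 1408, 128])
#   bev = extractor(voxel_features, coors, batch_size)
# voxel_features is a [num_voxels, C] float tensor and coors a [num_voxels, 4]
# int tensor of (batch, z, y, x) indices; the result is a dense BEV map of
# shape [N, C * D, H, W].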
class SpMiddleD4HD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleD4HD'):
super(SpMiddleD4HD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 32, 3, indice_key="subm0"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm0"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm1"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm1"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm1"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpResNetD4HD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpResNetD4HD'):
super(SpResNetD4HD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 32, 3, indice_key="res0"),
BatchNorm1d(32),
nn.ReLU(),
SparseBasicBlock(32, 32, indice_key="res0"),
SparseBasicBlock(32, 32, indice_key="res0"),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SparseBasicBlock(64, 64, indice_key="res1"),
SparseBasicBlock(64, 64, indice_key="res1"),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SparseBasicBlock(64, 64, indice_key="res2"),
SparseBasicBlock(64, 64, indice_key="res2"),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleD4HDLite(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleD4HDLite'):
super(SpMiddleD4HDLite, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleD8HD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleD8HD'):
super(SpMiddleD8HD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHD'):
super(SpMiddleFHD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 6
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDPeople(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHD'):
super(SpMiddleFHDPeople, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 6
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddle2KPeople(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHD'):
super(SpMiddle2KPeople, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 8, 3, indice_key="subm0"),
BatchNorm1d(8),
nn.ReLU(),
SubMConv3d(8, 8, 3, indice_key="subm0"),
BatchNorm1d(8),
nn.ReLU(),
SpConv3d(8, 16, 3, 2,
padding=1),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 6
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDV2(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDV2'):
super(SpMiddleFHDV2, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
spconv.SparseMaxPool3d([2, 1, 1]),
)
self.max_batch_size = 6
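# Preallocated index grid of shape [max_batch_size, D, H, W]; passing it to
# SparseConvTensor lets spconv reuse this buffer when generating convolution
# rules instead of reallocating it on every forward pass.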
self.grid = torch.full([self.max_batch_size, *sparse_shape],
-1,
dtype=torch.int32).cuda()
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size, self.grid)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddle2K(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddle2K'):
super(SpMiddle2K, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(
num_input_features, 8, 3,
indice_key="subm0"),
BatchNorm1d(8),
nn.ReLU(),
SubMConv3d(8, 8, 3, indice_key="subm0"),
BatchNorm1d(8),
nn.ReLU(),
SpConv3d(8, 16, 3, 2,
padding=1),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm1"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm2"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm3"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm4"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm4"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm4"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
self.max_batch_size = 3
self.grid = torch.full([self.max_batch_size, *sparse_shape],
-1,
dtype=torch.int32).cuda()
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size, self.grid)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDLite(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDLite'):
super(SpMiddleFHDLite, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SpConv3d(num_input_features, 16, 3, 2,
padding=1),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDLiteNoNorm(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDLite'):
super(SpMiddleFHDLiteNoNorm, self).__init__()
self.name = name
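# Hard-coded override: this "NoNorm" variant always disables normalization,
# regardless of the use_norm argument passed in.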
use_norm = False
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SpConv3d(num_input_features, 16, 3, 2,
padding=1),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleHDLite(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleHDLite'):
super(SpMiddleHDLite, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SpConv3d(num_input_features, 16, 3, 2,
padding=1),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(64),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleResNetFHD(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleResNetFHD'):
super(SpMiddleResNetFHD, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="res0"),
BatchNorm1d(16),
nn.ReLU(),
SparseBasicBlock(16, 16, indice_key="res0"),
SparseBasicBlock(16, 16, indice_key="res0"),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SparseBasicBlock(32, 32, indice_key="res1"),
SparseBasicBlock(32, 32, indice_key="res1"),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SparseBasicBlock(64, 64, indice_key="res2"),
SparseBasicBlock(64, 64, indice_key="res2"),
SpConv3d(64, 128, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(128),
nn.ReLU(),
SparseBasicBlock(128, 128, indice_key="res3"),
SparseBasicBlock(128, 128, indice_key="res3"),
SpConv3d(128, 128, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(128),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
class SpMiddleFHDLarge(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
num_filters_down1=[64],
num_filters_down2=[64, 64],
name='SpMiddleFHDLarge'):
super(SpMiddleFHDLarge, self).__init__()
self.name = name
if use_norm:
BatchNorm2d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
BatchNorm1d = change_default_args(
eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
Conv2d = change_default_args(bias=False)(nn.Conv2d)
SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=False)(
nn.ConvTranspose2d)
else:
BatchNorm2d = Empty
BatchNorm1d = Empty
Conv2d = change_default_args(bias=True)(nn.Conv2d)
SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
ConvTranspose2d = change_default_args(bias=True)(
nn.ConvTranspose2d)
sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
print(sparse_shape)
self.sparse_shape = sparse_shape
self.voxel_output_shape = output_shape
self.middle_conv = spconv.SparseSequential(
SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SubMConv3d(16, 16, 3, indice_key="subm0"),
BatchNorm1d(16),
nn.ReLU(),
SpConv3d(16, 32, 3, 2,
padding=1),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SubMConv3d(32, 32, 3, indice_key="subm1"),
BatchNorm1d(32),
nn.ReLU(),
SpConv3d(32, 64, 3, 2,
padding=1),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SubMConv3d(64, 64, 3, indice_key="subm2"),
BatchNorm1d(64),
nn.ReLU(),
SpConv3d(64, 128, 3, 2,
padding=[0, 1, 1]),
BatchNorm1d(128),
nn.ReLU(),
SubMConv3d(128, 128, 3, indice_key="subm3"),
BatchNorm1d(128),
nn.ReLU(),
SubMConv3d(128, 128, 3, indice_key="subm3"),
BatchNorm1d(128),
nn.ReLU(),
SubMConv3d(128, 128, 3, indice_key="subm3"),
BatchNorm1d(128),
nn.ReLU(),
SpConv3d(128, 128, (3, 1, 1),
(2, 1, 1)),
BatchNorm1d(128),
nn.ReLU(),
)
def forward(self, voxel_features, coors, batch_size):
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.middle_conv(ret)
ret = ret.dense()
N, C, D, H, W = ret.shape
ret = ret.view(N, C * D, H, W)
return ret
| true
| true
|
1c3f78da7320a443287010f3c5804dafaf315c80
| 11,094
|
py
|
Python
|
pymatgen/io/tests/test_zeopp.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | 1
|
2021-10-18T01:26:50.000Z
|
2021-10-18T01:26:50.000Z
|
pymatgen/io/tests/test_zeopp.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | null | null | null |
pymatgen/io/tests/test_zeopp.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "bkmedasani@lbl.gov"
__date__ = "Aug 2, 2013"
import unittest
import os
import re
from pymatgen.core.periodic_table import Species
from pymatgen.core.structure import Structure, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.io.zeopp import ZeoCssr, ZeoVoronoiXYZ, get_voronoi_nodes, \
get_high_accuracy_voronoi_nodes, get_void_volume_surfarea, \
get_free_sphere_params
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.analysis.bond_valence import BVAnalyzer
try:
import zeo
except ImportError:
zeo = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.zeocssr = ZeoCssr(p.structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
# @unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
structure = BVAnalyzer().get_oxi_state_decorated_structure(p.structure)
self.zeocssr = ZeoCssr(structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe3+ 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe3+ 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe3+ 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe3+ 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P5+ 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P5+ 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P5+ 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P5+ 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O2- 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O2- 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O2- 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O2- 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O2- 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O2- 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O2- 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O2- 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O2- 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O2- 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O2- 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O2- 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O2- 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O2- 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O2- 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O2- 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI_oxistate_decorated.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoVoronoiXYZTest(unittest.TestCase):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
prop = [0.4, 0.2, 0.2, 0.2, 0.2]
self.mol = Molecule(
["C", "H", "H", "H", "H"], coords,
site_properties={"voronoi_radius": prop})
self.xyz = ZeoVoronoiXYZ(self.mol)
def test_str(self):
ans = """5
H4 C1
C 0.000000 0.000000 0.000000 0.400000
H 1.089000 0.000000 0.000000 0.200000
H -0.363000 1.026719 0.000000 0.200000
H -0.363000 -0.513360 -0.889165 0.200000
H -0.363000 -0.513360 0.889165 0.200000"""
self.assertEqual(str(self.xyz), ans)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI_voro.xyz")
vor = ZeoVoronoiXYZ.from_file(filename)
self.assertIsInstance(vor.molecule, Molecule)
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Species(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
vor_node_struct, vor_edge_center_struct, vor_face_center_struct = \
get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
print(len(vor_node_struct.sites))
print(len(vor_face_center_struct.sites))
@unittest.skip("file free_sph.cif not present")
class GetFreeSphereParamsTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'free_sph.cif')
self.structure = Structure.from_file(filepath)
self.rad_dict = {'Ge': 0.67, 'P': 0.52, 'S': 1.7,
'La': 1.17, 'Zr': 0.86, 'O': 1.26}
def test_get_free_sphere_params(self):
free_sph_params = get_free_sphere_params(self.structure,
rad_dict=self.rad_dict)
# Zeo results can change in future. Hence loose comparison
self.assertAlmostEqual(
free_sph_params['inc_sph_max_dia'], 2.58251, places=1)
self.assertAlmostEqual(
free_sph_params['free_sph_max_dia'], 1.29452, places=1)
self.assertAlmostEqual(
free_sph_params['inc_sph_along_free_sph_path_max_dia'],
2.58251, places=1)
@unittest.skipIf(not zeo, "zeo not present.")
class GetHighAccuracyVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Species(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
# vor_node_struct, vor_ec_struct, vor_fc_struct = \
# get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict)
vor_node_struct = \
get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
# self.assertIsInstance(vor_ec_struct, Structure)
# self.assertIsInstance(vor_fc_struct, Structure)
print(len(vor_node_struct.sites))
# print(len(vor_fc_struct.sites))
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesMultiOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
self.structure = bv.get_oxi_state_decorated_structure(self.structure)
valences = bv.get_valences(self.structure)
radii = []
for i in range(len(valences)):
el = self.structure.sites[i].specie.symbol
radius = Species(el, valences[i]).ionic_radius
radii.append(radius)
el = [site.species_string for site in self.structure.sites]
self.rad_dict = dict(zip(el, radii))
for el in self.rad_dict.keys():
print((el, self.rad_dict[el].real))
def test_get_voronoi_nodes(self):
vor_node_struct, vor_edge_center_struct, vor_face_center_struct = \
get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
@unittest.skip("The function is deprecated")
class GetVoidVolumeSurfaceTest(unittest.TestCase):
def setUp(self):
filepath1 = os.path.join(test_dir, 'Li2O.cif')
p = CifParser(filepath1).get_structures(False)[0]
bv = BVAnalyzer()
valences = bv.get_valences(p)
el = [site.species_string for site in p.sites]
val_dict = dict(zip(el, valences))
self._radii = {}
for k, v in val_dict.items():
k1 = re.sub(r'[1-9,+,\-]', '', k)
self._radii[k1] = float(Species(k1, v).ionic_radius)
p.remove(0)
self._vac_struct = p
def test_void_volume_surface_area(self):
vol, sa = get_void_volume_surfarea(self._vac_struct, self._radii)
# print "vol: ", vol, "sa: ", sa
self.assertIsInstance(vol, float)
self.assertIsInstance(sa, float)
if __name__ == "__main__":
unittest.main()
| 39.06338
| 79
| 0.651343
|
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "bkmedasani@lbl.gov"
__date__ = "Aug 2, 2013"
import unittest
import os
import re
from pymatgen.core.periodic_table import Species
from pymatgen.core.structure import Structure, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.io.zeopp import ZeoCssr, ZeoVoronoiXYZ, get_voronoi_nodes, \
get_high_accuracy_voronoi_nodes, get_void_volume_surfarea, \
get_free_sphere_params
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.analysis.bond_valence import BVAnalyzer
try:
import zeo
except ImportError:
zeo = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.zeocssr = ZeoCssr(p.structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
class ZeoCssrOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
structure = BVAnalyzer().get_oxi_state_decorated_structure(p.structure)
self.zeocssr = ZeoCssr(structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe3+ 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe3+ 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe3+ 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe3+ 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P5+ 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P5+ 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P5+ 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P5+ 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O2- 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O2- 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O2- 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O2- 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O2- 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O2- 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O2- 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O2- 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O2- 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O2- 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O2- 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O2- 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O2- 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O2- 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O2- 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O2- 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI_oxistate_decorated.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoVoronoiXYZTest(unittest.TestCase):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
prop = [0.4, 0.2, 0.2, 0.2, 0.2]
self.mol = Molecule(
["C", "H", "H", "H", "H"], coords,
site_properties={"voronoi_radius": prop})
self.xyz = ZeoVoronoiXYZ(self.mol)
def test_str(self):
ans = """5
H4 C1
C 0.000000 0.000000 0.000000 0.400000
H 1.089000 0.000000 0.000000 0.200000
H -0.363000 1.026719 0.000000 0.200000
H -0.363000 -0.513360 -0.889165 0.200000
H -0.363000 -0.513360 0.889165 0.200000"""
self.assertEqual(str(self.xyz), ans)
def test_from_file(self):
filename = os.path.join(test_dir, "EDI_voro.xyz")
vor = ZeoVoronoiXYZ.from_file(filename)
self.assertIsInstance(vor.molecule, Molecule)
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Species(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
vor_node_struct, vor_edge_center_struct, vor_face_center_struct = \
get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
print(len(vor_node_struct.sites))
print(len(vor_face_center_struct.sites))
@unittest.skip("file free_sph.cif not present")
class GetFreeSphereParamsTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'free_sph.cif')
self.structure = Structure.from_file(filepath)
self.rad_dict = {'Ge': 0.67, 'P': 0.52, 'S': 1.7,
'La': 1.17, 'Zr': 0.86, 'O': 1.26}
def test_get_free_sphere_params(self):
free_sph_params = get_free_sphere_params(self.structure,
rad_dict=self.rad_dict)
self.assertAlmostEqual(
free_sph_params['inc_sph_max_dia'], 2.58251, places=1)
self.assertAlmostEqual(
free_sph_params['free_sph_max_dia'], 1.29452, places=1)
self.assertAlmostEqual(
free_sph_params['inc_sph_along_free_sph_path_max_dia'],
2.58251, places=1)
@unittest.skipIf(not zeo, "zeo not present.")
class GetHighAccuracyVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Species(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
vor_node_struct = \
get_high_accuracy_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
print(len(vor_node_struct.sites))
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesMultiOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
self.structure = bv.get_oxi_state_decorated_structure(self.structure)
valences = bv.get_valences(self.structure)
radii = []
for i in range(len(valences)):
el = self.structure.sites[i].specie.symbol
radius = Species(el, valences[i]).ionic_radius
radii.append(radius)
el = [site.species_string for site in self.structure.sites]
self.rad_dict = dict(zip(el, radii))
for el in self.rad_dict.keys():
print((el, self.rad_dict[el].real))
def test_get_voronoi_nodes(self):
vor_node_struct, vor_edge_center_struct, vor_face_center_struct = \
get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
@unittest.skip("The function is deprecated")
class GetVoidVolumeSurfaceTest(unittest.TestCase):
def setUp(self):
filepath1 = os.path.join(test_dir, 'Li2O.cif')
p = CifParser(filepath1).get_structures(False)[0]
bv = BVAnalyzer()
valences = bv.get_valences(p)
el = [site.species_string for site in p.sites]
val_dict = dict(zip(el, valences))
self._radii = {}
for k, v in val_dict.items():
k1 = re.sub(r'[1-9,+,\-]', '', k)
self._radii[k1] = float(Species(k1, v).ionic_radius)
p.remove(0)
self._vac_struct = p
def test_void_volume_surface_area(self):
vol, sa = get_void_volume_surfarea(self._vac_struct, self._radii)
self.assertIsInstance(vol, float)
self.assertIsInstance(sa, float)
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c3f796b9f5638396fc79a7e1e9a7954890167dc
| 523
|
py
|
Python
|
tests/conftest.py
|
erlep/PyTest
|
5524c2d684ee47ce75f76cf828c755cda23df73c
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
erlep/PyTest
|
5524c2d684ee47ce75f76cf828c755cda23df73c
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
erlep/PyTest
|
5524c2d684ee47ce75f76cf828c755cda23df73c
|
[
"MIT"
] | null | null | null |
# In pytest, what is the use of conftest.py files? - https://bit.ly/3EVB7yn
import pytest
import sys
@pytest.fixture
def capture_stdout(monkeypatch):
buffer = {"stdout": "", "write_calls": 0}
def fake_write(s):
buffer["stdout"] += s
buffer["write_calls"] += 1
monkeypatch.setattr(sys.stdout, 'write', fake_write)
return buffer
@pytest.fixture(scope="session")
def db_conn():
db = ...
url = ...
with db.connect(url) as conn: # connection will be torn down after all tests finish
yield conn
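# Illustrative usage (not part of this file): a test requests the fixture by
# name and inspects the captured output.
#
# def test_greeting(capture_stdout):
#     print("hello")
#     assert capture_stdout["stdout"] == "hello\n"
#     assert capture_stdout["write_calls"] >= 1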
| 20.92
| 86
| 0.674952
|
import pytest
import sys
@pytest.fixture
def capture_stdout(monkeypatch):
buffer = {"stdout": "", "write_calls": 0}
def fake_write(s):
buffer["stdout"] += s
buffer["write_calls"] += 1
monkeypatch.setattr(sys.stdout, 'write', fake_write)
return buffer
@pytest.fixture(scope="session")
def db_conn():
db = ...
url = ...
with db.connect(url) as conn:
yield conn
| true
| true
|
1c3f79f859edbcb9220e7e3ffe94558764672e95
| 762
|
py
|
Python
|
bokeh_app/data/synthetic_data_generator.py
|
goodteamname/spino
|
aa8c6cfa9f94a639c306d85ca6df2483108fda37
|
[
"MIT"
] | null | null | null |
bokeh_app/data/synthetic_data_generator.py
|
goodteamname/spino
|
aa8c6cfa9f94a639c306d85ca6df2483108fda37
|
[
"MIT"
] | 9
|
2020-10-26T10:57:00.000Z
|
2020-11-01T14:48:21.000Z
|
bokeh_app/data/synthetic_data_generator.py
|
goodteamname/spino
|
aa8c6cfa9f94a639c306d85ca6df2483108fda37
|
[
"MIT"
] | 1
|
2020-10-26T10:41:31.000Z
|
2020-10-26T10:41:31.000Z
|
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(0, 20, 1001)
noise = np.random.randn(1001)
T = np.pi # period
# Fourier series coefficients
a_0 = 1.
a_1 = 1.
a_2 = 2.
a_3 = 3.
b_1 = 4.
b_2 = 5.
b_3 = 6.
timeseries = a_0 \
+ a_1*np.cos(1*np.pi*t/T) \
+ a_2*np.cos(2*np.pi*t/T) \
+ a_3*np.cos(3*np.pi*t/T) \
+ b_1*np.sin(1*np.pi*t/T) \
+ b_2*np.sin(2*np.pi*t/T) \
+ b_3*np.sin(3*np.pi*t/T)
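# Note: terms of the form cos(n*pi*t/T) repeat every 2*T, so with T = pi the
# signal's fundamental period is actually 2*pi; use 2*pi*n*t/T if T is meant
# to be the fundamental period.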
noisy_timeseries = timeseries + noise
plt.plot(t, timeseries)
plt.scatter(t, noisy_timeseries)
plt.show()
# data = np.vstack((t, timeseries)).T
# noisy_data = np.vstack((t, noisy_timeseries)).T
# np.savetxt("test_timeseries.csv", data, delimiter=",")
# np.savetxt("test_timeseries_noisy.csv", noisy_data, delimiter=",")
| 19.538462
| 68
| 0.635171
|
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(0, 20, 1001)
noise = np.random.randn(1001)
T = np.pi
a_0 = 1.
a_1 = 1.
a_2 = 2.
a_3 = 3.
b_1 = 4.
b_2 = 5.
b_3 = 6.
timeseries = a_0 \
+ a_1*np.cos(1*np.pi*t/T) \
+ a_2*np.cos(2*np.pi*t/T) \
+ a_3*np.cos(3*np.pi*t/T) \
+ b_1*np.sin(1*np.pi*t/T) \
+ b_2*np.sin(2*np.pi*t/T) \
+ b_3*np.sin(3*np.pi*t/T)
noisy_timeseries = timeseries + noise
plt.plot(t, timeseries)
plt.scatter(t, noisy_timeseries)
plt.show()
| true
| true
|
1c3f7a392ac2a8f3459f0a2fa8a65f80aba96efe
| 285
|
py
|
Python
|
renovation_logging/config/desktop.py
|
leam-tech/renovation_logging
|
172be47b9b5f7a49590efcda3cf189687e514069
|
[
"MIT"
] | 1
|
2021-06-19T12:10:12.000Z
|
2021-06-19T12:10:12.000Z
|
renovation_logging/config/desktop.py
|
leam-tech/renovation_logging
|
172be47b9b5f7a49590efcda3cf189687e514069
|
[
"MIT"
] | null | null | null |
renovation_logging/config/desktop.py
|
leam-tech/renovation_logging
|
172be47b9b5f7a49590efcda3cf189687e514069
|
[
"MIT"
] | 1
|
2021-06-19T12:10:15.000Z
|
2021-06-19T12:10:15.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Renovation Logging",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Renovation Logging")
}
]
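Frappe's desk presumably picks up this `get_data()` hook to render a module card; extending it is just appending another dict of the same shape. A sketch (the second card is hypothetical, not from the source):

from frappe import _

def get_data():
    return [
        {
            "module_name": "Renovation Logging",
            "color": "grey",
            "icon": "octicon octicon-file-directory",
            "type": "module",
            "label": _("Renovation Logging")
        },
        # Hypothetical second card, shown only to illustrate the shape:
        {
            "module_name": "Renovation Logging Reports",
            "color": "blue",
            "icon": "octicon octicon-graph",
            "type": "module",
            "label": _("Renovation Logging Reports")
        }
    ]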
| 19
| 44
| 0.635088
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Renovation Logging",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Renovation Logging")
}
]
| true
| true
|
1c3f7ad4ef6b9d9d18d44be2d896e43eed4b7b8b
| 66,169
|
py
|
Python
|
workers/pull_request_worker/pull_request_worker.py
|
jnqqq/augur
|
c42306f656637bd0cd7e2a7e76b15dd232e76ad5
|
[
"MIT"
] | 2
|
2021-08-31T06:54:27.000Z
|
2021-11-15T03:18:50.000Z
|
workers/pull_request_worker/pull_request_worker.py
|
NToepke/augur
|
aef5edca1c8cea2698b6800ced68fa64acae4d76
|
[
"MIT"
] | null | null | null |
workers/pull_request_worker/pull_request_worker.py
|
NToepke/augur
|
aef5edca1c8cea2698b6800ced68fa64acae4d76
|
[
"MIT"
] | null | null | null |
#SPDX-License-Identifier: MIT
import ast
import json
import logging
import math
import os
import sys
import time
import traceback
from workers.worker_git_integration import WorkerGitInterfaceable
from numpy.lib.utils import source
import requests
import copy
from datetime import datetime
from multiprocessing import Process, Queue
import pandas as pd
import sqlalchemy as s
from sqlalchemy.sql.expression import bindparam
from workers.worker_base import Worker
class GitHubPullRequestWorker(WorkerGitInterfaceable):
"""
Worker that collects Pull Request related data from the
Github API and stores it in our database.
:param task: most recent task the broker added to the worker's queue
:param config: holds info like api keys, descriptions, and database connection strings
"""
def __init__(self, config={}):
worker_type = "pull_request_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
models = ['pull_requests', 'pull_request_commits', 'pull_request_files']
# Define the tables needed to insert, update, or delete on
data_tables = ['contributors', 'pull_requests',
'pull_request_assignees', 'pull_request_events', 'pull_request_labels',
'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo',
'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits',
'pull_request_files', 'pull_request_reviews', 'pull_request_review_message_ref']
operations_tables = ['worker_history', 'worker_job']
self.deep_collection = True
self.platform_id = 25150 # GitHub
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = 'GitHub Pull Request Worker'
self.tool_version = '1.0.0'
self.data_source = 'GitHub API'
#Needs to be an attribute of the class for incremental database insert using paginate_endpoint
self.pk_source_prs = []
    def is_nan(value):
        # NOTE: defined at class scope without `self` and called as a bare name
        # below, which is fragile; relies on the `math` import above.
        return type(value) == float and math.isnan(value)
def graphql_paginate(self, query, data_subjects, before_parameters=None):
""" Paginate a GitHub GraphQL query backwards
:param query: A string, holds the GraphQL query
:rtype: A Pandas DataFrame, contains all data contained in the pages
"""
self.logger.info(f'Start paginate with params: \n{data_subjects} '
f'\n{before_parameters}')
def all_items(dictionary):
for key, value in dictionary.items():
if type(value) is dict:
yield (key, value)
yield from all_items(value)
else:
yield (key, value)
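        # Example: all_items({'files': {'comments': None}}) yields
        # ('files', {'comments': None}) and then ('comments', None).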
if not before_parameters:
before_parameters = {}
for subject, _ in all_items(data_subjects):
before_parameters[subject] = ''
start_cursor = None
has_previous_page = True
base_url = 'https://api.github.com/graphql'
tuples = []
def find_root_of_subject(data, key_subject):
self.logger.debug(f'Finding {key_subject} root of {data}')
key_nest = None
for subject, nest in data.items():
if key_subject in nest:
key_nest = nest[key_subject]
break
elif type(nest) == dict:
return find_root_of_subject(nest, key_subject)
else:
raise KeyError
return key_nest
for data_subject, nest in data_subjects.items():
self.logger.debug(f'Beginning paginate process for field {data_subject} '
f'for query: {query}')
page_count = 0
while has_previous_page:
page_count += 1
num_attempts = 3
success = False
for attempt in range(num_attempts):
self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint '
f'page number {page_count}\n')
response = requests.post(base_url, json={'query': query.format(
**before_parameters)}, headers=self.headers)
self.update_gh_rate_limit(response)
try:
data = response.json()
except:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['type'] == 'NOT_FOUND':
self.logger.warning(
"Github repo was not found or does not exist for "
f"endpoint: {base_url}\n"
)
break
if data['errors'][0]['type'] == 'RATE_LIMITED':
self.update_gh_rate_limit(response)
num_attempts -= 1
continue
if 'data' in data:
success = True
root = find_root_of_subject(data, data_subject)
page_info = root['pageInfo']
data = root['edges']
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info(
"Github repo was not found or does not exist for endpoint: "
f"{base_url}\n"
)
break
if data['message'] == (
"You have triggered an abuse detection mechanism. Please wait a "
"few minutes before you try again."
):
num_attempts -= 1
self.update_gh_rate_limit(response, temporarily_disable=True)
if data['message'] == "Bad credentials":
self.update_gh_rate_limit(response, bad_credentials=True)
if not success:
self.logger.info('GraphQL query failed: {}'.format(query))
break
before_parameters.update({
data_subject: ', before: \"{}\"'.format(page_info['startCursor'])
})
has_previous_page = page_info['hasPreviousPage']
tuples += data
self.logger.info(f"Paged through {page_count} pages and "
f"collected {len(tuples)} data points\n")
if not nest:
return tuples
return tuples + self.graphql_paginate(query, data_subjects[subject],
before_parameters=before_parameters)
def pull_request_files_model(self, task_info, repo_id):
        # query existing PR numbers (and their augur ids) so we can request each PR's files via GraphQL
pr_number_sql = s.sql.text("""
SELECT DISTINCT pr_src_number as pr_src_number, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(self.repo_id))
pr_numbers = pd.read_sql(pr_number_sql, self.db, params={})
pr_file_rows = []
for index, pull_request in enumerate(pr_numbers.itertuples()):
self.logger.debug(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}')
query = """
{{
repository(owner:"%s", name:"%s"){{
pullRequest (number: %s) {{
""" % (self.owner, self.repo, pull_request.pr_src_number) + """
files (last: 100{files}) {{
pageInfo {{
hasPreviousPage
hasNextPage
endCursor
startCursor
}}
edges {{
node {{
additions
deletions
path
}}
}}
}}
}}
}}
}}
"""
pr_file_rows += [{
'pull_request_id': pull_request.pull_request_id,
'pr_file_additions': pr_file['node']['additions'],
'pr_file_deletions': pr_file['node']['deletions'],
'pr_file_path': pr_file['node']['path'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
'repo_id': self.repo_id,
} for pr_file in self.graphql_paginate(query, {'files': None})]
# Get current table values
table_values_sql = s.sql.text("""
SELECT pull_request_files.*
FROM pull_request_files, pull_requests
WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id
AND pull_requests.repo_id = :repo_id
""")
self.logger.debug(
f'Getting table values with the following PSQL query: \n{table_values_sql}\n'
)
table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': self.repo_id})
        # Grab the column names from the first collected row; bail out early if nothing was collected
if len(pr_file_rows) > 0:
table_columns = pr_file_rows[0].keys()
else:
self.logger.debug(f'No rows need insertion for repo {self.repo_id}\n')
self.register_task_completion(task_info, self.repo_id, 'pull_request_files')
return
# Compare queried values against table values for dupes/updates
pr_file_rows_df = pd.DataFrame(pr_file_rows)
pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id'])
dupe_columns = ['pull_request_id', 'pr_file_path']
update_columns = ['pr_file_additions', 'pr_file_deletions']
need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'),
how='outer', indicator=True, on=dupe_columns).loc[
lambda x : x['_merge']=='left_only'][table_columns]
need_updates = pr_file_rows_df.merge(table_values, on=dupe_columns, suffixes=('','_table'),
how='inner',indicator=False)[table_columns].merge(table_values,
on=update_columns, suffixes=('','_table'), how='outer',indicator=True
).loc[lambda x : x['_merge']=='left_only'][table_columns]
need_updates['b_pull_request_id'] = need_updates['pull_request_id']
need_updates['b_pr_file_path'] = need_updates['pr_file_path']
pr_file_insert_rows = need_insertion.to_dict('records')
pr_file_update_rows = need_updates.to_dict('records')
self.logger.debug(
f'Repo id {self.repo_id} needs {len(need_insertion)} insertions and '
f'{len(need_updates)} updates.\n'
)
if len(pr_file_update_rows) > 0:
success = False
while not success:
try:
self.db.execute(
                        self.pull_request_files_table.update().where(
                            # Use s.and_ here: Python's `and` on two column
                            # expressions keeps only the second condition instead
                            # of emitting SQL AND.
                            s.and_(
                                self.pull_request_files_table.c.pull_request_id == bindparam(
                                    'b_pull_request_id'
                                ),
                                self.pull_request_files_table.c.pr_file_path == bindparam(
                                    'b_pr_file_path'
                                )
                            )
).values(
pr_file_additions=bindparam('pr_file_additions'),
pr_file_deletions=bindparam('pr_file_deletions')
), pr_file_update_rows
)
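                        # Passing `pr_file_update_rows` (a list of dicts) makes this
                        # an executemany: the UPDATE runs once per row, and the
                        # b_-prefixed bindparams keep the WHERE keys distinct from
                        # the identically named columns being updated.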
success = True
except Exception as e:
self.logger.info('error: {}'.format(e))
time.sleep(5)
if len(pr_file_insert_rows) > 0:
success = False
while not success:
try:
self.db.execute(
self.pull_request_files_table.insert(),
pr_file_insert_rows
)
success = True
except Exception as e:
self.logger.info('error: {}'.format(e))
time.sleep(5)
self.register_task_completion(task_info, self.repo_id, 'pull_request_files')
def pull_request_commits_model(self, task_info, repo_id):
""" Queries the commits related to each pull request already inserted in the db """
self.logger.info("Querying starting ids info...\n")
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.history_id = self.get_max_id(
'worker_history', 'history_id', operations_table=True
) + 1
self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id')
self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id')
# query existing PRs and the respective url we will append the commits url to
pr_url_sql = s.sql.text("""
SELECT DISTINCT pr_url, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(self.repo_id))
urls = pd.read_sql(pr_url_sql, self.db, params={})
for pull_request in urls.itertuples(): # for each url of PRs we have inserted
commits_url = pull_request.pr_url + '/commits?page={}'
table = 'pull_request_commits'
table_pkey = 'pr_cmt_id'
duplicate_col_map = {'pr_cmt_sha': 'sha'}
update_col_map = {}
# Use helper paginate function to iterate the commits url and check for dupes
            #TODO: figure out why dupes sometimes still happen.
pr_commits = self.paginate(
commits_url, duplicate_col_map, update_col_map, table, table_pkey,
where_clause="where pull_request_id = {}".format(pull_request.pull_request_id)
)
for pr_commit in pr_commits: # post-pagination, iterate results
try:
if pr_commit['flag'] == 'need_insertion': # if non-dupe
pr_commit_row = {
'pull_request_id': pull_request.pull_request_id,
'pr_cmt_sha': pr_commit['sha'],
'pr_cmt_node_id': pr_commit['node_id'],
'pr_cmt_message': pr_commit['commit']['message'],
# 'pr_cmt_comments_url': pr_commit['comments_url'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
'repo_id': self.repo_id,
}
result = self.db.execute(
self.pull_request_commits_table.insert().values(pr_commit_row)
)
self.logger.info(
f"Inserted Pull Request Commit: {result.inserted_primary_key}\n"
)
except Exception as e:
self.logger.debug(f"pr_commit exception registered: {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
continue
self.register_task_completion(self.task_info, self.repo_id, 'pull_request_commits')
def _get_pk_source_prs(self):
#self.owner and self.repo are both defined in the worker base's collect method using the url of the github repo.
pr_url = (
f"https://api.github.com/repos/{self.owner}/{self.repo}/pulls?state=all&"
"direction=asc&per_page=100&page={}"
)
#Database action map is essential in order to avoid duplicates messing up the data
## 9/20/2021: SPG added closed_at, updated_at, and merged_at to the update map.
## 11/29/2021: And this is the cause of PR updates not working because it doesn't handle NULLs ... I think.
pr_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_src_id']
},
'update': {
'source': ['state'],
'augur': ['pr_src_state']
}
}
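        # Illustration (not from the source): with this map, an incoming PR whose
        # source id is 123 is deduplicated against pull_requests.pr_src_id = 123,
        # while a changed `state` marks the stored row for an update of
        # pr_src_state via the `update` half of the map.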
#Use a parent method in order to iterate through pull request pages
#Define a method to pass paginate_endpoint so that prs can be inserted incrementally
def pk_source_increment_insert(inc_source_prs, action_map):
self.write_debug_data(inc_source_prs, 'source_prs')
if len(inc_source_prs['all']) == 0:
self.logger.info("There are no prs for this repository.\n")
self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')
return False
def is_valid_pr_block(issue):
return (
'pull_request' in issue and issue['pull_request']
and isinstance(issue['pull_request'], dict) and 'url' in issue['pull_request']
)
#self.logger.debug(f"inc_source_prs is: {len(inc_source_prs['insert'])} and the action map is {action_map}...")
#This is sending empty data to enrich_cntrb_id, fix with check
if len(inc_source_prs['insert']) > 0:
inc_source_prs['insert'] = self.enrich_cntrb_id(
inc_source_prs['insert'], str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts in action map.")
prs_insert = [
{
'repo_id': self.repo_id,
'pr_url': pr['url'],
'pr_src_id': pr['id'],
'pr_src_node_id': pr['node_id'], ## 9/20/2021 - This was null. No idea why.
'pr_html_url': pr['html_url'],
'pr_diff_url': pr['diff_url'],
'pr_patch_url': pr['patch_url'],
'pr_issue_url': pr['issue_url'],
'pr_augur_issue_id': None,
'pr_src_number': pr['number'],
'pr_src_state': pr['state'],
'pr_src_locked': pr['locked'],
'pr_src_title': str(pr['title']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
pr['title']
) else ' ',
'pr_augur_contributor_id': pr['cntrb_id'] if (
pr['cntrb_id']
) else is_nan(pr['cntrb_id']),
'pr_body': str(pr['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
pr['body']
) else None,
'pr_created_at': pr['created_at'],
'pr_updated_at': pr['updated_at'],
'pr_closed_at': pr['closed_at'] if (
pr['closed_at']
) else None,
'pr_merged_at': None if not (
pr['merged_at']
) else pr['merged_at'],
'pr_merge_commit_sha': pr['merge_commit_sha'],
'pr_teams': None,
'pr_milestone': None,
'pr_commits_url': pr['commits_url'],
'pr_review_comments_url': pr['review_comments_url'],
'pr_review_comment_url': pr['review_comment_url'],
'pr_comments_url': pr['comments_url'],
'pr_statuses_url': pr['statuses_url'],
'pr_meta_head_id': None if not (
pr['head']
) else pr['head']['label'],
'pr_meta_base_id': None if not (
pr['base']
) else pr['base']['label'],
'pr_src_issue_url': pr['issue_url'],
'pr_src_comments_url': pr['comments_url'],
'pr_src_review_comments_url': pr['review_comments_url'],
'pr_src_commits_url': pr['commits_url'],
'pr_src_statuses_url': pr['statuses_url'],
'pr_src_author_association': pr['author_association'],
'tool_source': self.tool_source + '_reviews',
'tool_version': self.tool_version,
'data_source': 'Pull Request Reviews Github API'
} for pr in inc_source_prs['insert']
]
if len(inc_source_prs['insert']) > 0 or len(inc_source_prs['update']) > 0:
#update_columns=action_map['update']['augur']
#actual_update_columns=update_columns.append('pr_closed_at').append('pr_updated_at').append('pr_merged_at')
self.bulk_insert(
self.pull_requests_table,
update=inc_source_prs['update'], unique_columns=action_map['insert']['augur'],
insert=prs_insert, update_columns=['pr_src_state', 'pr_closed_at', 'pr_updated_at', 'pr_merged_at']
)
source_data = inc_source_prs['insert'] + inc_source_prs['update']
elif not self.deep_collection:
self.logger.info(
"There are no prs to update, insert, or collect nested information for.\n"
)
#self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')
return
if self.deep_collection:
source_data = inc_source_prs['all']
# Merge source data to inserted data to have access to inserted primary keys
# I don't see why we need these. The action map should work. SPG 9/20/2021
gh_merge_fields = ['id']
augur_merge_fields = ['pr_src_id']
self.pk_source_prs += self.enrich_data_primary_keys(source_data, self.pull_requests_table,
gh_merge_fields, augur_merge_fields
)
return
#paginate endpoint with stagger enabled so that the above method can insert every 500
# self.logger.info(
# f"PR Action map is {pr_action_map}"
# )
source_prs = self.paginate_endpoint(
pr_url, action_map=pr_action_map, table=self.pull_requests_table,
where_clause=self.pull_requests_table.c.repo_id == self.repo_id,
stagger=True,
insertion_method=pk_source_increment_insert
)
# self.logger.info(
# f"PR Action map is {pr_action_map} after source_prs. The source_prs are {source_prs}."
# )
#Use the increment insert method in order to do the
#remaining pages of the paginated endpoint that weren't inserted inside the paginate_endpoint method
pk_source_increment_insert(source_prs,pr_action_map)
pk_source_prs = self.pk_source_prs
#This attribute is only needed because paginate endpoint needs to
        #send this data to the child class and this is the easiest way to do that.
self.pk_source_prs = []
return pk_source_prs
def pull_requests_model(self, entry_info, repo_id):
"""Pull Request data collection function. Query GitHub API for PhubRs.
:param entry_info: A dictionary consisiting of 'git_url' and 'repo_id'
:type entry_info: dict
"""
github_url = self.task_info['given']['github_url']
# self.query_github_contributors(self.task_info, self.repo_id)
self.logger.info("Beginning collection of Pull Requests...\n")
self.logger.info(f"Repo ID: {self.repo_id}, Git URL: {github_url}\n")
pk_source_prs = []
try:
pk_source_prs = self._get_pk_source_prs()
except Exception as e:
self.logger.debug(f"Pull Requests model failed with {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
#self.write_debug_data(pk_source_prs, 'pk_source_prs')
if pk_source_prs:
try:
self.pull_request_comments_model(pk_source_prs)
self.logger.info(f"Pull request comments model.")
except Exception as e:
self.logger.debug(f"PR comments model failed on {e}. exception registered.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
try:
self.pull_request_events_model(pk_source_prs)
self.logger.info(f"Pull request events model.")
except Exception as e:
self.logger.debug(f"PR events model failed on {e}. exception registered for pr_step.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
try:
self.pull_request_reviews_model(pk_source_prs)
self.logger.info(f"Pull request reviews model.")
except Exception as e:
self.logger.debug(f"PR reviews model failed on {e}. exception registered for pr_step.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
try:
self.pull_request_nested_data_model(pk_source_prs)
self.logger.info(f"Pull request nested data model.")
except Exception as e:
self.logger.debug(f"PR nested model failed on {e}. exception registered for pr_step.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
self.logger.debug("finished running through four models.")
self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')
def pull_request_comments_model(self, pk_source_prs):
comments_url = (
f"https://api.github.com/repos/{self.owner}/{self.repo}/pulls/comments?per_page=100"
"&page={}"
)
# We should be capturing the following additional data here:
# 1. The Platform message ID : Most efficient way to dup check
        # 2. The platform issue ID and/or PR ID so queries are easier
        # 3. The REPO_ID so queries are easier.
        ## ALL THIS INFO IS IN THE PLATFORM JSON AND WE ARE IGNORING IT.
comment_action_map = {
'insert': {
'source': ['id'],
'augur': ['platform_msg_id']
}
}
comment_ref_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_message_ref_src_comment_id']
}
}
def pr_comments_insert(inc_pr_comments, comment_action_map, comment_ref_action_map):
#self.write_debug_data(pr_comments, 'pr_comments')
inc_pr_comments['insert'] = self.text_clean(inc_pr_comments['insert'], 'body')
#This is sending empty data to enrich_cntrb_id, fix with check
if len(inc_pr_comments['insert']) > 0:
inc_pr_comments['insert'] = self.enrich_cntrb_id(
inc_pr_comments['insert'], str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts in action map.")
pr_comments_insert = [
{
'pltfrm_id': self.platform_id,
'msg_text': str(comment['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
comment['body']
) else ' ',
'msg_timestamp': comment['created_at'],
'cntrb_id': comment['cntrb_id'] if (
comment['cntrb_id']
) else is_nan(comment['cntrb_id']),
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id,
'platform_msg_id': int(comment['id']),
'platform_node_id': comment['node_id']
} for comment in inc_pr_comments['insert']
]
try:
self.bulk_insert(self.message_table, insert=pr_comments_insert,
unique_columns=comment_action_map['insert']['augur'])
except Exception as e:
self.logger.debug(f"PR comments data model failed on {e}. exception registered.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
""" PR MESSAGE REF TABLE """
try:
c_pk_source_comments = self.enrich_data_primary_keys(
inc_pr_comments['insert'], self.message_table,
comment_action_map['insert']['source'],
comment_action_map['insert']['augur'] ##, in_memory=True ## removed to align with GitHub issue worker
)
except Exception as e:
self.logger.info(f"bulk insert of comments failed on {e}. exception registerred")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
self.write_debug_data(c_pk_source_comments, 'c_pk_source_comments')
self.logger.info(f"log of the length of c_pk_source_comments {len(c_pk_source_comments)}.")
try:
# both_pk_source_comments = self.enrich_data_primary_keys(
# c_pk_source_comments, self.pull_requests_table,
# ['issue_url'], ['pr_issue_url'], in_memory=True)
both_pk_source_comments = self.enrich_data_primary_keys(
c_pk_source_comments, self.pull_requests_table,
['pull_request_url'], ['pr_url'])
self.logger.info(f"log of the length of both_pk_source_comments {len(both_pk_source_comments)}.")
except Exception as e:
self.logger.info(f"bulk insert of comments failed on {e}. exception registerred")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
self.logger.debug(f"length of both_pk_source_comments: {len(both_pk_source_comments)}")
pr_message_ref_insert = [
{
'pull_request_id': comment['pull_request_id'],
'msg_id': comment['msg_id'], # to cast, or not to cast. That is the question. 12/6/2021
'pr_message_ref_src_comment_id': int(comment['id']),
'pr_message_ref_src_node_id': comment['node_id'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for comment in both_pk_source_comments
]
try:
self.logger.debug(f"inserting into {self.pull_request_message_ref_table}.")
self.bulk_insert(self.pull_request_message_ref_table, insert=pr_message_ref_insert,
unique_columns=comment_ref_action_map['insert']['augur'])
except Exception as e:
self.logger.info(f"message inserts failed with: {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
# TODO: add relational table so we can include a where_clause here
try:
pr_comments = self.paginate_endpoint(
comments_url, action_map=comment_action_map, table=self.message_table,
where_clause=self.message_table.c.msg_id.in_(
[
msg_row[0] for msg_row in self.db.execute(
s.sql.select(
[self.pull_request_message_ref_table.c.msg_id]
).where(
self.pull_request_message_ref_table.c.pull_request_id.in_(
set(pd.DataFrame(pk_source_prs)['pull_request_id'])
)
)
).fetchall()
]
),
stagger=True,
insertion_method=pr_comments_insert
)
pr_comments_insert(pr_comments,comment_action_map,comment_ref_action_map)
self.logger.info(f"comments inserted for repo_id: {self.repo_id}")
return
except Exception as e:
self.logger.info(f"exception registered in paginate endpoint for issue comments: {e}")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
def pull_request_events_model(self, pk_source_prs=[]):
if not pk_source_prs:
pk_source_prs = self._get_pk_source_prs()
events_url = (
f"https://api.github.com/repos/{self.owner}/{self.repo}/issues/events?per_page=100&"
"page={}"
)
# Get events that we already have stored
# Set pseudo key (something other than PK) to
# check duplicates with
event_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_platform_event_id']
}
}
self.logger.info(pk_source_prs[0])
self.logger.info(pd.DataFrame(pk_source_prs).columns)
self.logger.info(pd.DataFrame(pk_source_prs))
#list to hold contributors needing insertion or update
#12/12/2021 -- Changed to new_paginate_endpoint because it works for issue_events
pr_events = self.new_paginate_endpoint(
events_url, table=self.pull_request_events_table, action_map=event_action_map,
where_clause=self.pull_request_events_table.c.pull_request_id.in_(
set(pd.DataFrame(pk_source_prs)['pull_request_id'])
)
)
#self.write_debug_data(pr_events, 'pr_events')
        ## Remember: parameters after the self.table are the
        ## GitHub column name, followed by the corresponding Augur table column name.
        ## NOTE: When we are enriching primary keys, we are passing keys
        ## FROM the table we are processing, in THIS case, the events table,
        ## TO THE TABLE THAT IS THE ULTIMATE PARENT AND HAS THE SAME COLUMNS:
        ## the pull request table, the "id" of the pull request (confusingly
        ## returned by the GitHub issue events API), and the place that value is
        ## stored in the PULL REQUESTS table. 12/12/2021, SPG
pk_pr_events = self.enrich_data_primary_keys(pr_events['insert'],
#self.pull_requests_table, ['issue.id'], ['pr_src_id']) #changed 12/12/2021 to mirror issues events
self.pull_requests_table, ['issue.url'], ['pr_issue_url'], in_memory=True) # changed back
#self.write_debug_data(pk_pr_events, 'pk_pr_events')
if len(pk_pr_events):
pk_pr_events = pd.DataFrame(pk_pr_events)[
['id', 'pull_request_id', 'node_id', 'url', 'actor', 'created_at', 'event', 'commit_id']
].to_dict(orient='records')
if len(pk_pr_events) > 0:
pk_pr_events = self.enrich_cntrb_id(
pk_pr_events, str('actor.login'), action_map_additions={
'insert': {
'source': ['actor.node_id'],
'augur': ['gh_node_id']
}
}, prefix='actor.'
)
else:
self.logger.info("Contributor enrichment is not needed, no data provided.")
for index, issue in enumerate(pk_pr_events):
if 'cntrb_id' not in issue:
self.logger.debug(f"Exception registered. Dict has null cntrb_id: {issue}")
# 'reporter_id': issue['cntrb_id'] if (
# issue['cntrb_id']
# ) else is_na(issue['cntrb_id']),
pr_events_insert = [
{
'pull_request_id': int(event['pull_request_id']),
'cntrb_id': event['cntrb_id'] if (
event['cntrb_id']
) else is_nan(event['cntrb_id']),
'action': event['event'],
'action_commit_hash': event['commit_id'],
'created_at': event['created_at'] if (
event['created_at']
) else None,
                'issue_event_src_id': int(event['id']), # event source id is just the event id; issue.id is the corresponding PR
'node_id': event['node_id'],
'node_url': event['url'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
                'pr_platform_event_id': int(event['id']), # (duplicate for readability) event source id is just the event id; issue.id is the corresponding PR
'platform_id': self.platform_id,
'repo_id': self.repo_id
} for event in pk_pr_events if event['actor'] is not None #12/6/2021 added event['cntrb_id'] as NULLs were getting through.
]
self.bulk_insert(self.pull_request_events_table, insert=pr_events_insert,
unique_columns=event_action_map['insert']['augur']
)
return pr_events['all']
def pull_request_reviews_model(self, pk_source_prs=[]):
if not pk_source_prs:
pk_source_prs = self._get_pk_source_prs()
review_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_review_src_id']
},
'update': {
'source': ['state'],
'augur': ['pr_review_state']
}
}
reviews_urls = [
(
f"https://api.github.com/repos/{self.owner}/{self.repo}/pulls/{pr['number']}/"
"reviews?per_page=100", {'pull_request_id': pr['pull_request_id']}
)
for pr in pk_source_prs
]
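        # Each tuple pairs an endpoint URL with extra fields (here the augur
        # pull_request_id); multi_thread_urls presumably attaches those fields to
        # every record fetched from that URL so each review can be tied back to
        # its PR.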
pr_pk_source_reviews = self.multi_thread_urls(reviews_urls)
self.write_debug_data(pr_pk_source_reviews, 'pr_pk_source_reviews')
cols_to_query = self.get_relevant_columns(
self.pull_request_reviews_table, review_action_map
)
#I don't know what else this could be used for so I'm using it for the function call
table_values = self.db.execute(s.sql.select(cols_to_query).where(
self.pull_request_reviews_table.c.pull_request_id.in_(
set(pd.DataFrame(pk_source_prs)['pull_request_id'])
))).fetchall()
source_reviews_insert, source_reviews_update = self.organize_needed_data(
pr_pk_source_reviews, table_values=table_values,
action_map=review_action_map
)
if len(source_reviews_insert) > 0:
source_reviews_insert = self.enrich_cntrb_id(
source_reviews_insert, str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, source_reviews_insert is empty.")
reviews_insert = [
{
'pull_request_id': review['pull_request_id'],
'cntrb_id': review['cntrb_id'],
'pr_review_author_association': review['author_association'],
'pr_review_state': review['state'],
'pr_review_body': str(review['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
review['body']
) else None,
'pr_review_submitted_at': review['submitted_at'] if (
'submitted_at' in review
) else None,
'pr_review_src_id': int(float(review['id'])), #12/3/2021 cast as int due to error. # Here, `pr_review_src_id` is mapped to `id` SPG 11/29/2021. This is fine. Its the review id.
'pr_review_node_id': review['node_id'],
'pr_review_html_url': review['html_url'],
'pr_review_pull_request_url': review['pull_request_url'],
'pr_review_commit_id': review['commit_id'],
'tool_source': 'pull_request_reviews model',
'tool_version': self.tool_version+ "_reviews",
'data_source': self.data_source,
'repo_id': self.repo_id,
'platform_id': self.platform_id
} for review in source_reviews_insert if review['user'] and 'login' in review['user']
]
try:
self.bulk_insert(
self.pull_request_reviews_table, insert=reviews_insert, update=source_reviews_update,
unique_columns=review_action_map['insert']['augur'],
update_columns=review_action_map['update']['augur']
)
except Exception as e:
self.logger.debug(f"PR reviews data model failed on {e}. exception registered.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
# Merge source data to inserted data to have access to inserted primary keys
gh_merge_fields = ['id']
augur_merge_fields = ['pr_review_src_id']
both_pr_review_pk_source_reviews = self.enrich_data_primary_keys(
pr_pk_source_reviews, self.pull_request_reviews_table, gh_merge_fields,
augur_merge_fields, in_memory=True
)
self.write_debug_data(both_pr_review_pk_source_reviews, 'both_pr_review_pk_source_reviews')
# Review Comments
# https://api.github.com/repos/chaoss/augur/pulls
review_msg_url = (f'https://api.github.com/repos/{self.owner}/{self.repo}/pulls' +
'/comments?per_page=100&page={}')
        '''This includes the two columns that are in the natural key for messages.
        It's important to note the inclusion of tool_source on the augur side.
        That exists because of an anomaly in the GitHub API, where both the
        messages API for issues and the issues API will return all the messages
        related to pull requests.
        Logically, the only way to tell the difference is that, in the case of
        issues, the pull_request_id in the issues table is null.
        The pull_request_id in the pull_requests table is never null.
        So, issues has the full set of issues; pull requests has the full set of
        pull requests; there are no issues in the pull requests table.
        '''
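        # A sketch of the distinction described above (assuming the augur schema
        # this worker writes to):
        #   -- rows in `issues` that are really PRs carry a pull_request_id,
        #   -- while "pure" issues leave it null:
        #   SELECT issue_id FROM issues WHERE pull_request_id IS NULL;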
review_msg_action_map = {
'insert': {
'source': ['id'],
'augur': ['platform_msg_id']
}
}
''' This maps to the two unique columns that constitute the natural key in the table.
'''
review_msg_ref_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_review_msg_src_id']
}
}
in_clause = [] if len(both_pr_review_pk_source_reviews) == 0 else set(pd.DataFrame(both_pr_review_pk_source_reviews)['pr_review_id'])
review_msgs = self.paginate_endpoint(
review_msg_url, action_map=review_msg_action_map, table=self.message_table,
where_clause=self.message_table.c.msg_id.in_(
[
msg_row[0] for msg_row in self.db.execute(
s.sql.select([self.pull_request_review_message_ref_table.c.msg_id]).where(
self.pull_request_review_message_ref_table.c.pr_review_id.in_(
in_clause
)
)
).fetchall()
]
)
)
self.write_debug_data(review_msgs, 'review_msgs')
if len(review_msgs['insert']) > 0:
review_msgs['insert'] = self.enrich_cntrb_id(
review_msgs['insert'], str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, nothing to insert from the action map.")
review_msg_insert = [
{
'pltfrm_id': self.platform_id,
'msg_text': str(comment['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
comment['body']
) else None,
'msg_timestamp': comment['created_at'],
'cntrb_id': comment['cntrb_id'],
'tool_source': self.tool_source +"_reviews",
'tool_version': self.tool_version + "_reviews",
'data_source': 'pull_request_reviews model',
'repo_id': self.repo_id,
'platform_msg_id': int(float(comment['id'])),
'platform_node_id': comment['node_id']
} for comment in review_msgs['insert']
if comment['user'] and 'login' in comment['user']
]
self.bulk_insert(self.message_table, insert=review_msg_insert,
unique_columns = review_msg_action_map['insert']['augur'])
# PR REVIEW MESSAGE REF TABLE
c_pk_source_comments = self.enrich_data_primary_keys(
review_msgs['insert'], self.message_table, review_msg_action_map['insert']['source'],
review_msg_action_map['insert']['augur'], in_memory=True
)
self.write_debug_data(c_pk_source_comments, 'c_pk_source_comments')
''' The action map does not apply here because this is a reference to the parent
table. '''
both_pk_source_comments = self.enrich_data_primary_keys(
c_pk_source_comments, self.pull_request_reviews_table, ['pull_request_review_id'],
['pr_review_src_id'], in_memory=True
)
self.write_debug_data(both_pk_source_comments, 'both_pk_source_comments')
pr_review_msg_ref_insert = [
{
'pr_review_id': comment['pr_review_id'],
'msg_id': comment['msg_id'], #msg_id turned up null when I removed the cast to int ..
'pr_review_msg_url': comment['url'],
'pr_review_src_id': int(comment['pull_request_review_id']),
'pr_review_msg_src_id': int(comment['id']),
'pr_review_msg_node_id': comment['node_id'],
'pr_review_msg_diff_hunk': comment['diff_hunk'],
'pr_review_msg_path': comment['path'],
'pr_review_msg_position': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
comment['position'] #12/6/2021 - removed casting from value check
) else comment['position'],
'pr_review_msg_original_position': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
comment['original_position'] #12/6/2021 - removed casting from value check
) else comment['original_position'],
'pr_review_msg_commit_id': str(comment['commit_id']),
'pr_review_msg_original_commit_id': str(comment['original_commit_id']),
'pr_review_msg_updated_at': comment['updated_at'],
'pr_review_msg_html_url': comment['html_url'],
'pr_url': comment['pull_request_url'],
'pr_review_msg_author_association': comment['author_association'],
'pr_review_msg_start_line': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
comment['start_line'] #12/6/2021 - removed casting from value check
) else comment['start_line'],
'pr_review_msg_original_start_line': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
comment['original_start_line'] #12/6/2021 - removed casting from value check
) else comment['original_start_line'],
'pr_review_msg_start_side': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
str(comment['start_side'])
) else str(comment['start_side']),
'pr_review_msg_line': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
comment['line'] #12/6/2021 - removed casting from value check
) else comment['line'],
'pr_review_msg_original_line': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
comment['original_line'] #12/6/2021 - removed casting from value check
) else comment['original_line'],
'pr_review_msg_side': s.sql.expression.null() if not ( # This had to be changed because "None" is JSON. SQL requires NULL SPG 11/28/2021
str(comment['side'])
) else str(comment['side']),
'tool_source': 'pull_request_reviews model',
'tool_version': self.tool_version + "_reviews",
'data_source': self.data_source,
'repo_id': self.repo_id
} for comment in both_pk_source_comments
]
try:
self.bulk_insert(
self.pull_request_review_message_ref_table,
insert=pr_review_msg_ref_insert, unique_columns = review_msg_ref_action_map['insert']['augur']
)
except Exception as e:
self.logger.debug(f"bulk insert for review message ref failed on : {e}")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
def pull_request_nested_data_model(self, pk_source_prs=[]):
try:
if not pk_source_prs:
pk_source_prs = self._get_pk_source_prs()
#prdata = json.loads(json.dumps(pk_source_prs))
#self.logger.debug(f"nested data model pk_source_prs structure is: {prdata}.")
else:
#prdata = json.loads(json.dumps(pk_source_prs))
self.logger.debug("nested model loaded.")
except Exception as e:
            self.logger.debug(f'getting source PRs failed for nested model on {e}.')
pass
labels_all = []
reviewers_all = []
assignees_all = []
meta_all = []
for index, pr in enumerate(pk_source_prs):
# PR Labels
source_labels = pd.DataFrame(pr['labels'])
source_labels['pull_request_id'] = pr['pull_request_id']
labels_all += source_labels.to_dict(orient='records')
# Reviewers
source_reviewers = pd.DataFrame(pr['requested_reviewers'])
source_reviewers['pull_request_id'] = pr['pull_request_id']
reviewers_all += source_reviewers.to_dict(orient='records')
# Assignees
source_assignees = pd.DataFrame(pr['assignees'])
source_assignees['pull_request_id'] = pr['pull_request_id']
assignees_all += source_assignees.to_dict(orient='records')
# Meta
pr['head'].update(
{'pr_head_or_base': 'head', 'pull_request_id': pr['pull_request_id']}
)
pr['base'].update(
{'pr_head_or_base': 'base', 'pull_request_id': pr['pull_request_id']}
)
meta_all += [pr['head'], pr['base']]
pr_nested_loop = 1
while (pr_nested_loop <5):
try:
if pr_nested_loop == 1:
pr_nested_loop += 1
# PR labels insertion
label_action_map = {
'insert': {
'source': ['pull_request_id', 'id'],
'augur': ['pull_request_id', 'pr_src_id']
}
}
table_values_pr_labels = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_labels_table,label_action_map))
).fetchall()
source_labels_insert, _ = self.organize_needed_data(
labels_all, table_values=table_values_pr_labels, action_map=label_action_map
)
labels_insert = [
{
'pull_request_id': label['pull_request_id'],
'pr_src_id': int(label['id']),
'pr_src_node_id': label['node_id'],
'pr_src_url': label['url'],
'pr_src_description': label['name'],
'pr_src_color': label['color'],
'pr_src_default_bool': label['default'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for label in source_labels_insert
]
self.bulk_insert(self.pull_request_labels_table, insert=labels_insert)
elif pr_nested_loop == 2:
pr_nested_loop += 1
# PR reviewers insertion
reviewer_action_map = {
'insert': {
'source': ['pull_request_id', 'id'],
'augur': ['pull_request_id', 'pr_reviewer_src_id']
}
}
table_values_issue_labels = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_reviewers_table,reviewer_action_map))
).fetchall()
source_reviewers_insert, _ = self.organize_needed_data(
reviewers_all, table_values=table_values_issue_labels,
action_map=reviewer_action_map
)
if len(source_reviewers_insert) > 0:
source_reviewers_insert = self.enrich_cntrb_id(
source_reviewers_insert, str('login'), action_map_additions={
'insert': {
'source': ['node_id'],
'augur': ['gh_node_id']
}
}
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts provided.")
reviewers_insert = [
{
'pull_request_id': reviewer['pull_request_id'],
'cntrb_id': reviewer['cntrb_id'],
'pr_reviewer_src_id': int(float(reviewer['id'])),
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for reviewer in source_reviewers_insert if 'login' in reviewer
]
self.bulk_insert(self.pull_request_reviewers_table, insert=reviewers_insert)
elif pr_nested_loop ==3:
# PR assignees insertion
pr_nested_loop += 1
assignee_action_map = {
'insert': {
'source': ['pull_request_id', 'id'],
'augur': ['pull_request_id', 'pr_assignee_src_id']
}
}
table_values_assignees_labels = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_assignees_table,assignee_action_map))
).fetchall()
source_assignees_insert, _ = self.organize_needed_data(
assignees_all, table_values=table_values_assignees_labels,
action_map=assignee_action_map
)
if len(source_assignees_insert) > 0:
source_assignees_insert = self.enrich_cntrb_id(
source_assignees_insert, str('login'), action_map_additions={
'insert': {
'source': ['node_id'],
'augur': ['gh_node_id']
}
}
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts provided.")
assignees_insert = [
{
'pull_request_id': assignee['pull_request_id'],
'contrib_id': assignee['cntrb_id'],
'pr_assignee_src_id': int(assignee['id']),
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for assignee in source_assignees_insert if 'login' in assignee
]
self.bulk_insert(self.pull_request_assignees_table, insert=assignees_insert)
elif pr_nested_loop == 4:
# PR meta insertion
pr_nested_loop += 1
meta_action_map = {
'insert': {
'source': ['pull_request_id', 'sha', 'pr_head_or_base'],
'augur': ['pull_request_id', 'pr_sha', 'pr_head_or_base']
}
}
table_values_pull_request_meta = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_meta_table,meta_action_map))
).fetchall()
source_meta_insert, _ = self.organize_needed_data(
meta_all, table_values=table_values_pull_request_meta, action_map=meta_action_map
)
if len(source_meta_insert) > 0:
source_meta_insert = self.enrich_cntrb_id(
source_meta_insert, str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, nothing in source_meta_insert.")
meta_insert = [
{
'pull_request_id': meta['pull_request_id'],
'pr_head_or_base': meta['pr_head_or_base'],
'pr_src_meta_label': meta['label'],
'pr_src_meta_ref': meta['ref'],
'pr_sha': meta['sha'],
'cntrb_id': meta['cntrb_id'], ## Cast as int for the `nan` user by SPG on 11/28/2021; removed 12/6/2021
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for meta in source_meta_insert if 'login' in meta['user'] # trying to fix bug SPG 11/29/2021 #meta['user'] and 'login' in meta['user']
] # reverted above to see if it works with other fixes.
self.bulk_insert(self.pull_request_meta_table, insert=meta_insert)
except Exception as e:
self.logger.debug(f"Nested Model error at loop {pr_nested_loop} : {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
continue
def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id):
""" TODO: insert this data as extra columns in the meta table """
try:
self.logger.info(f'Querying PR {pr_repo_type} repo')
table = 'pull_request_repo'
duplicate_col_map = {'pr_src_repo_id': 'id'}
##TODO Need to add pull request closed here.
update_col_map = {}
table_pkey = 'pr_repo_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
pr_repo_table_values = self.get_table_values(cols_query, [table])
new_pr_repo = self.assign_tuple_action(
[pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey
)[0]
if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']:
cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login'])
else:
cntrb_id = 1
pr_repo = {
'pr_repo_meta_id': pr_meta_id,
'pr_repo_head_or_base': pr_repo_type,
'pr_src_repo_id': new_pr_repo['id'],
# 'pr_src_node_id': new_pr_repo[0]['node_id'],
'pr_src_node_id': None,
'pr_repo_name': new_pr_repo['name'],
'pr_repo_full_name': new_pr_repo['full_name'],
'pr_repo_private_bool': new_pr_repo['private'],
'pr_cntrb_id': cntrb_id, #12/6/2021 removed int casting
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if new_pr_repo['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo))
self.logger.info(f"Added PR {pr_repo_type} repo {result.inserted_primary_key}")
self.results_counter += 1
self.logger.info(
f"Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}"
)
except Exception as e:
self.logger.debug(f"repo exception registerred for PRs: {e}")
self.logger.debug(f"Nested Model error at loop {pr_nested_loop} : {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
| 45.760028
| 192
| 0.534344
|
import ast
import json
import logging
import math
import os
import sys
import time
import traceback
from workers.worker_git_integration import WorkerGitInterfaceable
from numpy.lib.utils import source
import requests
import copy
from datetime import datetime
from multiprocessing import Process, Queue
import pandas as pd
import sqlalchemy as s
from sqlalchemy.sql.expression import bindparam
from workers.worker_base import Worker
class GitHubPullRequestWorker(WorkerGitInterfaceable):
def __init__(self, config={}):
worker_type = "pull_request_worker"
given = [['github_url']]
models = ['pull_requests', 'pull_request_commits', 'pull_request_files']
data_tables = ['contributors', 'pull_requests',
'pull_request_assignees', 'pull_request_events', 'pull_request_labels',
'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo',
'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits',
'pull_request_files', 'pull_request_reviews', 'pull_request_review_message_ref']
operations_tables = ['worker_history', 'worker_job']
self.deep_collection = True
self.platform_id = 25150
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
self.tool_source = 'GitHub Pull Request Worker'
self.tool_version = '1.0.0'
self.data_source = 'GitHub API'
self.pk_source_prs = []
def is_nan(value):
return type(value) == float and math.isnan(value)
def graphql_paginate(self, query, data_subjects, before_parameters=None):
self.logger.info(f'Start paginate with params: \n{data_subjects} '
f'\n{before_parameters}')
def all_items(dictionary):
for key, value in dictionary.items():
if type(value) is dict:
yield (key, value)
yield from all_items(value)
else:
yield (key, value)
if not before_parameters:
before_parameters = {}
for subject, _ in all_items(data_subjects):
before_parameters[subject] = ''
start_cursor = None
has_previous_page = True
base_url = 'https://api.github.com/graphql'
tuples = []
def find_root_of_subject(data, key_subject):
self.logger.debug(f'Finding {key_subject} root of {data}')
key_nest = None
for subject, nest in data.items():
if key_subject in nest:
key_nest = nest[key_subject]
break
elif type(nest) == dict:
return find_root_of_subject(nest, key_subject)
else:
raise KeyError
return key_nest
for data_subject, nest in data_subjects.items():
self.logger.debug(f'Beginning paginate process for field {data_subject} '
f'for query: {query}')
page_count = 0
while has_previous_page:
page_count += 1
num_attempts = 3
success = False
for attempt in range(num_attempts):
self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint '
f'page number {page_count}\n')
response = requests.post(base_url, json={'query': query.format(
**before_parameters)}, headers=self.headers)
self.update_gh_rate_limit(response)
try:
data = response.json()
except:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['type'] == 'NOT_FOUND':
self.logger.warning(
"Github repo was not found or does not exist for "
f"endpoint: {base_url}\n"
)
break
if data['errors'][0]['type'] == 'RATE_LIMITED':
self.update_gh_rate_limit(response)
num_attempts -= 1
continue
if 'data' in data:
success = True
root = find_root_of_subject(data, data_subject)
page_info = root['pageInfo']
data = root['edges']
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info(
"Github repo was not found or does not exist for endpoint: "
f"{base_url}\n"
)
break
if data['message'] == (
"You have triggered an abuse detection mechanism. Please wait a "
"few minutes before you try again."
):
num_attempts -= 1
self.update_gh_rate_limit(response, temporarily_disable=True)
if data['message'] == "Bad credentials":
self.update_gh_rate_limit(response, bad_credentials=True)
if not success:
self.logger.info('GraphQL query failed: {}'.format(query))
break
before_parameters.update({
data_subject: ', before: \"{}\"'.format(page_info['startCursor'])
})
has_previous_page = page_info['hasPreviousPage']
tuples += data
self.logger.info(f"Paged through {page_count} pages and "
f"collected {len(tuples)} data points\n")
if not nest:
return tuples
return tuples + self.graphql_paginate(query, data_subjects[subject],
before_parameters=before_parameters)
def pull_request_files_model(self, task_info, repo_id):
pr_number_sql = s.sql.text("""
SELECT DISTINCT pr_src_number as pr_src_number, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(self.repo_id))
pr_numbers = pd.read_sql(pr_number_sql, self.db, params={})
pr_file_rows = []
for index, pull_request in enumerate(pr_numbers.itertuples()):
self.logger.debug(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}')
query = """
{{
repository(owner:"%s", name:"%s"){{
pullRequest (number: %s) {{
""" % (self.owner, self.repo, pull_request.pr_src_number) + """
files (last: 100{files}) {{
pageInfo {{
hasPreviousPage
hasNextPage
endCursor
startCursor
}}
edges {{
node {{
additions
deletions
path
}}
}}
}}
}}
}}
}}
"""
pr_file_rows += [{
'pull_request_id': pull_request.pull_request_id,
'pr_file_additions': pr_file['node']['additions'],
'pr_file_deletions': pr_file['node']['deletions'],
'pr_file_path': pr_file['node']['path'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
'repo_id': self.repo_id,
} for pr_file in self.graphql_paginate(query, {'files': None})]
table_values_sql = s.sql.text("""
SELECT pull_request_files.*
FROM pull_request_files, pull_requests
WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id
AND pull_requests.repo_id = :repo_id
""")
self.logger.debug(
f'Getting table values with the following PSQL query: \n{table_values_sql}\n'
)
table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': self.repo_id})
if len(pr_file_rows) > 0:
table_columns = pr_file_rows[0].keys()
else:
self.logger.debug(f'No rows need insertion for repo {self.repo_id}\n')
self.register_task_completion(task_info, self.repo_id, 'pull_request_files')
return
pr_file_rows_df = pd.DataFrame(pr_file_rows)
pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id'])
dupe_columns = ['pull_request_id', 'pr_file_path']
update_columns = ['pr_file_additions', 'pr_file_deletions']
need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'),
how='outer', indicator=True, on=dupe_columns).loc[
lambda x : x['_merge']=='left_only'][table_columns]
need_updates = pr_file_rows_df.merge(table_values, on=dupe_columns, suffixes=('','_table'),
how='inner',indicator=False)[table_columns].merge(table_values,
on=update_columns, suffixes=('','_table'), how='outer',indicator=True
).loc[lambda x : x['_merge']=='left_only'][table_columns]
need_updates['b_pull_request_id'] = need_updates['pull_request_id']
need_updates['b_pr_file_path'] = need_updates['pr_file_path']
pr_file_insert_rows = need_insertion.to_dict('records')
pr_file_update_rows = need_updates.to_dict('records')
self.logger.debug(
f'Repo id {self.repo_id} needs {len(need_insertion)} insertions and '
f'{len(need_updates)} updates.\n'
)
if len(pr_file_update_rows) > 0:
success = False
while not success:
try:
self.db.execute(
                        self.pull_request_files_table.update().where(
                            s.and_(
                                self.pull_request_files_table.c.pull_request_id == bindparam(
                                    'b_pull_request_id'
                                ),
                                self.pull_request_files_table.c.pr_file_path == bindparam(
                                    'b_pr_file_path'
                                )
                            )
).values(
pr_file_additions=bindparam('pr_file_additions'),
pr_file_deletions=bindparam('pr_file_deletions')
), pr_file_update_rows
)
success = True
except Exception as e:
self.logger.info('error: {}'.format(e))
time.sleep(5)
if len(pr_file_insert_rows) > 0:
success = False
while not success:
try:
self.db.execute(
self.pull_request_files_table.insert(),
pr_file_insert_rows
)
success = True
except Exception as e:
self.logger.info('error: {}'.format(e))
time.sleep(5)
self.register_task_completion(task_info, self.repo_id, 'pull_request_files')
def pull_request_commits_model(self, task_info, repo_id):
self.logger.info("Querying starting ids info...\n")
self.history_id = self.get_max_id(
'worker_history', 'history_id', operations_table=True
) + 1
self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id')
self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id')
pr_url_sql = s.sql.text("""
SELECT DISTINCT pr_url, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(self.repo_id))
urls = pd.read_sql(pr_url_sql, self.db, params={})
for pull_request in urls.itertuples():
commits_url = pull_request.pr_url + '/commits?page={}'
table = 'pull_request_commits'
table_pkey = 'pr_cmt_id'
duplicate_col_map = {'pr_cmt_sha': 'sha'}
update_col_map = {}
pr_commits = self.paginate(
commits_url, duplicate_col_map, update_col_map, table, table_pkey,
where_clause="where pull_request_id = {}".format(pull_request.pull_request_id)
)
for pr_commit in pr_commits:
try:
if pr_commit['flag'] == 'need_insertion':
pr_commit_row = {
'pull_request_id': pull_request.pull_request_id,
'pr_cmt_sha': pr_commit['sha'],
'pr_cmt_node_id': pr_commit['node_id'],
'pr_cmt_message': pr_commit['commit']['message'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
'repo_id': self.repo_id,
}
result = self.db.execute(
self.pull_request_commits_table.insert().values(pr_commit_row)
)
self.logger.info(
f"Inserted Pull Request Commit: {result.inserted_primary_key}\n"
)
except Exception as e:
self.logger.debug(f"pr_commit exception registered: {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
continue
self.register_task_completion(self.task_info, self.repo_id, 'pull_request_commits')
def _get_pk_source_prs(self):
pr_url = (
f"https://api.github.com/repos/{self.owner}/{self.repo}/pulls?state=all&"
"direction=asc&per_page=100&page={}"
)
#Database action map is essential in order to avoid duplicates messing up the data
## 9/20/2021: SPG added closed_at, updated_at, and merged_at to the update map.
## 11/29/2021: And this is the cause of PR updates not working because it doesn't handle NULLs ... I think.
pr_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_src_id']
},
'update': {
'source': ['state'],
'augur': ['pr_src_state']
}
}
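        # Illustration of how this map routes rows (hypothetical values, not
        # live API data): a source PR {'id': 7, 'state': 'closed'} whose id is
        # absent from pr_src_id in the table lands in the 'insert' bucket;
        # if pr_src_id == 7 already exists but pr_src_state is still 'open',
        # the row is routed to 'update' instead.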
def pk_source_increment_insert(inc_source_prs, action_map):
self.write_debug_data(inc_source_prs, 'source_prs')
if len(inc_source_prs['all']) == 0:
self.logger.info("There are no prs for this repository.\n")
self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')
return False
def is_valid_pr_block(issue):
return (
'pull_request' in issue and issue['pull_request']
and isinstance(issue['pull_request'], dict) and 'url' in issue['pull_request']
)
if len(inc_source_prs['insert']) > 0:
inc_source_prs['insert'] = self.enrich_cntrb_id(
inc_source_prs['insert'], str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts in action map.")
prs_insert = [
{
'repo_id': self.repo_id,
'pr_url': pr['url'],
'pr_src_id': pr['id'],
                    'pr_src_node_id': pr['node_id'],
                    'pr_html_url': pr['html_url'],
'pr_diff_url': pr['diff_url'],
'pr_patch_url': pr['patch_url'],
'pr_issue_url': pr['issue_url'],
'pr_augur_issue_id': None,
'pr_src_number': pr['number'],
'pr_src_state': pr['state'],
'pr_src_locked': pr['locked'],
'pr_src_title': str(pr['title']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
pr['title']
) else ' ',
'pr_augur_contributor_id': pr['cntrb_id'] if (
pr['cntrb_id']
) else is_nan(pr['cntrb_id']),
'pr_body': str(pr['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
pr['body']
) else None,
'pr_created_at': pr['created_at'],
'pr_updated_at': pr['updated_at'],
'pr_closed_at': pr['closed_at'] if (
pr['closed_at']
) else None,
'pr_merged_at': None if not (
pr['merged_at']
) else pr['merged_at'],
'pr_merge_commit_sha': pr['merge_commit_sha'],
'pr_teams': None,
'pr_milestone': None,
'pr_commits_url': pr['commits_url'],
'pr_review_comments_url': pr['review_comments_url'],
'pr_review_comment_url': pr['review_comment_url'],
'pr_comments_url': pr['comments_url'],
'pr_statuses_url': pr['statuses_url'],
'pr_meta_head_id': None if not (
pr['head']
) else pr['head']['label'],
'pr_meta_base_id': None if not (
pr['base']
) else pr['base']['label'],
'pr_src_issue_url': pr['issue_url'],
'pr_src_comments_url': pr['comments_url'],
'pr_src_review_comments_url': pr['review_comments_url'],
'pr_src_commits_url': pr['commits_url'],
'pr_src_statuses_url': pr['statuses_url'],
'pr_src_author_association': pr['author_association'],
'tool_source': self.tool_source + '_reviews',
'tool_version': self.tool_version,
'data_source': 'Pull Request Reviews Github API'
} for pr in inc_source_prs['insert']
]
if len(inc_source_prs['insert']) > 0 or len(inc_source_prs['update']) > 0:
self.bulk_insert(
self.pull_requests_table,
update=inc_source_prs['update'], unique_columns=action_map['insert']['augur'],
insert=prs_insert, update_columns=['pr_src_state', 'pr_closed_at', 'pr_updated_at', 'pr_merged_at']
)
source_data = inc_source_prs['insert'] + inc_source_prs['update']
elif not self.deep_collection:
self.logger.info(
"There are no prs to update, insert, or collect nested information for.\n"
)
return
if self.deep_collection:
source_data = inc_source_prs['all']
gh_merge_fields = ['id']
augur_merge_fields = ['pr_src_id']
self.pk_source_prs += self.enrich_data_primary_keys(source_data, self.pull_requests_table,
gh_merge_fields, augur_merge_fields
)
return
#paginate endpoint with stagger enabled so that the above method can insert every 500
# self.logger.info(
# f"PR Action map is {pr_action_map}"
# )
source_prs = self.paginate_endpoint(
pr_url, action_map=pr_action_map, table=self.pull_requests_table,
where_clause=self.pull_requests_table.c.repo_id == self.repo_id,
stagger=True,
insertion_method=pk_source_increment_insert
)
# self.logger.info(
# f"PR Action map is {pr_action_map} after source_prs. The source_prs are {source_prs}."
# )
#Use the increment insert method in order to do the
#remaining pages of the paginated endpoint that weren't inserted inside the paginate_endpoint method
pk_source_increment_insert(source_prs,pr_action_map)
pk_source_prs = self.pk_source_prs
self.pk_source_prs = []
return pk_source_prs
def pull_requests_model(self, entry_info, repo_id):
github_url = self.task_info['given']['github_url']
self.logger.info("Beginning collection of Pull Requests...\n")
self.logger.info(f"Repo ID: {self.repo_id}, Git URL: {github_url}\n")
pk_source_prs = []
try:
pk_source_prs = self._get_pk_source_prs()
except Exception as e:
self.logger.debug(f"Pull Requests model failed with {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
if pk_source_prs:
try:
self.pull_request_comments_model(pk_source_prs)
self.logger.info(f"Pull request comments model.")
except Exception as e:
self.logger.debug(f"PR comments model failed on {e}. exception registered.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
try:
self.pull_request_events_model(pk_source_prs)
self.logger.info(f"Pull request events model.")
except Exception as e:
self.logger.debug(f"PR events model failed on {e}. exception registered for pr_step.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
try:
self.pull_request_reviews_model(pk_source_prs)
self.logger.info(f"Pull request reviews model.")
except Exception as e:
self.logger.debug(f"PR reviews model failed on {e}. exception registered for pr_step.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
try:
self.pull_request_nested_data_model(pk_source_prs)
self.logger.info(f"Pull request nested data model.")
except Exception as e:
self.logger.debug(f"PR nested model failed on {e}. exception registered for pr_step.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
finally:
self.logger.debug("finished running through four models.")
self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')
def pull_request_comments_model(self, pk_source_prs):
comments_url = (
f"https://api.github.com/repos/{self.owner}/{self.repo}/pulls/comments?per_page=100"
"&page={}"
)
        comment_action_map = {
            'insert': {
                'source': ['id'],
                'augur': ['platform_msg_id']
            }
        }
comment_ref_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_message_ref_src_comment_id']
}
}
def pr_comments_insert(inc_pr_comments, comment_action_map, comment_ref_action_map):
inc_pr_comments['insert'] = self.text_clean(inc_pr_comments['insert'], 'body')
if len(inc_pr_comments['insert']) > 0:
inc_pr_comments['insert'] = self.enrich_cntrb_id(
inc_pr_comments['insert'], str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts in action map.")
pr_comments_insert = [
{
'pltfrm_id': self.platform_id,
'msg_text': str(comment['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
comment['body']
) else ' ',
'msg_timestamp': comment['created_at'],
'cntrb_id': comment['cntrb_id'] if (
comment['cntrb_id']
) else is_nan(comment['cntrb_id']),
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id,
'platform_msg_id': int(comment['id']),
'platform_node_id': comment['node_id']
} for comment in inc_pr_comments['insert']
]
try:
self.bulk_insert(self.message_table, insert=pr_comments_insert,
unique_columns=comment_action_map['insert']['augur'])
except Exception as e:
self.logger.debug(f"PR comments data model failed on {e}. exception registered.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
try:
c_pk_source_comments = self.enrich_data_primary_keys(
inc_pr_comments['insert'], self.message_table,
comment_action_map['insert']['source'],
comment_action_map['insert']['augur'] d")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
self.write_debug_data(c_pk_source_comments, 'c_pk_source_comments')
self.logger.info(f"log of the length of c_pk_source_comments {len(c_pk_source_comments)}.")
try:
both_pk_source_comments = self.enrich_data_primary_keys(
c_pk_source_comments, self.pull_requests_table,
['pull_request_url'], ['pr_url'])
self.logger.info(f"log of the length of both_pk_source_comments {len(both_pk_source_comments)}.")
except Exception as e:
self.logger.info(f"bulk insert of comments failed on {e}. exception registerred")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
self.logger.debug(f"length of both_pk_source_comments: {len(both_pk_source_comments)}")
pr_message_ref_insert = [
{
'pull_request_id': comment['pull_request_id'],
'msg_id': comment['msg_id'],
'pr_message_ref_src_comment_id': int(comment['id']),
'pr_message_ref_src_node_id': comment['node_id'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for comment in both_pk_source_comments
]
try:
self.logger.debug(f"inserting into {self.pull_request_message_ref_table}.")
self.bulk_insert(self.pull_request_message_ref_table, insert=pr_message_ref_insert,
unique_columns=comment_ref_action_map['insert']['augur'])
except Exception as e:
self.logger.info(f"message inserts failed with: {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
try:
pr_comments = self.paginate_endpoint(
comments_url, action_map=comment_action_map, table=self.message_table,
where_clause=self.message_table.c.msg_id.in_(
[
msg_row[0] for msg_row in self.db.execute(
s.sql.select(
[self.pull_request_message_ref_table.c.msg_id]
).where(
self.pull_request_message_ref_table.c.pull_request_id.in_(
set(pd.DataFrame(pk_source_prs)['pull_request_id'])
)
)
).fetchall()
]
),
stagger=True,
insertion_method=pr_comments_insert
)
pr_comments_insert(pr_comments,comment_action_map,comment_ref_action_map)
self.logger.info(f"comments inserted for repo_id: {self.repo_id}")
return
except Exception as e:
self.logger.info(f"exception registered in paginate endpoint for issue comments: {e}")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
pass
def pull_request_events_model(self, pk_source_prs=[]):
if not pk_source_prs:
pk_source_prs = self._get_pk_source_prs()
events_url = (
f"https://api.github.com/repos/{self.owner}/{self.repo}/issues/events?per_page=100&"
"page={}"
)
event_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_platform_event_id']
}
}
self.logger.info(pk_source_prs[0])
self.logger.info(pd.DataFrame(pk_source_prs).columns)
self.logger.info(pd.DataFrame(pk_source_prs))
pr_events = self.new_paginate_endpoint(
events_url, table=self.pull_request_events_table, action_map=event_action_map,
where_clause=self.pull_request_events_table.c.pull_request_id.in_(
set(pd.DataFrame(pk_source_prs)['pull_request_id'])
)
)
        pk_pr_events = self.enrich_data_primary_keys(
            pr_events['insert'], self.pull_requests_table,
            ['issue.url'], ['pr_issue_url'], in_memory=True
        )
        if len(pk_pr_events) > 0:
            pk_pr_events = self.enrich_cntrb_id(
pk_pr_events, str('actor.login'), action_map_additions={
'insert': {
'source': ['actor.node_id'],
'augur': ['gh_node_id']
}
}, prefix='actor.'
)
else:
self.logger.info("Contributor enrichment is not needed, no data provided.")
for index, issue in enumerate(pk_pr_events):
if 'cntrb_id' not in issue:
self.logger.debug(f"Exception registered. Dict has null cntrb_id: {issue}")
pr_events_insert = [
{
'pull_request_id': int(event['pull_request_id']),
'cntrb_id': event['cntrb_id'] if (
event['cntrb_id']
) else is_nan(event['cntrb_id']),
'action': event['event'],
'action_commit_hash': event['commit_id'],
'created_at': event['created_at'] if (
event['created_at']
) else None,
'issue_event_src_id': int(event['id']),
'node_id': event['node_id'],
'node_url': event['url'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'pr_platform_event_id': int(event['id']),
'platform_id': self.platform_id,
'repo_id': self.repo_id
} for event in pk_pr_events if event['actor'] is not None
]
self.bulk_insert(self.pull_request_events_table, insert=pr_events_insert,
unique_columns=event_action_map['insert']['augur']
)
return pr_events['all']
def pull_request_reviews_model(self, pk_source_prs=[]):
if not pk_source_prs:
pk_source_prs = self._get_pk_source_prs()
review_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_review_src_id']
},
'update': {
'source': ['state'],
'augur': ['pr_review_state']
}
}
reviews_urls = [
(
f"https://api.github.com/repos/{self.owner}/{self.repo}/pulls/{pr['number']}/"
"reviews?per_page=100", {'pull_request_id': pr['pull_request_id']}
)
for pr in pk_source_prs
]
pr_pk_source_reviews = self.multi_thread_urls(reviews_urls)
self.write_debug_data(pr_pk_source_reviews, 'pr_pk_source_reviews')
cols_to_query = self.get_relevant_columns(
self.pull_request_reviews_table, review_action_map
)
table_values = self.db.execute(s.sql.select(cols_to_query).where(
self.pull_request_reviews_table.c.pull_request_id.in_(
set(pd.DataFrame(pk_source_prs)['pull_request_id'])
))).fetchall()
source_reviews_insert, source_reviews_update = self.organize_needed_data(
pr_pk_source_reviews, table_values=table_values,
action_map=review_action_map
)
if len(source_reviews_insert) > 0:
source_reviews_insert = self.enrich_cntrb_id(
source_reviews_insert, str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, source_reviews_insert is empty.")
reviews_insert = [
{
'pull_request_id': review['pull_request_id'],
'cntrb_id': review['cntrb_id'],
'pr_review_author_association': review['author_association'],
'pr_review_state': review['state'],
'pr_review_body': str(review['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
review['body']
) else None,
'pr_review_submitted_at': review['submitted_at'] if (
'submitted_at' in review
) else None,
                'pr_review_src_id': int(float(review['id'])),
                'pr_review_html_url': review['html_url'],
'pr_review_pull_request_url': review['pull_request_url'],
'pr_review_commit_id': review['commit_id'],
'tool_source': 'pull_request_reviews model',
'tool_version': self.tool_version+ "_reviews",
'data_source': self.data_source,
'repo_id': self.repo_id,
'platform_id': self.platform_id
} for review in source_reviews_insert if review['user'] and 'login' in review['user']
]
try:
self.bulk_insert(
self.pull_request_reviews_table, insert=reviews_insert, update=source_reviews_update,
unique_columns=review_action_map['insert']['augur'],
update_columns=review_action_map['update']['augur']
)
except Exception as e:
self.logger.debug(f"PR reviews data model failed on {e}. exception registered.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
gh_merge_fields = ['id']
augur_merge_fields = ['pr_review_src_id']
both_pr_review_pk_source_reviews = self.enrich_data_primary_keys(
pr_pk_source_reviews, self.pull_request_reviews_table, gh_merge_fields,
augur_merge_fields, in_memory=True
)
self.write_debug_data(both_pr_review_pk_source_reviews, 'both_pr_review_pk_source_reviews')
review_msg_url = (f'https://api.github.com/repos/{self.owner}/{self.repo}/pulls' +
'/comments?per_page=100&page={}')
review_msg_action_map = {
'insert': {
'source': ['id'],
'augur': ['platform_msg_id']
}
}
review_msg_ref_action_map = {
'insert': {
'source': ['id'],
'augur': ['pr_review_msg_src_id']
}
}
in_clause = [] if len(both_pr_review_pk_source_reviews) == 0 else set(pd.DataFrame(both_pr_review_pk_source_reviews)['pr_review_id'])
review_msgs = self.paginate_endpoint(
review_msg_url, action_map=review_msg_action_map, table=self.message_table,
where_clause=self.message_table.c.msg_id.in_(
[
msg_row[0] for msg_row in self.db.execute(
s.sql.select([self.pull_request_review_message_ref_table.c.msg_id]).where(
self.pull_request_review_message_ref_table.c.pr_review_id.in_(
in_clause
)
)
).fetchall()
]
)
)
self.write_debug_data(review_msgs, 'review_msgs')
if len(review_msgs['insert']) > 0:
review_msgs['insert'] = self.enrich_cntrb_id(
review_msgs['insert'], str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, nothing to insert from the action map.")
review_msg_insert = [
{
'pltfrm_id': self.platform_id,
'msg_text': str(comment['body']).encode(encoding='UTF-8',errors='backslashreplace').decode(encoding='UTF-8',errors='ignore') if (
comment['body']
) else None,
'msg_timestamp': comment['created_at'],
'cntrb_id': comment['cntrb_id'],
'tool_source': self.tool_source +"_reviews",
'tool_version': self.tool_version + "_reviews",
'data_source': 'pull_request_reviews model',
'repo_id': self.repo_id,
'platform_msg_id': int(float(comment['id'])),
'platform_node_id': comment['node_id']
} for comment in review_msgs['insert']
if comment['user'] and 'login' in comment['user']
]
self.bulk_insert(self.message_table, insert=review_msg_insert,
unique_columns = review_msg_action_map['insert']['augur'])
c_pk_source_comments = self.enrich_data_primary_keys(
review_msgs['insert'], self.message_table, review_msg_action_map['insert']['source'],
review_msg_action_map['insert']['augur'], in_memory=True
)
self.write_debug_data(c_pk_source_comments, 'c_pk_source_comments')
both_pk_source_comments = self.enrich_data_primary_keys(
c_pk_source_comments, self.pull_request_reviews_table, ['pull_request_review_id'],
['pr_review_src_id'], in_memory=True
)
self.write_debug_data(both_pk_source_comments, 'both_pk_source_comments')
pr_review_msg_ref_insert = [
{
'pr_review_id': comment['pr_review_id'],
'msg_id': comment['msg_id'],
'pr_review_msg_url': comment['url'],
'pr_review_src_id': int(comment['pull_request_review_id']),
'pr_review_msg_src_id': int(comment['id']),
'pr_review_msg_node_id': comment['node_id'],
'pr_review_msg_diff_hunk': comment['diff_hunk'],
'pr_review_msg_path': comment['path'],
'pr_review_msg_position': s.sql.expression.null() if not (
comment['position']
) else comment['position'],
'pr_review_msg_original_position': s.sql.expression.null() if not (
comment['original_position']
) else comment['original_position'],
'pr_review_msg_commit_id': str(comment['commit_id']),
'pr_review_msg_original_commit_id': str(comment['original_commit_id']),
'pr_review_msg_updated_at': comment['updated_at'],
'pr_review_msg_html_url': comment['html_url'],
'pr_url': comment['pull_request_url'],
'pr_review_msg_author_association': comment['author_association'],
'pr_review_msg_start_line': s.sql.expression.null() if not (
comment['start_line']
) else comment['start_line'],
'pr_review_msg_original_start_line': s.sql.expression.null() if not (
comment['original_start_line']
) else comment['original_start_line'],
'pr_review_msg_start_side': s.sql.expression.null() if not (
str(comment['start_side'])
) else str(comment['start_side']),
'pr_review_msg_line': s.sql.expression.null() if not (
comment['line']
) else comment['line'],
'pr_review_msg_original_line': s.sql.expression.null() if not (
comment['original_line']
) else comment['original_line'],
'pr_review_msg_side': s.sql.expression.null() if not (
str(comment['side'])
) else str(comment['side']),
'tool_source': 'pull_request_reviews model',
'tool_version': self.tool_version + "_reviews",
'data_source': self.data_source,
'repo_id': self.repo_id
} for comment in both_pk_source_comments
]
try:
self.bulk_insert(
self.pull_request_review_message_ref_table,
insert=pr_review_msg_ref_insert, unique_columns = review_msg_ref_action_map['insert']['augur']
)
except Exception as e:
self.logger.debug(f"bulk insert for review message ref failed on : {e}")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
def pull_request_nested_data_model(self, pk_source_prs=[]):
try:
if not pk_source_prs:
pk_source_prs = self._get_pk_source_prs()
else:
self.logger.debug("nested model loaded.")
except Exception as e:
            self.logger.debug(f'getting source prs failed for nested model on {e}.')
pass
labels_all = []
reviewers_all = []
assignees_all = []
meta_all = []
for index, pr in enumerate(pk_source_prs):
source_labels = pd.DataFrame(pr['labels'])
source_labels['pull_request_id'] = pr['pull_request_id']
labels_all += source_labels.to_dict(orient='records')
source_reviewers = pd.DataFrame(pr['requested_reviewers'])
source_reviewers['pull_request_id'] = pr['pull_request_id']
reviewers_all += source_reviewers.to_dict(orient='records')
source_assignees = pd.DataFrame(pr['assignees'])
source_assignees['pull_request_id'] = pr['pull_request_id']
assignees_all += source_assignees.to_dict(orient='records')
pr['head'].update(
{'pr_head_or_base': 'head', 'pull_request_id': pr['pull_request_id']}
)
pr['base'].update(
{'pr_head_or_base': 'base', 'pull_request_id': pr['pull_request_id']}
)
meta_all += [pr['head'], pr['base']]
pr_nested_loop = 1
        while pr_nested_loop < 5:
try:
if pr_nested_loop == 1:
pr_nested_loop += 1
label_action_map = {
'insert': {
'source': ['pull_request_id', 'id'],
'augur': ['pull_request_id', 'pr_src_id']
}
}
table_values_pr_labels = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_labels_table,label_action_map))
).fetchall()
source_labels_insert, _ = self.organize_needed_data(
labels_all, table_values=table_values_pr_labels, action_map=label_action_map
)
labels_insert = [
{
'pull_request_id': label['pull_request_id'],
'pr_src_id': int(label['id']),
'pr_src_node_id': label['node_id'],
'pr_src_url': label['url'],
'pr_src_description': label['name'],
'pr_src_color': label['color'],
'pr_src_default_bool': label['default'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for label in source_labels_insert
]
self.bulk_insert(self.pull_request_labels_table, insert=labels_insert)
elif pr_nested_loop == 2:
pr_nested_loop += 1
reviewer_action_map = {
'insert': {
'source': ['pull_request_id', 'id'],
'augur': ['pull_request_id', 'pr_reviewer_src_id']
}
}
table_values_issue_labels = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_reviewers_table,reviewer_action_map))
).fetchall()
source_reviewers_insert, _ = self.organize_needed_data(
reviewers_all, table_values=table_values_issue_labels,
action_map=reviewer_action_map
)
if len(source_reviewers_insert) > 0:
source_reviewers_insert = self.enrich_cntrb_id(
source_reviewers_insert, str('login'), action_map_additions={
'insert': {
'source': ['node_id'],
'augur': ['gh_node_id']
}
}
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts provided.")
reviewers_insert = [
{
'pull_request_id': reviewer['pull_request_id'],
'cntrb_id': reviewer['cntrb_id'],
'pr_reviewer_src_id': int(float(reviewer['id'])),
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for reviewer in source_reviewers_insert if 'login' in reviewer
]
self.bulk_insert(self.pull_request_reviewers_table, insert=reviewers_insert)
elif pr_nested_loop ==3:
pr_nested_loop += 1
assignee_action_map = {
'insert': {
'source': ['pull_request_id', 'id'],
'augur': ['pull_request_id', 'pr_assignee_src_id']
}
}
table_values_assignees_labels = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_assignees_table,assignee_action_map))
).fetchall()
source_assignees_insert, _ = self.organize_needed_data(
assignees_all, table_values=table_values_assignees_labels,
action_map=assignee_action_map
)
if len(source_assignees_insert) > 0:
source_assignees_insert = self.enrich_cntrb_id(
source_assignees_insert, str('login'), action_map_additions={
'insert': {
'source': ['node_id'],
'augur': ['gh_node_id']
}
}
)
else:
self.logger.info("Contributor enrichment is not needed, no inserts provided.")
assignees_insert = [
{
'pull_request_id': assignee['pull_request_id'],
'contrib_id': assignee['cntrb_id'],
'pr_assignee_src_id': int(assignee['id']),
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
} for assignee in source_assignees_insert if 'login' in assignee
]
self.bulk_insert(self.pull_request_assignees_table, insert=assignees_insert)
elif pr_nested_loop == 4:
pr_nested_loop += 1
meta_action_map = {
'insert': {
'source': ['pull_request_id', 'sha', 'pr_head_or_base'],
'augur': ['pull_request_id', 'pr_sha', 'pr_head_or_base']
}
}
table_values_pull_request_meta = self.db.execute(
s.sql.select(self.get_relevant_columns(self.pull_request_meta_table,meta_action_map))
).fetchall()
source_meta_insert, _ = self.organize_needed_data(
meta_all, table_values=table_values_pull_request_meta, action_map=meta_action_map
)
if len(source_meta_insert) > 0:
source_meta_insert = self.enrich_cntrb_id(
source_meta_insert, str('user.login'), action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
else:
self.logger.info("Contributor enrichment is not needed, nothing in source_meta_insert.")
meta_insert = [
{
'pull_request_id': meta['pull_request_id'],
'pr_head_or_base': meta['pr_head_or_base'],
'pr_src_meta_label': meta['label'],
'pr_src_meta_ref': meta['ref'],
'pr_sha': meta['sha'],
                            'cntrb_id': meta['cntrb_id'],
                            'tool_source': self.tool_source,
                            'tool_version': self.tool_version,
'data_source': self.data_source,
'repo_id': self.repo_id
                        } for meta in source_meta_insert if 'login' in meta['user']
                    ]
                    self.bulk_insert(self.pull_request_meta_table, insert=meta_insert)
except Exception as e:
self.logger.debug(f"Nested Model error at loop {pr_nested_loop} : {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
continue
def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id):
try:
self.logger.info(f'Querying PR {pr_repo_type} repo')
table = 'pull_request_repo'
duplicate_col_map = {'pr_src_repo_id': 'id'}
            table_pkey = 'pr_repo_id'
            update_col_map = {}
            update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
pr_repo_table_values = self.get_table_values(cols_query, [table])
new_pr_repo = self.assign_tuple_action(
[pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey
)[0]
if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']:
cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login'])
else:
cntrb_id = 1
pr_repo = {
'pr_repo_meta_id': pr_meta_id,
'pr_repo_head_or_base': pr_repo_type,
'pr_src_repo_id': new_pr_repo['id'],
'pr_src_node_id': None,
'pr_repo_name': new_pr_repo['name'],
'pr_repo_full_name': new_pr_repo['full_name'],
'pr_repo_private_bool': new_pr_repo['private'],
'pr_cntrb_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if new_pr_repo['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo))
self.logger.info(f"Added PR {pr_repo_type} repo {result.inserted_primary_key}")
self.results_counter += 1
self.logger.info(
f"Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}"
)
except Exception as e:
self.logger.debug(f"repo exception registerred for PRs: {e}")
self.logger.debug(f"Nested Model error at loop {pr_nested_loop} : {e}.")
stacker = traceback.format_exc()
self.logger.debug(f"{stacker}")
# --- plotly/validators/scattercarpet/stream/__init__.py (piyush1301/plotly.py, MIT) ---
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='token',
parent_name='scattercarpet.stream',
**kwargs
):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
no_blank=kwargs.pop('no_blank', True),
role=kwargs.pop('role', 'info'),
strict=kwargs.pop('strict', True),
**kwargs
)
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='maxpoints',
parent_name='scattercarpet.stream',
**kwargs
):
super(MaxpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
max=kwargs.pop('max', 10000),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'info'),
**kwargs
)
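# A brief usage sketch (an assumption: this runs inside a plotly.py checkout
# where _plotly_utils is importable). Validator subclasses expose
# validate_coerce(), which returns the coerced value or raises on invalid
# input; with no_blank=True the token validator rejects the empty string, and
# the maxpoints validator enforces the [0, 10000] range declared above.
if __name__ == '__main__':
    token_validator = TokenValidator()
    print(token_validator.validate_coerce('my-stream-token'))
    maxpoints_validator = MaxpointsValidator()
    print(maxpoints_validator.validate_coerce(500))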
# --- migrations/versions/efeb5cce78ac_users_table.py (danisaleem/Flask_App, MIT) ---
"""users table
Revision ID: efeb5cce78ac
Revises:
Create Date: 2020-03-23 00:01:22.028648
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'efeb5cce78ac'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
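# Typical invocation for this revision uses the standard Alembic CLI, run from
# the directory containing alembic.ini (or `flask db upgrade` if the project
# wires Alembic through Flask-Migrate, which this layout suggests):
#
#     alembic upgrade efeb5cce78ac   # apply the users table migration
#     alembic downgrade -1           # revert it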
# --- JiYouMCC/0016/0016.py (hooting/show-me-the-code-python, MIT) ---
import xlwt
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
file = xlwt.Workbook(encoding='utf-8')
table = file.add_sheet('numbers', cell_overwrite_ok=True)
txt = open('numbers.txt').read()
json_txt = json.loads(txt)
for x in range(len(json_txt)):
for y in range(len(json_txt[x])):
table.write(x, y, json_txt[x][y])
file.save('numbers.xls')
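# The script above targets Python 2 (reload and sys.setdefaultencoding do not
# exist in Python 3). A rough Python 3 equivalent, assuming the same
# numbers.txt JSON layout, simply drops the encoding shim:
#
#     import json
#     import xlwt
#
#     with open('numbers.txt') as f:
#         data = json.load(f)
#     book = xlwt.Workbook(encoding='utf-8')
#     sheet = book.add_sheet('numbers', cell_overwrite_ok=True)
#     for x, row in enumerate(data):
#         for y, value in enumerate(row):
#             sheet.write(x, y, value)
#     book.save('numbers.xls')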
# --- mesh_tensorflow/bert/run_classifier.py (jinoobaek-qz/mesh, Apache-2.0) ---
# coding=utf-8
# Copyright 2019 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import mesh_tensorflow as mtf
import mesh_tensorflow.bert.bert as bert_lib
import mesh_tensorflow.bert.optimization as optimization_lib
import mesh_tensorflow.bert.tokenization as tokenization
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"cached_train_file", None, "Prepared training file.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_float(
"max_optimized_variable_size", 1e7,
"Do not optimize variables larger than this.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("clip_gradients", True, "Apply gradient clipping.")
flags.DEFINE_string("optimizer", "adam", "adam/adafactor")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string("mesh_shape", "batch:8", "mesh shape")
tf.flags.DEFINE_string(
"layout",
"batch:batch,vocab:model,intermediate:model,num_heads:model,experts:batch",
"layout rules")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" %
" ".join([tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
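# Worked example (assuming each word maps to a single WordPiece token): text_a
# "the dog" with max_seq_length = 6 yields tokens [CLS] the dog [SEP],
# input_mask [1, 1, 1, 1, 0, 0], segment_ids [0] * 6, and input_ids padded
# with two trailing zeros.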
def file_based_convert_examples_to_features(examples, label_list,
max_seq_length, tokenizer,
output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(
output_file, tf.python_io.TFRecordOptions(output_buffer_size=2 ** 24))
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
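# Worked example: with max_length = 8, lists of length 6 and 5 shrink via pops
# of (a, b, a) to lengths 4 and 4; each iteration trims one token from the
# currently longer list, so the surviving lengths stay balanced.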
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels_dim, layout, mesh_shape):
"""Creates a classification model."""
model = bert_lib.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
layout=layout,
mesh_shape=mesh_shape)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_dim = output_layer.shape[-1]
mesh = input_ids.mesh
output_weights = mtf.get_variable(
mesh,
"output_weights",
shape=[num_labels_dim, hidden_dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = mtf.get_variable(
mesh,
"output_bias",
shape=[num_labels_dim],
initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = mtf.dropout(output_layer, keep_prob=0.9)
logits = mtf.einsum([output_layer, output_weights],
reduced_dims=[hidden_dim])
logits = logits + output_bias
probabilities = mtf.softmax(logits, reduced_dim=num_labels_dim)
per_example_loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, labels, vocab_dim=num_labels_dim)
loss = mtf.reduce_mean(per_example_loss) + model.get_extra_loss()
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
# MTF setup.
graph = mtf.Graph()
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
ctx = params["context"]
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
tf.logging.info("device_list = %s" % device_list,)
replica_cache_size = 300 * 1000000 # 300M per replica
# Worker 0 caches all the TPU binaries.
worker0_mem = replica_cache_size * ctx.num_replicas
    devices_memory_usage = [worker0_mem] + [0] * (num_hosts - 1)
    var_placer = mtf.utils.BalancedVariablePlacer(device_list,
                                                  devices_memory_usage)
mesh_devices = [""] * mesh_shape.size
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(mesh_shape, layout_rules,
mesh_devices,
ctx.device_assignment)
mesh = mtf.Mesh(graph, "bert_mesh", var_placer)
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
batch_size = input_ids.get_shape()[0].value
batch_dim = mtf.Dimension("batch", batch_size)
seq_length = input_ids.get_shape()[1].value
seq_dim = mtf.Dimension("seq", seq_length)
num_labels_dim = mtf.Dimension("seq", num_labels)
mtf_input_ids = mtf.import_tf_tensor(mesh, input_ids, [batch_dim, seq_dim])
mtf_input_mask = mtf.import_tf_tensor(mesh, input_mask,
[batch_dim, seq_dim])
mtf_segment_ids = mtf.import_tf_tensor(mesh, segment_ids,
[batch_dim, seq_dim])
mtf_label_ids = mtf.import_tf_tensor(mesh, label_ids, [batch_dim])
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits,
probabilities) = create_model(bert_config, is_training, mtf_input_ids,
mtf_input_mask, mtf_segment_ids,
mtf_label_ids, num_labels_dim,
layout_rules, mesh_shape)
total_loss = mtf.anonymize(total_loss)
per_example_loss = mtf.anonymize(per_example_loss)
logits = mtf.anonymize(logits)
if mode == tf.estimator.ModeKeys.TRAIN:
_, update_ops = optimization_lib.create_optimizer(
total_loss,
learning_rate,
num_train_steps,
num_warmup_steps,
max_optimized_variable_size=FLAGS.max_optimized_variable_size,
optimizer=FLAGS.optimizer,
clip_gradients=FLAGS.clip_gradients)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_loss = tf.to_float(lowering.export_to_tf_tensor(total_loss))
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
train_op = tf.group(tf_update_ops)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [
lowering.export_to_tf_tensor(per_example_loss), label_ids,
lowering.export_to_tf_tensor(logits), is_real_example
])
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = bert_lib.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
with mtf.utils.outside_all_rewrites():
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
if mode == tf.estimator.ModeKeys.TRAIN:
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.output_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
loss=tf_loss,
train_op=train_op,
training_hooks=[restore_hook, saver_hook],
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
evaluation_hooks=[restore_hook],
loss=tf_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
prediction_hooks=[restore_hook],
predictions={
"probabilities": lowering.export_to_tf_tensor(probabilities)
},
scaffold_fn=scaffold_fn)
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
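# A minimal usage sketch (illustrative; the paths and step count are
# assumptions, not taken from this file). For real workloads the comment above
# recommends TFRecords, i.e. the file_based_input_fn_builder defined elsewhere
# in this file:
#   input_fn = file_based_input_fn_builder(
#       input_file="/tmp/train.tf_record", seq_length=128,
#       is_training=True, drop_remainder=True)
#   estimator.train(input_fn=input_fn, max_steps=1000)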
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = bert_lib.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.BROADCAST))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
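# Worked example (assumed figures): 3,668 training examples with
# train_batch_size=32 and num_train_epochs=3.0 give
# int(3668 / 32 * 3.0) = 343 train steps, and warmup_proportion=0.1
# yields int(343 * 0.1) = 34 warmup steps.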
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
if FLAGS.cached_train_file:
train_file = FLAGS.cached_train_file
else:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(train_examples, label_list,
FLAGS.max_seq_length, tokenizer,
train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
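# Worked example (assumed figures): with 1,043 dev examples and
# eval_batch_size=32, 1043 % 32 == 19, so 13 PaddingInputExamples are
# appended to reach 1,056 == 33 full batches; their is_real_example
# weight of 0.0 keeps them out of the eval metrics.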
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(eval_examples, label_list,
FLAGS.max_seq_length, tokenizer,
eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.disable_v2_behavior()
tf.app.run()
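# Example invocation (illustrative flag values; the script name and paths are
# assumptions, not taken from this file):
#   python run_classifier.py --task_name=mrpc --do_train --do_eval \
#     --data_dir=/path/to/MRPC --vocab_file=/path/to/vocab.txt \
#     --bert_config_file=/path/to/bert_config.json --output_dir=/tmp/mrpc_out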
| 36.155331
| 82
| 0.67237
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import mesh_tensorflow as mtf
import mesh_tensorflow.bert.bert as bert_lib
import mesh_tensorflow.bert.optimization as optimization_lib
import mesh_tensorflow.bert.tokenization as tokenization
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"cached_train_file", None, "Prepared training file.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_float(
"max_optimized_variable_size", 1e7,
"Do not optimize variables larger than this.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("clip_gradients", True, "Apply gradient clipping.")
flags.DEFINE_string("optimizer", "adam", "adam/adafactor")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string("mesh_shape", "batch:8", "mesh shape")
tf.flags.DEFINE_string(
"layout",
"batch:batch,vocab:model,intermediate:model,num_heads:model,experts:batch",
"layout rules")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
  pass
class InputFeatures(object):
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_test_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" %
" ".join([tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(examples, label_list,
max_seq_length, tokenizer,
output_file):
writer = tf.python_io.TFRecordWriter(
output_file, tf.python_io.TFRecordOptions(output_buffer_size=2 ** 24))
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
batch_size = params["batch_size"]
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
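# Worked example (illustrative): with len(tokens_a) == 10, len(tokens_b) == 5
# and max_length == 8, the loop pops tokens_a five times (10 -> 5), then
# tokens_b once (5 -> 4, since the lengths are equal), then tokens_a once more
# (5 -> 4), stopping when 4 + 4 <= 8.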
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels_dim, layout, mesh_shape):
model = bert_lib.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
layout=layout,
mesh_shape=mesh_shape)
output_layer = model.get_pooled_output()
hidden_dim = output_layer.shape[-1]
mesh = input_ids.mesh
output_weights = mtf.get_variable(
mesh,
"output_weights",
shape=[num_labels_dim, hidden_dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = mtf.get_variable(
mesh,
"output_bias",
shape=[num_labels_dim],
initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
output_layer = mtf.dropout(output_layer, keep_prob=0.9)
logits = mtf.einsum([output_layer, output_weights],
reduced_dims=[hidden_dim])
logits = logits + output_bias
probabilities = mtf.softmax(logits, reduced_dim=num_labels_dim)
per_example_loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, labels, vocab_dim=num_labels_dim)
loss = mtf.reduce_mean(per_example_loss) + model.get_extra_loss()
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu):
def model_fn(features, labels, mode, params):
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
graph = mtf.Graph()
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
ctx = params["context"]
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
tf.logging.info("device_list = %s" % device_list,)
replica_cache_size = 300 * 1000000
worker0_mem = replica_cache_size * ctx.num_replicas
devices_memory_usage = [worker0_mem] + [0] * (num_hosts - 1)
var_placer = mtf.utils.BalancedVariablePlacer(device_list,
                                              devices_memory_usage)
mesh_devices = [""] * mesh_shape.size
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(mesh_shape, layout_rules,
mesh_devices,
ctx.device_assignment)
mesh = mtf.Mesh(graph, "bert_mesh", var_placer)
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
batch_size = input_ids.get_shape()[0].value
batch_dim = mtf.Dimension("batch", batch_size)
seq_length = input_ids.get_shape()[1].value
seq_dim = mtf.Dimension("seq", seq_length)
num_labels_dim = mtf.Dimension("seq", num_labels)
mtf_input_ids = mtf.import_tf_tensor(mesh, input_ids, [batch_dim, seq_dim])
mtf_input_mask = mtf.import_tf_tensor(mesh, input_mask,
[batch_dim, seq_dim])
mtf_segment_ids = mtf.import_tf_tensor(mesh, segment_ids,
[batch_dim, seq_dim])
mtf_label_ids = mtf.import_tf_tensor(mesh, label_ids, [batch_dim])
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits,
probabilities) = create_model(bert_config, is_training, mtf_input_ids,
mtf_input_mask, mtf_segment_ids,
mtf_label_ids, num_labels_dim,
layout_rules, mesh_shape)
total_loss = mtf.anonymize(total_loss)
per_example_loss = mtf.anonymize(per_example_loss)
logits = mtf.anonymize(logits)
if mode == tf.estimator.ModeKeys.TRAIN:
_, update_ops = optimization_lib.create_optimizer(
total_loss,
learning_rate,
num_train_steps,
num_warmup_steps,
max_optimized_variable_size=FLAGS.max_optimized_variable_size,
optimizer=FLAGS.optimizer,
clip_gradients=FLAGS.clip_gradients)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_loss = tf.to_float(lowering.export_to_tf_tensor(total_loss))
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
train_op = tf.group(tf_update_ops)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [
lowering.export_to_tf_tensor(per_example_loss), label_ids,
lowering.export_to_tf_tensor(logits), is_real_example
])
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = bert_lib.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
with mtf.utils.outside_all_rewrites():
restore_hook = mtf.MtfRestoreHook(lowering)
if mode == tf.estimator.ModeKeys.TRAIN:
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.output_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
loss=tf_loss,
train_op=train_op,
training_hooks=[restore_hook, saver_hook],
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
evaluation_hooks=[restore_hook],
loss=tf_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
prediction_hooks=[restore_hook],
predictions={
"probabilities": lowering.export_to_tf_tensor(probabilities)
},
scaffold_fn=scaffold_fn)
return model_fn
def input_fn_builder(features, seq_length, is_training, drop_remainder):
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
batch_size = params["batch_size"]
num_examples = len(features)
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = bert_lib.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.BROADCAST))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
if FLAGS.cached_train_file:
train_file = FLAGS.cached_train_file
else:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(train_examples, label_list,
FLAGS.max_seq_length, tokenizer,
train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(eval_examples, label_list,
FLAGS.max_seq_length, tokenizer,
eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.disable_v2_behavior()
tf.app.run()
| true
| true
|
1c3f7e11db8df3b681d8d8f066e13f183429620e
| 262
|
py
|
Python
|
tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_12/test_artificial_128_Logit_PolyTrend_7_12_100.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_12/test_artificial_128_Logit_PolyTrend_7_12_100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_12/test_artificial_128_Logit_PolyTrend_7_12_100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=128, FREQ='D', seed=0, trendtype="PolyTrend", cycle_length=7, transform="Logit", sigma=0.0, exog_count=100, ar_order=12)
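# Illustrative reading of the call above (inferred from the argument names,
# not stated in the source): build a 128-point daily series with a polynomial
# trend, a 7-step cycle, an AR(12) component, 100 exogenous variables and a
# Logit transform, then run it through the pyaf artificial-dataset benchmark.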
| 37.428571
| 162
| 0.729008
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=128, FREQ='D', seed=0, trendtype="PolyTrend", cycle_length=7, transform="Logit", sigma=0.0, exog_count=100, ar_order=12)
| true
| true
|
1c3f7f0265ab655f120e105f8a3c29e9c4dc37c2
| 8,752
|
py
|
Python
|
FuzzingTool_Dialog_ObjectFileChoose_Child.py
|
Ryu-Miyaki/Fuzz4B
|
8546f165d4dbdd97eb6ab5a6f4c445ee81ec364b
|
[
"MIT"
] | 16
|
2020-06-25T11:56:59.000Z
|
2022-02-05T14:00:12.000Z
|
FuzzingTool_Dialog_ObjectFileChoose_Child.py
|
Ryu-Miyaki/Fuzz4B
|
8546f165d4dbdd97eb6ab5a6f4c445ee81ec364b
|
[
"MIT"
] | null | null | null |
FuzzingTool_Dialog_ObjectFileChoose_Child.py
|
Ryu-Miyaki/Fuzz4B
|
8546f165d4dbdd97eb6ab5a6f4c445ee81ec364b
|
[
"MIT"
] | null | null | null |
import wx
import FuzzingTool
import pyperclip
# Implementing Dialog_ObjectFileChoose
class FuzzingTool_Dialog_ObjectFileChoose_Child( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.DefaultSize, style = 0 )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.bSizer_ObjectFileChoose = wx.BoxSizer( wx.VERTICAL )
self.Text_ObjectFileChoose = wx.StaticText( self, wx.ID_ANY, u"Please generate object (.o) files by afl-gcc or afl-g++ with the following options.\n※You should save all the source code and generated object files\n under the directory where you compiled them.", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_ObjectFileChoose.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_ObjectFileChoose, 0, wx.ALL, 5 )
self.Text_Option_g = wx.StaticText( self, wx.ID_ANY, u"● -g", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_Option_g.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_Option_g, 0, wx.ALL, 5 )
self.Text_Option_coverage = wx.StaticText( self, wx.ID_ANY, u"● --coverage", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_Option_coverage.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_Option_coverage, 0, wx.ALL, 5 )
self.Text_Option_c = wx.StaticText( self, wx.ID_ANY, u"● -c", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_Option_c.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_Option_c, 0, wx.ALL, 5 )
bSizer_CompileCommandandCopyButton = wx.BoxSizer( wx.HORIZONTAL )
self.Text_CompileCommand = wx.StaticText( self, wx.ID_ANY, u"e.g. afl-gcc -g --coverage -c sourcefile.c", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_CompileCommand.Wrap( -1 )
bSizer_CompileCommandandCopyButton.Add( self.Text_CompileCommand, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.Button_Copy = wx.Button( self, wx.ID_ANY, u"Copy", wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
bSizer_CompileCommandandCopyButton.Add( self.Button_Copy, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.bSizer_ObjectFileChoose.Add( bSizer_CompileCommandandCopyButton, 1, wx.EXPAND, 5 )
bSizer_CompilationDir = wx.BoxSizer( wx.HORIZONTAL )
self.Text_CompilationDir = wx.StaticText( self, wx.ID_ANY, u"Directory where you compiled files", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_CompilationDir.Wrap( -1 )
bSizer_CompilationDir.Add( self.Text_CompilationDir, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.DirPicker_CompilationCommand = wx.DirPickerCtrl( self, wx.ID_ANY, wx.EmptyString, u"Select a folder", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_DEFAULT_STYLE )
bSizer_CompilationDir.Add( self.DirPicker_CompilationCommand, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.bSizer_ObjectFileChoose.Add( bSizer_CompilationDir, 1, wx.EXPAND, 5 )
bSizer_OKorCanncel = wx.StdDialogButtonSizer()
self.bSizer_OKorCanncelOK = wx.Button( self, wx.ID_OK )
bSizer_OKorCanncel.AddButton( self.bSizer_OKorCanncelOK )
self.bSizer_OKorCanncelCancel = wx.Button( self, wx.ID_CANCEL )
bSizer_OKorCanncel.AddButton( self.bSizer_OKorCanncelCancel )
bSizer_OKorCanncel.Realize()
self.bSizer_ObjectFileChoose.Add( bSizer_OKorCanncel, 1, wx.ALIGN_CENTER_HORIZONTAL, 5 )
self.SetSizer( self.bSizer_ObjectFileChoose )
self.Layout()
self.bSizer_ObjectFileChoose.Fit( self )
self.Centre( wx.BOTH )
# Connect Events
self.Button_Copy.Bind( wx.EVT_BUTTON, self.Button_CopyOnButtonClick )
self.DirPicker_CompilationCommand.Bind( wx.EVT_DIRPICKER_CHANGED, self.DirPicker_CompilationCommandOnDirChanged )
self.bSizer_OKorCanncelCancel.Bind( wx.EVT_BUTTON, self.bSizer_OKorCanncelOnCancelButtonClick )
self.bSizer_OKorCanncelOK.Bind( wx.EVT_BUTTON, self.bSizer_OKorCanncelOnOKButtonClick )
self.bSizer_OKorCanncelOK.Disable()
self.added_bsizer_list = []
self.added_text_list = []
self.added_filepicker_list = []
self.otherobjectfilepicker_count = 0
self.compilationdir_choosed = False
# Handlers for Dialog_ObjectFileChoose events.
def Button_CopyOnButtonClick( self, event ):
# Copy the example compile command (everything after the "e.g. " prefix).
pyperclip.copy(self.Text_CompileCommand.GetLabel().split(' ', 1)[1])
def DirPicker_CompilationCommandOnDirChanged( self, event ):
# Reset and rebuild the object-file pickers whenever the compilation directory changes.
if self.added_text_list != []:
for text in self.added_text_list:
text.Destroy()
self.added_text_list.clear()
if self.added_filepicker_list != []:
for filepicker in self.added_filepicker_list:
filepicker.Destroy()
self.added_filepicker_list.clear()
if self.added_bsizer_list != []:
for bSizer in self.added_bsizer_list:
self.bSizer_ObjectFileChoose.Remove(bSizer)
self.added_bsizer_list.clear()
if self.compilationdir_choosed == False:
bSizer_AddObjectFile = wx.BoxSizer( wx.HORIZONTAL )
self.Text_AddObjectFile = wx.StaticText( self, wx.ID_ANY, u"Add more object files", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_AddObjectFile.Wrap( -1 )
bSizer_AddObjectFile.Add( self.Text_AddObjectFile, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.Button_Add = wx.Button( self, wx.ID_ANY, u"Add", wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
bSizer_AddObjectFile.Add( self.Button_Add, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.bSizer_ObjectFileChoose.Insert( self.bSizer_ObjectFileChoose.GetItemCount() - 1, bSizer_AddObjectFile, 1, wx.EXPAND, 5 )
self.Button_Add.Bind( wx.EVT_BUTTON, self.Button_AddOnButtonClick )
bSizer = wx.BoxSizer(wx.HORIZONTAL)
text = wx.StaticText(self, wx.ID_ANY, u"Object file including main()", wx.DefaultPosition, wx.DefaultSize, 0)
text.Wrap(-1)
bSizer.Add(text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
filepicker = wx.FilePickerCtrl( self, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE )
filepicker.SetInitialDirectory(self.DirPicker_CompilationCommand.GetPath())
bSizer.Add(filepicker, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.bSizer_ObjectFileChoose.Insert(self.bSizer_ObjectFileChoose.GetItemCount() - 2, bSizer, 1, wx.EXPAND, 5)
filepicker.Bind(wx.EVT_FILEPICKER_CHANGED, self.FilePicker_ObjectFilewithmainOnFileChanged)
self.added_bsizer_list.append(bSizer)
self.added_text_list.append(text)
self.added_filepicker_list.append(filepicker)
self.Layout()
self.bSizer_ObjectFileChoose.Fit( self )
self.compilationdir_choosed = True
self.otherobjectfilepicker_count = 0
self.bSizer_OKorCanncelOK.Disable()
def FilePicker_ObjectFilewithmainOnFileChanged( self, event ):
# An object file containing main() has been chosen, so OK can be enabled.
self.bSizer_OKorCanncelOK.Enable()
def Button_AddOnButtonClick( self, event ):
# Add another object-file picker, but only once the previous one has a path.
if self.otherobjectfilepicker_count == 0:
self.Add_Objectfilepicker()
elif self.added_filepicker_list[self.otherobjectfilepicker_count].GetPath() != "":
self.caution.SetLabel("")
self.Add_Objectfilepicker()
else:
self.caution.SetLabel("Please select a file. ")
self.Layout()
self.bSizer_ObjectFileChoose.Fit(self)
def Add_Objectfilepicker(self):
self.otherobjectfilepicker_count += 1
bSizer = wx.BoxSizer(wx.HORIZONTAL)
text = wx.StaticText(self, wx.ID_ANY, u"Object file" + str(self.otherobjectfilepicker_count), wx.DefaultPosition, wx.DefaultSize, 0)
text.Wrap(-1)
bSizer.Add(text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
filepicker = wx.FilePickerCtrl(self, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE)
filepicker.SetInitialDirectory(self.DirPicker_CompilationCommand.GetPath())
bSizer.Add(filepicker, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.caution = wx.StaticText(self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0)
self.caution.Wrap(-1)
bSizer.Add(self.caution, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.bSizer_ObjectFileChoose.Insert(self.bSizer_ObjectFileChoose.GetItemCount() - 2, bSizer, 1, wx.EXPAND, 5)
self.Layout()
self.bSizer_ObjectFileChoose.Fit(self)
self.added_bsizer_list.append(bSizer)
self.added_text_list.append(text)
self.added_text_list.append(self.caution)
self.added_filepicker_list.append(filepicker)
def bSizer_OKorCanncelOnCancelButtonClick( self, event ):
# Cancel: close the dialog with a falsy return code.
self.EndModal(False)
def bSizer_OKorCanncelOnOKButtonClick( self, event ):
# OK: record the compilation directory and all non-empty object-file paths.
self.compilationdir = self.DirPicker_CompilationCommand.GetPath()
self.objectfile_list = [filepicker.GetPath() for filepicker in self.added_filepicker_list if filepicker.GetPath() != ""]
self.EndModal(True)
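# Minimal usage sketch (illustrative; assumes a running wx.App and is not
# part of the original file):
#   app = wx.App(False)
#   dlg = FuzzingTool_Dialog_ObjectFileChoose_Child(None)
#   if dlg.ShowModal():  # EndModal(True) returns a truthy code
#       print(dlg.compilationdir, dlg.objectfile_list)
#   dlg.Destroy()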
| 46.802139
| 305
| 0.776165
|
import wx
import FuzzingTool
import pyperclip
class FuzzingTool_Dialog_ObjectFileChoose_Child( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.DefaultSize, style = 0 )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.bSizer_ObjectFileChoose = wx.BoxSizer( wx.VERTICAL )
self.Text_ObjectFileChoose = wx.StaticText( self, wx.ID_ANY, u"Please generate object (.o) files by afl-gcc or afl-g++ with the following options.\n※You should save all the source code and generated object files\n under the directory where you compiled them.", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_ObjectFileChoose.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_ObjectFileChoose, 0, wx.ALL, 5 )
self.Text_Option_g = wx.StaticText( self, wx.ID_ANY, u"● -g", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_Option_g.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_Option_g, 0, wx.ALL, 5 )
self.Text_Option_coverage = wx.StaticText( self, wx.ID_ANY, u"● --coverage", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_Option_coverage.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_Option_coverage, 0, wx.ALL, 5 )
self.Text_Option_c = wx.StaticText( self, wx.ID_ANY, u"● -c", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_Option_c.Wrap( -1 )
self.bSizer_ObjectFileChoose.Add( self.Text_Option_c, 0, wx.ALL, 5 )
bSizer_CompileCommandandCopyButton = wx.BoxSizer( wx.HORIZONTAL )
self.Text_CompileCommand = wx.StaticText( self, wx.ID_ANY, u"e.g. afl-gcc -g --coverage -c sourcefile.c", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_CompileCommand.Wrap( -1 )
bSizer_CompileCommandandCopyButton.Add( self.Text_CompileCommand, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.Button_Copy = wx.Button( self, wx.ID_ANY, u"Copy", wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
bSizer_CompileCommandandCopyButton.Add( self.Button_Copy, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.bSizer_ObjectFileChoose.Add( bSizer_CompileCommandandCopyButton, 1, wx.EXPAND, 5 )
bSizer_CompilationDir = wx.BoxSizer( wx.HORIZONTAL )
self.Text_CompilationDir = wx.StaticText( self, wx.ID_ANY, u"Directory where you compiled files", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_CompilationDir.Wrap( -1 )
bSizer_CompilationDir.Add( self.Text_CompilationDir, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.DirPicker_CompilationCommand = wx.DirPickerCtrl( self, wx.ID_ANY, wx.EmptyString, u"Select a folder", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_DEFAULT_STYLE )
bSizer_CompilationDir.Add( self.DirPicker_CompilationCommand, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.bSizer_ObjectFileChoose.Add( bSizer_CompilationDir, 1, wx.EXPAND, 5 )
bSizer_OKorCanncel = wx.StdDialogButtonSizer()
self.bSizer_OKorCanncelOK = wx.Button( self, wx.ID_OK )
bSizer_OKorCanncel.AddButton( self.bSizer_OKorCanncelOK )
self.bSizer_OKorCanncelCancel = wx.Button( self, wx.ID_CANCEL )
bSizer_OKorCanncel.AddButton( self.bSizer_OKorCanncelCancel )
bSizer_OKorCanncel.Realize()
self.bSizer_ObjectFileChoose.Add( bSizer_OKorCanncel, 1, wx.ALIGN_CENTER_HORIZONTAL, 5 )
self.SetSizer( self.bSizer_ObjectFileChoose )
self.Layout()
self.bSizer_ObjectFileChoose.Fit( self )
self.Centre( wx.BOTH )
self.Button_Copy.Bind( wx.EVT_BUTTON, self.Button_CopyOnButtonClick )
self.DirPicker_CompilationCommand.Bind( wx.EVT_DIRPICKER_CHANGED, self.DirPicker_CompilationCommandOnDirChanged )
self.bSizer_OKorCanncelCancel.Bind( wx.EVT_BUTTON, self.bSizer_OKorCanncelOnCancelButtonClick )
self.bSizer_OKorCanncelOK.Bind( wx.EVT_BUTTON, self.bSizer_OKorCanncelOnOKButtonClick )
self.bSizer_OKorCanncelOK.Disable()
self.added_bsizer_list = []
self.added_text_list = []
self.added_filepicker_list = []
self.otherobjectfilepicker_count = 0
self.compilationdir_choosed = False
def Button_CopyOnButtonClick( self, event ):
pyperclip.copy(self.Text_CompileCommand.GetLabel().split(' ', 1)[1])
def DirPicker_CompilationCommandOnDirChanged( self, event ):
if self.added_text_list != []:
for text in self.added_text_list:
text.Destroy()
self.added_text_list.clear()
if self.added_filepicker_list != []:
for filepicker in self.added_filepicker_list:
filepicker.Destroy()
self.added_filepicker_list.clear()
if self.added_bsizer_list != []:
for bSizer in self.added_bsizer_list:
self.bSizer_ObjectFileChoose.Remove(bSizer)
self.added_bsizer_list.clear()
if self.compilationdir_choosed == False:
bSizer_AddObjectFile = wx.BoxSizer( wx.HORIZONTAL )
self.Text_AddObjectFile = wx.StaticText( self, wx.ID_ANY, u"Add more object files", wx.DefaultPosition, wx.DefaultSize, 0 )
self.Text_AddObjectFile.Wrap( -1 )
bSizer_AddObjectFile.Add( self.Text_AddObjectFile, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.Button_Add = wx.Button( self, wx.ID_ANY, u"Add", wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
bSizer_AddObjectFile.Add( self.Button_Add, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.bSizer_ObjectFileChoose.Insert( self.bSizer_ObjectFileChoose.GetItemCount() - 1, bSizer_AddObjectFile, 1, wx.EXPAND, 5 )
self.Button_Add.Bind( wx.EVT_BUTTON, self.Button_AddOnButtonClick )
bSizer = wx.BoxSizer(wx.HORIZONTAL)
text = wx.StaticText(self, wx.ID_ANY, u"Object file including main()", wx.DefaultPosition, wx.DefaultSize, 0)
text.Wrap(-1)
bSizer.Add(text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
filepicker = wx.FilePickerCtrl( self, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE )
filepicker.SetInitialDirectory(self.DirPicker_CompilationCommand.GetPath())
bSizer.Add(filepicker, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.bSizer_ObjectFileChoose.Insert(self.bSizer_ObjectFileChoose.GetItemCount() - 2, bSizer, 1, wx.EXPAND, 5)
filepicker.Bind(wx.EVT_FILEPICKER_CHANGED, self.FilePicker_ObjectFilewithmainOnFileChanged)
self.added_bsizer_list.append(bSizer)
self.added_text_list.append(text)
self.added_filepicker_list.append(filepicker)
self.Layout()
self.bSizer_ObjectFileChoose.Fit( self )
self.compilationdir_choosed = True
self.otherobjectfilepicker_count = 0
self.bSizer_OKorCanncelOK.Disable()
def FilePicker_ObjectFilewithmainOnFileChanged( self, event ):
self.bSizer_OKorCanncelOK.Enable()
def Button_AddOnButtonClick( self, event ):
if self.otherobjectfilepicker_count == 0:
self.Add_Objectfilepicker()
elif self.added_filepicker_list[self.otherobjectfilepicker_count].GetPath() != "":
self.caution.SetLabel("")
self.Add_Objectfilepicker()
else:
self.caution.SetLabel("Please select a file. ")
self.Layout()
self.bSizer_ObjectFileChoose.Fit(self)
def Add_Objectfilepicker(self):
self.otherobjectfilepicker_count += 1
bSizer = wx.BoxSizer(wx.HORIZONTAL)
text = wx.StaticText(self, wx.ID_ANY, u"Object file" + str(self.otherobjectfilepicker_count), wx.DefaultPosition, wx.DefaultSize, 0)
text.Wrap(-1)
bSizer.Add(text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
filepicker = wx.FilePickerCtrl(self, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE)
filepicker.SetInitialDirectory(self.DirPicker_CompilationCommand.GetPath())
bSizer.Add(filepicker, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.caution = wx.StaticText(self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0)
self.caution.Wrap(-1)
bSizer.Add(self.caution, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.bSizer_ObjectFileChoose.Insert(self.bSizer_ObjectFileChoose.GetItemCount() - 2, bSizer, 1, wx.EXPAND, 5)
self.Layout()
self.bSizer_ObjectFileChoose.Fit(self)
self.added_bsizer_list.append(bSizer)
self.added_text_list.append(text)
self.added_text_list.append(self.caution)
self.added_filepicker_list.append(filepicker)
def bSizer_OKorCanncelOnCancelButtonClick( self, event ):
self.EndModal(False)
def bSizer_OKorCanncelOnOKButtonClick( self, event ):
self.compilationdir = self.DirPicker_CompilationCommand.GetPath()
self.objectfile_list = [filepicker.GetPath() for filepicker in self.added_filepicker_list if filepicker.GetPath() != ""]
self.EndModal(True)
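	# --- Illustrative usage (editor-added sketch, not in the original file) ---
	# The dialog's class header appears earlier in this file, so the name
	# `CompileCommandDialog` below is an assumption; `compilationdir` and
	# `objectfile_list` are the attributes actually set by the OK handler.
	#
	#   dlg = CompileCommandDialog(parent)
	#   if dlg.ShowModal():              # OK handler calls EndModal(True)
	#       print(dlg.compilationdir)    # chosen compilation directory
	#       print(dlg.objectfile_list)   # non-empty object-file paths
	#   dlg.Destroy()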
| true | true |
1c3f7feb8162ecf2c73e47b82f212ad4cf57200f | 13,530 | py | Python
orcid_api/models/work.py | jpeerz/NZ-ORCID-Hub | ba412d49cff0158842878753b65fc60731df158c | ["MIT"] | null | null | null
orcid_api/models/work.py | jpeerz/NZ-ORCID-Hub | ba412d49cff0158842878753b65fc60731df158c | ["MIT"] | null | null | null
orcid_api/models/work.py | jpeerz/NZ-ORCID-Hub | ba412d49cff0158842878753b65fc60731df158c | ["MIT"] | null | null | null
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Work(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, title=None, journal_title=None, short_description=None, citation=None, type=None, publication_date=None, external_ids=None, url=None, contributors=None, language_code=None, country=None, visibility=None):
"""
Work - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'created_date': 'CreatedDate',
'last_modified_date': 'LastModifiedDate',
'source': 'Source',
'put_code': 'int',
'path': 'str',
'title': 'WorkTitle',
'journal_title': 'Title',
'short_description': 'str',
'citation': 'Citation',
'type': 'str',
'publication_date': 'PublicationDate',
'external_ids': 'ExternalIDs',
'url': 'Url',
'contributors': 'WorkContributors',
'language_code': 'str',
'country': 'Country',
'visibility': 'str'
}
self.attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'path': 'path',
'title': 'title',
'journal_title': 'journal-title',
'short_description': 'short-description',
'citation': 'citation',
'type': 'type',
'publication_date': 'publication-date',
'external_ids': 'external-ids',
'url': 'url',
'contributors': 'contributors',
'language_code': 'language-code',
'country': 'country',
'visibility': 'visibility'
}
self._created_date = created_date
self._last_modified_date = last_modified_date
self._source = source
self._put_code = put_code
self._path = path
self._title = title
self._journal_title = journal_title
self._short_description = short_description
self._citation = citation
self._type = type
self._publication_date = publication_date
self._external_ids = external_ids
self._url = url
self._contributors = contributors
self._language_code = language_code
self._country = country
self._visibility = visibility
@property
def created_date(self):
"""
Gets the created_date of this Work.
:return: The created_date of this Work.
:rtype: CreatedDate
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""
Sets the created_date of this Work.
:param created_date: The created_date of this Work.
:type: CreatedDate
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""
Gets the last_modified_date of this Work.
:return: The last_modified_date of this Work.
:rtype: LastModifiedDate
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""
Sets the last_modified_date of this Work.
:param last_modified_date: The last_modified_date of this Work.
:type: LastModifiedDate
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""
Gets the source of this Work.
:return: The source of this Work.
:rtype: Source
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this Work.
:param source: The source of this Work.
:type: Source
"""
self._source = source
@property
def put_code(self):
"""
Gets the put_code of this Work.
:return: The put_code of this Work.
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""
Sets the put_code of this Work.
:param put_code: The put_code of this Work.
:type: int
"""
self._put_code = put_code
@property
def path(self):
"""
Gets the path of this Work.
:return: The path of this Work.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this Work.
:param path: The path of this Work.
:type: str
"""
self._path = path
@property
def title(self):
"""
Gets the title of this Work.
:return: The title of this Work.
:rtype: WorkTitle
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this Work.
:param title: The title of this Work.
:type: WorkTitle
"""
self._title = title
@property
def journal_title(self):
"""
Gets the journal_title of this Work.
:return: The journal_title of this Work.
:rtype: Title
"""
return self._journal_title
@journal_title.setter
def journal_title(self, journal_title):
"""
Sets the journal_title of this Work.
:param journal_title: The journal_title of this Work.
:type: Title
"""
self._journal_title = journal_title
@property
def short_description(self):
"""
Gets the short_description of this Work.
:return: The short_description of this Work.
:rtype: str
"""
return self._short_description
@short_description.setter
def short_description(self, short_description):
"""
Sets the short_description of this Work.
:param short_description: The short_description of this Work.
:type: str
"""
self._short_description = short_description
@property
def citation(self):
"""
Gets the citation of this Work.
:return: The citation of this Work.
:rtype: Citation
"""
return self._citation
@citation.setter
def citation(self, citation):
"""
Sets the citation of this Work.
:param citation: The citation of this Work.
:type: Citation
"""
self._citation = citation
@property
def type(self):
"""
Gets the type of this Work.
:return: The type of this Work.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Work.
:param type: The type of this Work.
:type: str
"""
allowed_values = ["ARTISTIC_PERFORMANCE", "BOOK_CHAPTER", "BOOK_REVIEW", "BOOK", "CONFERENCE_ABSTRACT", "CONFERENCE_PAPER", "CONFERENCE_POSTER", "DATA_SET", "DICTIONARY_ENTRY", "DISCLOSURE", "DISSERTATION", "EDITED_BOOK", "ENCYCLOPEDIA_ENTRY", "INVENTION", "JOURNAL_ARTICLE", "JOURNAL_ISSUE", "LECTURE_SPEECH", "LICENSE", "MAGAZINE_ARTICLE", "MANUAL", "NEWSLETTER_ARTICLE", "NEWSPAPER_ARTICLE", "ONLINE_RESOURCE", "OTHER", "PATENT", "REGISTERED_COPYRIGHT", "REPORT", "RESEARCH_TECHNIQUE", "RESEARCH_TOOL", "SPIN_OFF_COMPANY", "STANDARDS_AND_POLICY", "SUPERVISED_STUDENT_PUBLICATION", "TECHNICAL_STANDARD", "TEST", "TRADEMARK", "TRANSLATION", "WEBSITE", "WORKING_PAPER", "UNDEFINED"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def publication_date(self):
"""
Gets the publication_date of this Work.
:return: The publication_date of this Work.
:rtype: PublicationDate
"""
return self._publication_date
@publication_date.setter
def publication_date(self, publication_date):
"""
Sets the publication_date of this Work.
:param publication_date: The publication_date of this Work.
:type: PublicationDate
"""
self._publication_date = publication_date
@property
def external_ids(self):
"""
Gets the external_ids of this Work.
:return: The external_ids of this Work.
:rtype: ExternalIDs
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""
Sets the external_ids of this Work.
:param external_ids: The external_ids of this Work.
:type: ExternalIDs
"""
self._external_ids = external_ids
@property
def url(self):
"""
Gets the url of this Work.
:return: The url of this Work.
:rtype: Url
"""
return self._url
@url.setter
def url(self, url):
"""
Sets the url of this Work.
:param url: The url of this Work.
:type: Url
"""
self._url = url
@property
def contributors(self):
"""
Gets the contributors of this Work.
:return: The contributors of this Work.
:rtype: WorkContributors
"""
return self._contributors
@contributors.setter
def contributors(self, contributors):
"""
Sets the contributors of this Work.
:param contributors: The contributors of this Work.
:type: WorkContributors
"""
self._contributors = contributors
@property
def language_code(self):
"""
Gets the language_code of this Work.
:return: The language_code of this Work.
:rtype: str
"""
return self._language_code
@language_code.setter
def language_code(self, language_code):
"""
Sets the language_code of this Work.
:param language_code: The language_code of this Work.
:type: str
"""
self._language_code = language_code
@property
def country(self):
"""
Gets the country of this Work.
:return: The country of this Work.
:rtype: Country
"""
return self._country
@country.setter
def country(self, country):
"""
Sets the country of this Work.
:param country: The country of this Work.
:type: Country
"""
self._country = country
@property
def visibility(self):
"""
Gets the visibility of this Work.
:return: The visibility of this Work.
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""
Sets the visibility of this Work.
:param visibility: The visibility of this Work.
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}"
.format(visibility, allowed_values)
)
self._visibility = visibility
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Work):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
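# --- Illustrative usage (editor-added sketch, not part of the generated file) ---
if __name__ == "__main__":
    # The enum-guarded setters raise ValueError for values outside allowed_values.
    work = Work()
    work.type = "JOURNAL_ARTICLE"
    work.visibility = "PUBLIC"
    assert work.to_dict()["type"] == "JOURNAL_ARTICLE"
    try:
        work.visibility = "SECRET"  # not an allowed value
    except ValueError as exc:
        print(exc)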
| 26.425781 | 690 | 0.566593 |
| true | true |
1c3f804e9ac4240a15574562410b12ae5fd62227 | 1,472 | py | Python
train.py | train255/Silent-Face-Anti-Spoofing | e2137cde55ba4c7b43c2a7d6340d827a106b7404 | ["Apache-2.0"] | null | null | null
train.py | train255/Silent-Face-Anti-Spoofing | e2137cde55ba4c7b43c2a7d6340d827a106b7404 | ["Apache-2.0"] | null | null | null
train.py | train255/Silent-Face-Anti-Spoofing | e2137cde55ba4c7b43c2a7d6340d827a106b7404 | ["Apache-2.0"] | null | null | null
# -*- coding: utf-8 -*-
# @Time : 20-6-3 5:39 PM
# @Author : zhuying
# @Company : Minivision
# @File : train.py
# @Software : PyCharm
import argparse
import os
from src.train_main import TrainMain
from src.default_config import get_default_config, update_config
def parse_args():
"""parsing and configuration"""
desc = "Silence-FAS"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--device_ids", type=str, default="1", help="which gpu id, 0123")
parser.add_argument("--model_type", type=str, default="MultiFTNet", help="model type")
parser.add_argument("--epochs", type=int, default=25, help="epochs")
parser.add_argument("--batch_size", type=int, default=1024, help="batch size")
parser.add_argument("--val_size", type=float, default=0.1, help="validation split")
parser.add_argument("--checkpoint", type=str, default="", help="checkpoint path")
parser.add_argument("--patch_info", type=str, default="1_80x80",
help="[org_1_80x60 / 1_80x80 / 2.7_80x80 / 4_80x80]")
args = parser.parse_args()
    # "--device_ids" is parsed one character at a time, so "01" selects GPUs 0 and 1.
    cuda_devices = [int(elem) for elem in args.device_ids]
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, cuda_devices))
    # After masking, the visible devices are renumbered from 0.
    args.devices = list(range(len(cuda_devices)))
return args
if __name__ == "__main__":
args = parse_args()
conf = get_default_config()
conf = update_config(args, conf)
trainer = TrainMain(conf)
trainer.train_model()
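# Example invocation (editor-added; the GPU ids and patch_info value below are
# illustrative assumptions, not repository defaults):
#
#   python train.py --device_ids 01 --model_type MultiFTNet --patch_info 2.7_80x80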
| 36.8 | 90 | 0.683424 |
| true | true |
1c3f8190c1626094869fa84261e62d28256a508c | 126,857 | py | Python
tests/models/layoutlmv2/test_tokenization_layoutlmv2.py | manuelciosici/transformers | c33f6046c3dab8f41bedf893404e6469dea3bce8 | ["Apache-2.0"] | 8,028 | 2018-11-05T15:19:44.000Z | 2019-07-16T09:14:59.000Z
tests/models/layoutlmv2/test_tokenization_layoutlmv2.py | ymwangg/transformers | 4a419d4995111c22d6842ee1bcd2d3f500150845 | ["Apache-2.0"] | 731 | 2018-11-05T21:35:52.000Z | 2019-07-16T09:51:26.000Z
tests/models/layoutlmv2/test_tokenization_layoutlmv2.py | ymwangg/transformers | 4a419d4995111c22d6842ee1bcd2d3f500150845 | ["Apache-2.0"] | 2,106 | 2018-11-05T15:29:15.000Z | 2019-07-16T08:51:57.000Z
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import shutil
import tempfile
import unittest
from typing import List
from transformers import AddedToken, LayoutLMv2TokenizerFast, SpecialTokensMixin, is_tf_available, is_torch_available
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import (
VOCAB_FILES_NAMES,
BasicTokenizer,
LayoutLMv2Tokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import is_pt_tf_cross_test, require_pandas, require_tokenizers, require_torch, slow
from ...test_tokenization_common import (
SMALL_TRAINING_CORPUS,
TokenizerTesterMixin,
filter_non_english,
merge_model_tokenizer_mappings,
)
@require_tokenizers
@require_pandas
class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = LayoutLMv2Tokenizer
rust_tokenizer_class = LayoutLMv2TokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
test_seq2seq = False
def get_words_and_boxes(self):
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return words, boxes
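    # (editor note) The boxes follow the LayoutLMv2 convention of
    # [x0, y0, x1, y1] coordinates normalized to a 0-1000 page scale.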
def get_words_and_boxes_batch(self):
words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return words, boxes
def get_question_words_and_boxes(self):
question = "what's his name?"
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return question, words, boxes
def get_question_words_and_boxes_batch(self):
questions = ["what's his name?", "how is he called?"]
words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return questions, words, boxes
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"what",
"s",
"his",
"name",
"?",
"a",
"weird",
"##ly",
"test",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
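        # (editor note) With this toy vocab, WordPiece splits "weirdly" into
        # ["weird", "##ly"], which the encode/decode round-trip tests below rely on.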
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["Hello", "\xad", "hello"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutlmv2-base-uncased")
question, words, boxes = self.get_question_words_and_boxes()
text = tokenizer.encode(
question.split(),
boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))],
add_special_tokens=False,
)
text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_pair == [101] + text + [102] + text_2 + [102]
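        # (editor note) 101 and 102 are the [CLS] and [SEP] ids in the
        # microsoft/layoutlmv2-base-uncased vocab, so the pair layout is
        # [CLS] question [SEP] words [SEP].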
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
words[1] = tokenizer_r.mask_token
tokens = tokenizer_r.encode_plus(
words,
boxes=boxes,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
expected_results = [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((0, 6), tokenizer_r.mask_token),
((0, 4), "test"),
((0, 0), tokenizer_r.sep_token),
]
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_add_special_tokens(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
special_token_box = [1000, 1000, 1000, 1000]
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(
[special_token], boxes=[special_token_box], add_special_tokens=False
)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
words = "aaaaa bbbbbb low cccccccccdddddddd l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(
words,
boxes=boxes,
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
@unittest.skip("Not implemented")
def test_right_and_left_truncation(self):
pass
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
                # check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
padding_size = 10
padding_idx = tokenizer.pad_token_id
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
                # Test that 'longest' and 'no_padding' don't do anything on a single sequence
                tokenizer.padding_side = "right"
                not_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    padding=True,  # padding=True == 'longest'; the second call below covers padding=False
                    return_special_tokens_mask=True,
                )
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
not_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
# Test right padding
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)
# Test left padding
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
assert [0] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
tokens = []
for word in words:
tokens.extend(tokenizer.tokenize(word))
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
output_text = "a weirdly test"
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
)
# test 2: two sequences
question, words, boxes = self.get_question_words_and_boxes()
sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding_to_max_length(self):
"""We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
padding_size = 10
                # check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
padding_idx = tokenizer.pad_token_id
# Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
# FIXME: the next line should be padding(max_length) to avoid warning
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# Check that nothing is done when a maximum length is not specified
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_padding(self, max_length=50):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
# Encode - Simple input
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode - Pair input
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest")
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode_plus - Simple input
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding=True,
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Batch_encode_plus - Pair input
questions, words, boxes = self.get_question_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding="longest",
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad on single examples after tokenization
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.encode_plus(words, boxes=boxes)
input_p = tokenizer_r.pad(input_p)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.encode_plus(words, boxes=boxes)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
# Using pad after tokenization
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_p = tokenizer_r.pad(input_p)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad after tokenization
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_call(self):
        # Test that __call__ wraps encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Test not batched
words, boxes = self.get_words_and_boxes()
encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
                # Test not batched pairs (pass the question so a pair is actually encoded)
                question, words, boxes = self.get_question_words_and_boxes()
                encoded_sequences_1 = tokenizer.encode_plus(question, words, boxes=boxes)
                encoded_sequences_2 = tokenizer(question, words, boxes=boxes)
                self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences = [
tokenizer.encode_plus(words_example, boxes=boxes_example)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
                # check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences_padded = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
                # check 'longest' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
                # check 'no_padding' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=False
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@unittest.skip("batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
pass
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
                # check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
                # check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
words, boxes = self.get_words_and_boxes()
# empty_tokens = tokenizer([""], [[]], padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
# for key, value in empty_tokens.items():
# self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# truncation to something which is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
words,
boxes=boxes,
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_build_inputs_with_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Input tokens id
words, boxes = self.get_words_and_boxes()
input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
# Testing single inputs
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
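# A minimal sketch, with made-up ids, of the filtering idiom used above:
# dropping every position where `special_tokens_mask` is 1 recovers the
# encoding produced with add_special_tokens=False.
ids_with_special = [101, 7592, 2088, 102]  # hypothetical [CLS] w1 w2 [SEP]
mask = [1, 0, 0, 1]
bare = [tok for tok, is_special in zip(ids_with_special, mask) if not is_special]
assert bare == [7592, 2088]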
def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
words, boxes = self.get_words_and_boxes()
tmpdirname = tempfile.mkdtemp()
before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
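# A minimal sketch (illustrative values only) of what the two padding sides
# produce, mirroring the assertions above: right padding appends pad ids,
# left padding prepends them.
ids, pad_id, n = [101, 7592, 102], 0, 2
assert ids + [pad_id] * n == [101, 7592, 102, 0, 0]  # padding_side = "right"
assert [pad_id] * n + ids == [0, 0, 101, 7592, 102]  # padding_side = "left"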
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
output = tokenizer(words, boxes=boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertNotIn(1, output["token_type_ids"])
# test 2: two sequences (question + words)
question, words, boxes = self.get_question_words_and_boxes()
output = tokenizer(question, words, boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertIn(1, output["token_type_ids"])
def test_offsets_mapping(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
text = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(text))]
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert there are only added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
text = "what's his name"
pair = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
tokens_with_offsets = tokenizer_r.encode_plus(
text,
pair,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert there are only added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
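# A minimal sketch of what `offset_mapping` encodes (the tokens and spans
# below are made up for illustration): one (start, end) character span per
# token, with special tokens conventionally mapped to (0, 0).
example_tokens = ["[CLS]", "wonder", "##ful", "[SEP]"]
example_offsets = [(0, 0), (0, 6), (6, 9), (0, 0)]
example_special_mask = [1, 0, 0, 1]
assert len(example_offsets) == len(example_tokens)  # one offset per token
assert sum(example_special_mask) == 2  # two added special tokens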
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
assert (
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
if is_using_common_embeddings
else True
)
# Build sequence
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt")
batch_encoded_sequence = tokenizer.batch_encode_plus(
[words, words], boxes=[boxes, boxes], return_tensors="pt"
)
# We add dummy image keys (as LayoutLMv2 actually also requires a feature extractor
# to prepare the image input)
encoded_sequence["image"] = torch.randn(1, 3, 224, 224)
batch_encoded_sequence["image"] = torch.randn(2, 3, 224, 224)
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
words, boxes = self.get_words_and_boxes()
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
self.assertListEqual(ids, rust_ids)
def test_tokenization_python_rust_equals(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
# Ensure basic input match
input_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
words = ["hello" for _ in range(1000)]
boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)]
# Ensure truncation match
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
# Ensure truncation with stride match
input_p = tokenizer_p.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_embeded_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
tokens_r = tokenizer_r.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
tokens_p = tokenizer_p.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
for key in tokens_p.keys():
self.assertEqual(tokens_r[key], tokens_p[key])
if "token_type_ids" in tokens_r:
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def test_compare_add_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
words, boxes = self.get_words_and_boxes()
# tokenize()
no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode()
no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode_plus()
no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
self.assertEqual(
len(no_special_tokens[key]),
len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
)
# batch_encode_plus()
words, boxes = self.get_words_and_boxes_batch()
no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
@slow
def test_layoutlmv2_truncation_integration_test(self):
words, boxes = self.get_words_and_boxes()
tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased", model_max_length=512)
for i in range(12, 512):
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True)
# Ensure that the input IDs are less than the max length defined.
self.assertLessEqual(len(new_encoded_inputs), i)
tokenizer.model_max_length = 20
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
# Ensure that the input IDs are still truncated when no max_length is specified
self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
self.assertLessEqual(len(new_encoded_inputs), 20)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
# A Tensor cannot be built from sequences which are not the same size
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(
words, boxes=boxes, padding="longest", return_tensors="tf"
)
encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def test_sequence_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = ["With", "these", "inputs."]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))]
# We want sequence 0 and sequence 1 to be tagged
# with 0 and 1 token_ids respectively
# (regardless of whether the model uses token type ids)
# We rely on this assumption in the QA pipeline, among other places
output = tokenizer(seq_0.split(), boxes=boxes)
self.assertIn(0, output.sequence_ids())
output = tokenizer(seq_0, seq_1, boxes=boxes)
self.assertIn(0, output.sequence_ids())
self.assertIn(1, output.sequence_ids())
if tokenizer.num_special_tokens_to_add(pair=True):
self.assertIn(None, output.sequence_ids())
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
r_output = tokenizer_r.encode(words, boxes=boxes)
special_token_id = tokenizer_r.encode(
["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False
)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
)
tokenizer_p = self.tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
p_output = tokenizer_p.encode(words, boxes=boxes)
cr_output = tokenizer_cr.encode(words, boxes=boxes)
self.assertEqual(p_output, r_output)
self.assertEqual(cr_output, r_output)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
def test_training_new_tokenizer(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
# Test we can use the new tokenizer with something not seen during training
text = [["this", "is", "the"], ["how", "are", "you"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]]
inputs = new_tokenizer(text, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is the"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
# We check that the parameters of the tokenizer remained the same
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
# Assert the set of special tokens match as we didn't ask to change them
self.assertSequenceEqual(
tokenizer.all_special_tokens_extended,
new_tokenizer.all_special_tokens_extended,
)
self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
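# A minimal usage sketch for `train_new_from_iterator` (fast tokenizers
# only; names are placeholders — SMALL_TRAINING_CORPUS plays the corpus
# role in the test above):
#
#     tiny_corpus = [["this is a sentence", "this is another one"]]
#     new_tok = fast_tokenizer.train_new_from_iterator(tiny_corpus, vocab_size=100)
#
# `new_tok` keeps the special tokens, post-processing and length limits of
# `fast_tokenizer`, but with a freshly trained vocabulary — which is exactly
# what the assertions above verify.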
def test_training_new_tokenizer_with_special_tokens_change(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
# Test with a special tokens map
class_signature = inspect.signature(tokenizer.__class__)
if "cls_token" in class_signature.parameters:
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
)
cls_id = new_tokenizer.get_vocab()["<cls>"]
self.assertEqual(new_tokenizer.cls_token, "<cls>")
self.assertEqual(new_tokenizer.cls_token_id, cls_id)
# Create a new mapping from the special tokens defined in the original tokenizer
special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
special_tokens_list.remove("additional_special_tokens")
special_tokens_map = {}
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is not None:
special_token = getattr(tokenizer, token)
special_tokens_map[special_token] = f"{special_token}a"
# Train new tokenizer
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
)
# Check the changes
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is None:
continue
special_token = getattr(tokenizer, token)
if special_token in special_tokens_map:
new_special_token = getattr(new_tokenizer, token)
self.assertEqual(special_tokens_map[special_token], new_special_token)
new_id = new_tokenizer.get_vocab()[new_special_token]
self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
# Check if the AddedToken / string format has been kept
for special_token in tokenizer.all_special_tokens_extended:
if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
elif isinstance(special_token, AddedToken):
# The special token must appear in the list of the new tokenizer as an object of type AddedToken with
# the same parameters as the old AddedToken except the content that the user has requested to change.
special_token_str = special_token.content
new_special_token_str = special_tokens_map[special_token_str]
find = False
for candidate in new_tokenizer.all_special_tokens_extended:
if (
isinstance(candidate, AddedToken)
and candidate.content == new_special_token_str
and candidate.lstrip == special_token.lstrip
and candidate.rstrip == special_token.rstrip
and candidate.normalized == special_token.normalized
and candidate.single_word == special_token.single_word
):
find = True
break
self.assertTrue(
find,
(
f"'{new_special_token_str}' doesn't appear in the list "
f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as "
f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}"
),
)
elif special_token not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
else:
# The special token must appear in the list of the new tokenizer as an object of type string.
self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended)
# Test we can use the new tokenizer with something not seen during training
words = [["this", "is"], ["hello", "🤗"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]]
inputs = new_tokenizer(words, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
def test_prepare_for_model(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
# only test prepare_for_model for the slow tokenizer
if tokenizer.__class__.__name__ == "LayoutLMv2TokenizerFast":
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True)
input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(input_dict, prepared_input_dict)
def test_padding_different_model_input_name(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes)
# rename encoded batch to "inputs"
input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]]
del input_r[tokenizer_r.model_input_names[0]]
input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]]
del input_p[tokenizer_p.model_input_names[0]]
# Renaming `input_ids` to `inputs`
tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:]
tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:]
input_r = tokenizer_r.pad(input_r, padding="longest")
input_p = tokenizer_p.pad(input_p, padding="longest")
max_length = len(input_p["inputs"][0])
self.assert_batch_padded_input_match(
input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs"
)
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
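# A back-of-the-envelope sketch (assuming no special tokens) of how many
# overflow windows `return_overflowing_tokens` yields: after the first
# window, each additional one advances by (max_length - stride) tokens.
import math
def _num_overflow_windows(n_tokens, max_length, stride):
    if n_tokens <= max_length:
        return 1
    return 1 + math.ceil((n_tokens - max_length) / (max_length - stride))
assert _num_overflow_windows(10, 6, 0) == 2
assert _num_overflow_windows(10, 6, 2) == 2
assert _num_overflow_windows(14, 6, 2) == 3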
@unittest.skip("TO DO: overwrite this very extensive test.")
def test_alignement_methods(self):
pass
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(
filter(
lambda t: [t[0]]
== tokenizer.encode(t[1].split(" "), boxes=len(t[1]) * [[1, 1, 1, 1]], add_special_tokens=False),
toks,
)
)
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
words = output_txt.split(" ")
boxes = [[i, i, i, i] for i in range(len(words))]
output_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
return words, boxes, output_ids
# @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_pair_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Build a sequence from our model's vocabulary
stride = 2
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
question_0 = " ".join(map(str, seq_0))
if len(ids) <= 2 + stride:
# seq_0 is a list of words here, so repeat the list (and its boxes) rather than string-concatenating
seq_0 = seq_0 * (2 + stride)
boxes_0 = boxes_0 * (2 + stride)
ids = None
seq0_tokens = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
self.assertGreater(len(seq0_tokens["input_ids"]), 2 + stride)
question_1 = "This is another sentence to be encoded."
seq_1 = ["what", "a", "weird", "test", "weirdly", "weird"]
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
if abs(len(seq0_tokens["input_ids"]) - len(seq1_tokens["input_ids"])) <= 2:
seq1_tokens_input_ids = seq1_tokens["input_ids"] + seq1_tokens["input_ids"]
seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False)
seq_1 = seq_1.split(" ")
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
self.assertGreater(len(seq1_tokens["input_ids"]), 2 + stride)
smallest = (
seq1_tokens["input_ids"]
if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"])
else seq0_tokens["input_ids"]
)
# We are not using the special tokens - a bit too hard to test all the tokenizers with this
# TODO try this again later
sequence = tokenizer(
question_0, seq_1, boxes=boxes_1, add_special_tokens=False
) # , add_prefix_space=False)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_2 = seq_0 * model_max_length
question_2 = " ".join(map(str, seq_2))
boxes_2 = boxes_0 * model_max_length
self.assertGreater(len(seq_2), model_max_length)
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
sequence2 = tokenizer(question_2, seq_1, boxes=boxes_1, add_special_tokens=False)
total_length2 = len(sequence2["input_ids"])
self.assertLess(total_length1, model_max_length, "Issue with the testing sequence, please update it.")
self.assertGreater(
total_length2, model_max_length, "Issue with the testing sequence, please update it."
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
output = tokenizer(
question_2,
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_2],
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
# Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation
truncated_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][:-2]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
truncated_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][:-2]
)
truncated_longest_sequence = (
truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence
)
overflow_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][-(2 + stride) :]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
overflow_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][-(2 + stride) :]
)
overflow_longest_sequence = (
overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence
)
bbox_first = [[0, 0, 0, 0]] * (len(seq_0) - 2)
bbox_first_sequence = bbox_first + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"]
overflowing_token_bbox_first_sequence_slow = [[0, 0, 0, 0]] * (2 + stride)
overflowing_token_bbox_first_sequence_fast = [[0, 0, 0, 0]] * (2 + stride) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"]
bbox_second = [[0, 0, 0, 0]] * len(seq_0)
bbox_second_sequence = (
bbox_second + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"][:-2]
)
overflowing_token_bbox_second_sequence_slow = tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
overflowing_token_bbox_second_sequence_fast = [[0, 0, 0, 0]] * len(seq_0) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
bbox_longest_sequence = (
bbox_first_sequence if len(seq0_tokens) > len(seq1_tokens) else bbox_second_sequence
)
overflowing_token_bbox_longest_sequence_fast = (
overflowing_token_bbox_first_sequence_fast
if len(seq0_tokens) > len(seq1_tokens)
else overflowing_token_bbox_second_sequence_fast
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(len(overflowing_bbox), 2 + stride + len(smallest))
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
information_first_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_first_truncated["input_ids"][0]
overflowing_tokens = information_first_truncated["input_ids"][1]
bbox = information_first_truncated["bbox"][0]
overflowing_bbox = information_first_truncated["bbox"][1]
self.assertEqual(len(information_first_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_first_sequence)
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_fast)
else:
truncated_sequence = information_first_truncated["input_ids"]
overflowing_tokens = information_first_truncated["overflowing_tokens"]
overflowing_bbox = information_first_truncated["overflowing_token_boxes"]
bbox = information_first_truncated["bbox"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq0_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_slow)
information_second_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_second",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_second_truncated["input_ids"][0]
overflowing_tokens = information_second_truncated["input_ids"][1]
bbox = information_second_truncated["bbox"][0]
overflowing_bbox = information_second_truncated["bbox"][1]
self.assertEqual(len(information_second_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_second_sequence)
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_fast)
else:
truncated_sequence = information_second_truncated["input_ids"]
overflowing_tokens = information_second_truncated["overflowing_tokens"]
bbox = information_second_truncated["bbox"]
overflowing_bbox = information_second_truncated["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq1_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_slow)
# @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_single_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
sequence = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
total_length = len(sequence["input_ids"])
self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it, it's too short")
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_1 = seq_0 * model_max_length
boxes_1 = boxes_0 * model_max_length
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
self.assertGreater(
total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short"
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"Truncation: {truncation_state}"):
output = tokenizer(
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, boxes=boxes_1, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], boxes=[boxes_1], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
# Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation
stride = 2
information = tokenizer(
seq_0,
boxes=boxes_0,
max_length=total_length - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
else:
truncated_sequence = information["input_ids"]
overflowing_tokens = information["overflowing_tokens"]
bbox = information["bbox"]
overflowing_bbox = information["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
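# A schematic contrast of the two return formats checked above (illustrative
# dictionaries, not real tokenizer output). Fast tokenizers stack the
# truncated ids and the overflow window under "input_ids"; slow tokenizers
# expose the overflow under dedicated keys instead.
fast_like = {"input_ids": [[1, 2, 3], [3, 4, 5]]}
slow_like = {"input_ids": [1, 2, 3], "overflowing_tokens": [3, 4, 5], "overflowing_token_boxes": [[0, 0, 0, 0]] * 3}
assert len(fast_like["input_ids"]) == 2
assert "overflowing_tokens" in slow_like and "overflowing_tokens" not in fast_like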
@unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 tokenizer always expects pretokenized inputs.")
def test_compare_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 fast tokenizer does not support prepare_for_model")
def test_compare_prepare_for_model(self):
pass
@slow
def test_only_label_first_subword(self):
words = ["hello", "niels"]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
word_labels = [0, 1]
# test slow tokenizer
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
# test fast tokenizer
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
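# A minimal sketch of the label-alignment rule tested above (assuming a
# `word_ids`-style mapping from tokens to source words, None for specials).
def _align_labels(word_ids, word_labels, only_label_first_subword=True):
    labels, previous = [], None
    for word_id in word_ids:
        if word_id is None:
            labels.append(-100)  # special tokens are always ignored
        elif only_label_first_subword and word_id == previous:
            labels.append(-100)  # later subwords of the same word are ignored
        else:
            labels.append(word_labels[word_id])
        previous = word_id
    return labels
# "niels" splits into two subwords: [CLS] hello ni ##els [SEP]
assert _align_labels([None, 0, 1, 1, None], [0, 1]) == [-100, 0, 1, -100, -100]
assert _align_labels([None, 0, 1, 1, None], [0, 1], only_label_first_subword=False) == [-100, 0, 1, 1, -100]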
@slow
def test_layoutlmv2_integration_test(self):
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
# There are 3 cases:
# CASE 1: document image classification (training + inference), document image token classification (inference),
# in which case only words and normalized bounding boxes are provided to the tokenizer
# CASE 2: document image token classification (training),
# in which case one also provides word labels to the tokenizer
# CASE 3: document image visual question answering (inference),
# in which case one also provides a question to the tokenizer
# We need to test all 3 cases both on batched and non-batched inputs.
# CASE 1: not batched
words, boxes = self.get_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 1: batched
words, boxes = self.get_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: not batched
words, boxes = self.get_words_and_boxes()
word_labels = [1, 2, 3]
# fmt: off
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: batched
words, boxes = self.get_words_and_boxes_batch()
word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]
# fmt: off
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: not batched
question, words, boxes = self.get_question_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: batched
questions, words, boxes = self.get_question_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], [101, 2129, 2003, 2002, 2170, 1029, 102, 2054, 1037, 21110, 2546, 3806, 2102, 2078, 102, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
@unittest.skip("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
import inspect
import os
import re
import shutil
import tempfile
import unittest
from typing import List
from transformers import AddedToken, LayoutLMv2TokenizerFast, SpecialTokensMixin, is_tf_available, is_torch_available
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import (
VOCAB_FILES_NAMES,
BasicTokenizer,
LayoutLMv2Tokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import is_pt_tf_cross_test, require_pandas, require_tokenizers, require_torch, slow
from ...test_tokenization_common import (
SMALL_TRAINING_CORPUS,
TokenizerTesterMixin,
filter_non_english,
merge_model_tokenizer_mappings,
)
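# The tests below exercise LayoutLMv2's word-level tokenization API: every call
# takes the words of a document plus one bounding box per word, with box
# coordinates in (x0, y0, x1, y1) format normalized to the 0-1000 range.
# A minimal usage sketch (assuming the "microsoft/layoutlmv2-base-uncased"
# checkpoint used elsewhere in this file is available):
#
#     tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
#     encoding = tokenizer(
#         ["a", "weirdly", "test"],
#         boxes=[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
#     )
#     # encoding holds input_ids, bbox, token_type_ids and attention_mask.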
@require_tokenizers
@require_pandas
class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = LayoutLMv2Tokenizer
rust_tokenizer_class = LayoutLMv2TokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
test_seq2seq = False
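    # Fixture helpers: small word/box samples shared by the tests below.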
def get_words_and_boxes(self):
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return words, boxes
def get_words_and_boxes_batch(self):
words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return words, boxes
def get_question_words_and_boxes(self):
question = "what's his name?"
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return question, words, boxes
def get_question_words_and_boxes_batch(self):
questions = ["what's his name?", "how is he called?"]
words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return questions, words, boxes
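    # setUp writes a tiny WordPiece vocab file into a temp dir so the slow
    # tokenizer can be instantiated without downloading a real checkpoint.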
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"what",
"s",
"his",
"name",
"?",
"a",
"weird",
"##ly",
"test",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
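    # The next group of tests targets the BasicTokenizer and WordpieceTokenizer
    # building blocks directly; no bounding boxes are involved at this level.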
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
        for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
self.assertListEqual([tokenizer.tokenize(t) for t in ["Hello", "\xad", "hello"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutlmv2-base-uncased")
question, words, boxes = self.get_question_words_and_boxes()
text = tokenizer.encode(
question.split(),
boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))],
add_special_tokens=False,
)
text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
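        # BERT-style pair layout: [CLS] question [SEP] words [SEP], where
        # 101 and 102 are the [CLS] and [SEP] ids in the base vocabulary.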
assert encoded_pair == [101] + text + [102] + text_2 + [102]
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
words[1] = tokenizer_r.mask_token
tokens = tokenizer_r.encode_plus(
words,
boxes=boxes,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
expected_results = [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((0, 6), tokenizer_r.mask_token),
((0, 4), "test"),
((0, 0), tokenizer_r.sep_token),
]
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_add_special_tokens(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
special_token_box = [1000, 1000, 1000, 1000]
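                # Overriding cls_token registers the new string as a special token:
                # it must encode to a single id and be dropped by skip_special_tokens.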
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(
[special_token], boxes=[special_token_box], add_special_tokens=False
)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
words = "aaaaa bbbbbb low cccccccccdddddddd l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(
words,
boxes=boxes,
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
@unittest.skip("Not implemented")
def test_right_and_left_truncation(self):
pass
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
                # Check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
padding_size = 10
padding_idx = tokenizer.pad_token_id
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
                # Test that 'longest' and 'no_padding' do nothing on a single sequence
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
                    padding=True,  # 'longest' is a no-op for a single sequence
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
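                # padding=False ('do_not_pad') should likewise leave the sequence untouched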
not_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
assert [0] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
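                # Round-trip: tokenize word by word, convert to ids, and check the
                # result matches a direct encode() call without special tokens.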
tokens = []
for word in words:
tokens.extend(tokenizer.tokenize(word))
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
output_text = "a weirdly test"
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
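                # num_special_tokens_to_add must equal the length difference between
                # encoding with and without special tokens.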
sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
)
question, words, boxes = self.get_question_words_and_boxes()
sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding_to_max_length(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
padding_size = 10
self._check_no_pad_token_padding(tokenizer, words)
padding_idx = tokenizer.pad_token_id
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_padding(self, max_length=50):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest")
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding=True,
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
questions, words, boxes = self.get_question_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding="longest",
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r)
                input_p = tokenizer_p.encode_plus(words, boxes=boxes)
                input_p = tokenizer_p.pad(input_p)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode_plus(words, boxes=boxes)
                input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r)
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer_p.pad(input_p)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_call(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
question, words, boxes = self.get_question_words_and_boxes()
                encoded_sequences_1 = tokenizer.encode_plus(question, words, boxes=boxes)
                encoded_sequences_2 = tokenizer(question, words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences = [
tokenizer.encode_plus(words_example, boxes=boxes_example)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences_padded = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=False
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@unittest.skip("batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
pass
def test_batch_encode_plus_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
words, boxes = self.get_words_and_boxes()
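                    # pad_to_multiple_of=8 rounds each padded length up to the nearest
                    # multiple of 8 (useful for tensor-core-friendly batch shapes).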
normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
                        self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} should not be padded to a multiple of 8")
normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
self.assertRaises(
ValueError,
tokenizer.__call__,
words,
boxes=boxes,
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_build_inputs_with_special_tokens(self):
if not self.test_slow_tokenizer:
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
return_special_tokens_mask=True,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_save_and_load_tokenizer(self):
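        # Safety check on the model_max_length default value, so we are sure
        # the save/load round-trip below starts from a non-trivial setting.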
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
words, boxes = self.get_words_and_boxes()
tmpdirname = tempfile.mkdtemp()
before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
sequence = "Sequence"
padding_size = 10
                # Check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
output = tokenizer(words, boxes=boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertNotIn(1, output["token_type_ids"])
# test 2: two sequences (question + words)
question, words, boxes = self.get_question_words_and_boxes()
output = tokenizer(question, words, boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertIn(1, output["token_type_ids"])
def test_offsets_mapping(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
text = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(text))]
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # Assert there are exactly `added_tokens` special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
text = "what's his name"
pair = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
tokens_with_offsets = tokenizer_r.encode_plus(
text,
pair,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
assert (
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
if is_using_common_embeddings
else True
)
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt")
batch_encoded_sequence = tokenizer.batch_encode_plus(
[words, words], boxes=[boxes, boxes], return_tensors="pt"
)
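                # LayoutLMv2 models additionally expect a resized document image of
                # shape (batch, 3, 224, 224); random pixels suffice for a smoke test.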
encoded_sequence["image"] = torch.randn(1, 3, 224, 224)
batch_encoded_sequence["image"] = torch.randn(2, 3, 224, 224)
with torch.no_grad():
model(**encoded_sequence)
model(**batch_encoded_sequence)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
if not self.test_slow_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
words, boxes = self.get_words_and_boxes()
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
self.assertListEqual(ids, rust_ids)
def test_tokenization_python_rust_equals(self):
if not self.test_slow_tokenizer:
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
input_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
words = ["hello" for _ in range(1000)]
boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)]
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
input_p = tokenizer_p.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_embeded_special_tokens(self):
if not self.test_slow_tokenizer:
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
tokens_r = tokenizer_r.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
tokens_p = tokenizer_p.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
for key in tokens_p.keys():
self.assertEqual(tokens_r[key], tokens_p[key])
if "token_type_ids" in tokens_r:
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def test_compare_add_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
words, boxes = self.get_words_and_boxes()
no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
self.assertEqual(
len(no_special_tokens[key]),
len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
)
                words, boxes = self.get_words_and_boxes_batch()
no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
@slow
def test_layoutlmv2_truncation_integration_test(self):
words, boxes = self.get_words_and_boxes()
tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased", model_max_length=512)
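        # Truncation must respect an explicit max_length as well as the
        # tokenizer-level model_max_length fallback when none is passed.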
for i in range(12, 512):
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True)
self.assertLessEqual(len(new_encoded_inputs), i)
tokenizer.model_max_length = 20
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
self.assertLessEqual(len(new_encoded_inputs), 20)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(
words, boxes=boxes, padding="longest", return_tensors="tf"
)
encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def test_sequence_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = ["With", "these", "inputs."]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))]
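                # sequence_ids() maps each token to its source sequence (0 or 1),
                # with None for special tokens.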
output = tokenizer(seq_0.split(), boxes=boxes)
self.assertIn(0, output.sequence_ids())
output = tokenizer(seq_0, seq_1, boxes=boxes)
self.assertIn(0, output.sequence_ids())
self.assertIn(1, output.sequence_ids())
if tokenizer.num_special_tokens_to_add(pair=True):
self.assertIn(None, output.sequence_ids())
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
r_output = tokenizer_r.encode(words, boxes=boxes)
special_token_id = tokenizer_r.encode(
["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False
)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
)
tokenizer_p = self.tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
p_output = tokenizer_p.encode(words, boxes=boxes)
cr_output = tokenizer_cr.encode(words, boxes=boxes)
self.assertEqual(p_output, r_output)
self.assertEqual(cr_output, r_output)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
def test_training_new_tokenizer(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
text = [["this", "is", "the"], ["how", "are", "you"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]]
inputs = new_tokenizer(text, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is the"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
self.assertSequenceEqual(
tokenizer.all_special_tokens_extended,
new_tokenizer.all_special_tokens_extended,
)
self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
# Test with a special tokens map
class_signature = inspect.signature(tokenizer.__class__)
if "cls_token" in class_signature.parameters:
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
)
cls_id = new_tokenizer.get_vocab()["<cls>"]
self.assertEqual(new_tokenizer.cls_token, "<cls>")
self.assertEqual(new_tokenizer.cls_token_id, cls_id)
# Create a new mapping from the special tokens defined in the original tokenizer
special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
special_tokens_list.remove("additional_special_tokens")
special_tokens_map = {}
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is not None:
special_token = getattr(tokenizer, token)
special_tokens_map[special_token] = f"{special_token}a"
# Train new tokenizer
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
)
# Check the changes
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is None:
continue
special_token = getattr(tokenizer, token)
if special_token in special_tokens_map:
new_special_token = getattr(new_tokenizer, token)
self.assertEqual(special_tokens_map[special_token], new_special_token)
new_id = new_tokenizer.get_vocab()[new_special_token]
self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
# Check if the AddedToken / string format has been kept
for special_token in tokenizer.all_special_tokens_extended:
if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
elif isinstance(special_token, AddedToken):
# The special token must appear in the list of the new tokenizer as an object of type AddedToken with
# the same parameters as the old AddedToken except the content that the user has requested to change.
special_token_str = special_token.content
new_special_token_str = special_tokens_map[special_token_str]
find = False
for candidate in new_tokenizer.all_special_tokens_extended:
if (
isinstance(candidate, AddedToken)
and candidate.content == new_special_token_str
and candidate.lstrip == special_token.lstrip
and candidate.rstrip == special_token.rstrip
and candidate.normalized == special_token.normalized
and candidate.single_word == special_token.single_word
):
find = True
break
self.assertTrue(
find,
(
f"'{new_special_token_str}' doesn't appear in the list "
f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as "
f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}"
),
)
elif special_token not in special_tokens_map:
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
else:
self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended)
words = [["this", "is"], ["hello", "🤗"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]]
inputs = new_tokenizer(words, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
def test_prepare_for_model(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
if tokenizer.__class__.__name__ == "LayoutLMv2TokenizerFast":
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True)
input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(input_dict, prepared_input_dict)
def test_padding_different_model_input_name(self):
if not self.test_slow_tokenizer:
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes)
input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]]
del input_r[tokenizer_r.model_input_names[0]]
input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]]
del input_p[tokenizer_p.model_input_names[0]]
tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:]
tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:]
input_r = tokenizer_r.pad(input_r, padding="longest")
input_p = tokenizer_p.pad(input_p, padding="longest")
max_length = len(input_p["inputs"][0])
self.assert_batch_padded_input_match(
input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs"
)
def test_batch_encode_dynamic_overflowing(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
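# Every returned tensor should be 2-D (batch, seq_len), except "bbox", which carries an extra trailing dimension of 4 box coordinates.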
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
@unittest.skip("TO DO: overwrite this very extensive test.")
def test_alignement_methods(self):
pass
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
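# Build a small reproducible (words, boxes, ids) triple: keep only purely alphabetic tokens whose
# single-id encoding round-trips through decode/encode, trim or duplicate the list to the requested
# length bounds, then re-encode the resulting words with dummy per-word boxes.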
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(
filter(
lambda t: [t[0]]
== tokenizer.encode(t[1].split(" "), boxes=len(t[1]) * [[1, 1, 1, 1]], add_special_tokens=False),
toks,
)
)
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
toks_ids = [t[0] for t in toks]
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
words = output_txt.split(" ")
boxes = [[i, i, i, i] for i in range(len(words))]
output_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
return words, boxes, output_ids
def test_maximum_encoding_length_pair_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
stride = 2
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
question_0 = " ".join(map(str, seq_0))
if len(ids) <= 2 + stride:
seq_0 = (seq_0 + " ") * (2 + stride)
ids = None
seq0_tokens = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
self.assertGreater(len(seq0_tokens["input_ids"]), 2 + stride)
question_1 = "This is another sentence to be encoded."
seq_1 = ["what", "a", "weird", "test", "weirdly", "weird"]
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
if abs(len(seq0_tokens["input_ids"]) - len(seq1_tokens["input_ids"])) <= 2:
seq1_tokens_input_ids = seq1_tokens["input_ids"] + seq1_tokens["input_ids"]
seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False)
seq_1 = seq_1.split(" ")
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
self.assertGreater(len(seq1_tokens["input_ids"]), 2 + stride)
smallest = (
seq1_tokens["input_ids"]
if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"])
else seq0_tokens["input_ids"]
)
# We are not using the special tokens - a bit too hard to test all the tokenizers with this
# TODO try this again later
sequence = tokenizer(
question_0, seq_1, boxes=boxes_1, add_special_tokens=False
) # , add_prefix_space=False)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_2 = seq_0 * model_max_length
question_2 = " ".join(map(str, seq_2))
boxes_2 = boxes_0 * model_max_length
self.assertGreater(len(seq_2), model_max_length)
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
sequence2 = tokenizer(question_2, seq_1, boxes=boxes_1, add_special_tokens=False)
total_length2 = len(sequence2["input_ids"])
self.assertLess(total_length1, model_max_length, "Issue with the testing sequence, please update it.")
self.assertGreater(
total_length2, model_max_length, "Issue with the testing sequence, please update it."
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
output = tokenizer(
question_2,
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_2],
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
# Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation
truncated_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][:-2]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
truncated_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][:-2]
)
truncated_longest_sequence = (
truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence
)
overflow_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][-(2 + stride) :]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
overflow_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][-(2 + stride) :]
)
overflow_longest_sequence = (
overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence
)
bbox_first = [[0, 0, 0, 0]] * (len(seq_0) - 2)
bbox_first_sequence = bbox_first + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"]
overflowing_token_bbox_first_sequence_slow = [[0, 0, 0, 0]] * (2 + stride)
overflowing_token_bbox_first_sequence_fast = [[0, 0, 0, 0]] * (2 + stride) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"]
bbox_second = [[0, 0, 0, 0]] * len(seq_0)
bbox_second_sequence = (
bbox_second + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"][:-2]
)
overflowing_token_bbox_second_sequence_slow = tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
overflowing_token_bbox_second_sequence_fast = [[0, 0, 0, 0]] * len(seq_0) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
bbox_longest_sequence = (
bbox_first_sequence if len(seq0_tokens) > len(seq1_tokens) else bbox_second_sequence
)
overflowing_token_bbox_longest_sequence_fast = (
overflowing_token_bbox_first_sequence_fast
if len(seq0_tokens) > len(seq1_tokens)
else overflowing_token_bbox_second_sequence_fast
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(len(overflowing_bbox), 2 + stride + len(smallest))
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
information_first_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_first_truncated["input_ids"][0]
overflowing_tokens = information_first_truncated["input_ids"][1]
bbox = information_first_truncated["bbox"][0]
overflowing_bbox = information_first_truncated["bbox"][1]
self.assertEqual(len(information_first_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_first_sequence)
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_fast)
else:
truncated_sequence = information_first_truncated["input_ids"]
overflowing_tokens = information_first_truncated["overflowing_tokens"]
overflowing_bbox = information_first_truncated["overflowing_token_boxes"]
bbox = information_first_truncated["bbox"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq0_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_slow)
information_second_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_second",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_second_truncated["input_ids"][0]
overflowing_tokens = information_second_truncated["input_ids"][1]
bbox = information_second_truncated["bbox"][0]
overflowing_bbox = information_second_truncated["bbox"][1]
self.assertEqual(len(information_second_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_second_sequence)
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_fast)
else:
truncated_sequence = information_second_truncated["input_ids"]
overflowing_tokens = information_second_truncated["overflowing_tokens"]
bbox = information_second_truncated["bbox"]
overflowing_bbox = information_second_truncated["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq1_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_slow)
# @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_single_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
sequence = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
total_length = len(sequence["input_ids"])
self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it, it's too short")
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_1 = seq_0 * model_max_length
boxes_1 = boxes_0 * model_max_length
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
self.assertGreater(
total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short"
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"Truncation: {truncation_state}"):
output = tokenizer(
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, boxes=boxes_1, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], boxes=[boxes_1], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
# Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation
stride = 2
information = tokenizer(
seq_0,
boxes=boxes_0,
max_length=total_length - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
else:
truncated_sequence = information["input_ids"]
overflowing_tokens = information["overflowing_tokens"]
bbox = information["bbox"]
overflowing_bbox = information["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
@unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 tokenizer always expects pretokenized inputs.")
def test_compare_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 fast tokenizer does not support prepare_for_model")
def test_compare_prepare_for_model(self):
pass
@slow
def test_only_label_first_subword(self):
words = ["hello", "niels"]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
word_labels = [0, 1]
# test slow tokenizer
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
# test fast tokenizer
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
@slow
def test_layoutlmv2_integration_test(self):
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
# There are 3 cases:
# CASE 1: document image classification (training + inference), document image token classification (inference),
# in which case only words and normalized bounding boxes are provided to the tokenizer
# CASE 2: document image token classification (training),
# in which case one also provides word labels to the tokenizer
# CASE 3: document image visual question answering (inference),
# in which case one also provides a question to the tokenizer
# We need to test all 3 cases both on batched and non-batched inputs.
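# Call signatures exercised below: CASE 1 -> tokenizer(words, boxes=boxes),
# CASE 2 -> tokenizer(words, boxes=boxes, word_labels=word_labels),
# CASE 3 -> tokenizer(question, words, boxes).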
# CASE 1: not batched
words, boxes = self.get_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 1: batched
words, boxes = self.get_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: not batched
words, boxes = self.get_words_and_boxes()
word_labels = [1, 2, 3]
# fmt: off
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: batched
words, boxes = self.get_words_and_boxes_batch()
word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]
# fmt: off
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: not batched
question, words, boxes = self.get_question_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: batched
questions, words, boxes = self.get_question_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], [101, 2129, 2003, 2002, 2170, 1029, 102, 2054, 1037, 21110, 2546, 3806, 2102, 2078, 102, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
@unittest.skip("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
| true
| true
|
1c3f81fd472f006da3853fb2058fb95fb3cbadc3
| 371
|
py
|
Python
|
hw/python/count_hi.py
|
petrroll/npfl104
|
241646b02e91c14ac885dd6cc981b5bb63d4561c
|
[
"MIT"
] | null | null | null |
hw/python/count_hi.py
|
petrroll/npfl104
|
241646b02e91c14ac885dd6cc981b5bb63d4561c
|
[
"MIT"
] | null | null | null |
hw/python/count_hi.py
|
petrroll/npfl104
|
241646b02e91c14ac885dd6cc981b5bb63d4561c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
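# Count non-overlapping occurrences of the substring "hi" in a string.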
def count_hi(s):
return s.count("hi")
def assert_eq(a, b):
print(f"\tAct:{a}==Exp:{b}")
assert(a == b)
def test():
print(f"Test ({__file__}):")
assert_eq(count_hi(''), 0)
assert_eq(count_hi('hsi'), 0)
assert_eq(count_hi('hi'), 1)
assert_eq(count_hi('hihiqhi'), 3)
print(f"\tTests success.")
if __name__ == "__main__":
test()
| 19.526316
| 35
| 0.619946
|
def count_hi(s):
return s.count("hi")
def assert_eq(a, b):
print(f"\tAct:{a}==Exp:{b}")
assert(a == b)
def test():
print(f"Test ({__file__}):")
assert_eq(count_hi(''), 0)
assert_eq(count_hi('hsi'), 0)
assert_eq(count_hi('hi'), 1)
assert_eq(count_hi('hihiqhi'), 3)
print(f"\tTests success.")
if __name__ == "__main__":
test()
| true
| true
|
1c3f82c9379220d89b5659c7a977fc50f113c0fe
| 1,202
|
py
|
Python
|
module4-software-testing-documentation-and-licensing/lambdata/lambdata_chancedurr/mod.py
|
ChanceDurr/DS-Unit-3-Sprint-1-Software-Engineering
|
842b0fd9364964b9efa0ca06dfae37f07c1e8947
|
[
"MIT"
] | null | null | null |
module4-software-testing-documentation-and-licensing/lambdata/lambdata_chancedurr/mod.py
|
ChanceDurr/DS-Unit-3-Sprint-1-Software-Engineering
|
842b0fd9364964b9efa0ca06dfae37f07c1e8947
|
[
"MIT"
] | null | null | null |
module4-software-testing-documentation-and-licensing/lambdata/lambdata_chancedurr/mod.py
|
ChanceDurr/DS-Unit-3-Sprint-1-Software-Engineering
|
842b0fd9364964b9efa0ca06dfae37f07c1e8947
|
[
"MIT"
] | null | null | null |
import pandas as pd
import unittest
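# Print the number of null values found in each column of the given dataframe.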
def checkNulls(dataframe):
df = dataframe
nulls = df.isnull().sum()
for col, null in nulls.items():
print(f"'{col}' has {null} null value(s).")
def addListToDataframe(alist, dataframe, colName='new_column'):
newCol = pd.DataFrame(alist, columns=[colName])
dataframe = dataframe.join(newCol)
return dataframe
class Shirt():
def __init__(self, style='tee', sleeve='short', size='large', material='cotton'):
self.style = style
self.sleeve = sleeve
self.size = size
self.material = material
def description(self):
print(f'Style: {self.style}')
print(f'Size: {self.size}')
print(f'Material: {self.material}')
print(f'Sleeve: {self.sleeve}')
class Complex:
def __init__(self, realpart, imagpart):
self.r = realpart
self.i = imagpart
def subtract(self):
return self.i - self.r
def divide(self):
if self.r == 0:
return 'Cannot divide by Zero'
else:
return self.i / self.r
def multiply(self):
return self.i * self.r
def add(self):
return self.i + self.r
| 23.115385
| 85
| 0.593178
|
import pandas as pd
import unittest
def checkNulls(dataframe):
df = dataframe
nulls = df.isnull().sum()
for col, null in nulls.items():
print(f"'{col}' has {null} null value(s).")
def addListToDataframe(alist, dataframe, colName='new_column'):
newCol = pd.DataFrame(alist, columns=[colName])
dataframe = dataframe.join(newCol)
return dataframe
class Shirt():
def __init__(self, style='tee', sleeve='short', size='large', material='cotton'):
self.style = style
self.sleeve = sleeve
self.size = size
self.material = material
def description(self):
print(f'Style: {self.style}')
print(f'Size: {self.size}')
print(f'Material: {self.material}')
print(f'Sleeve: {self.sleeve}')
class Complex:
def __init__(self, realpart, imagpart):
self.r = realpart
self.i = imagpart
def subtract(self):
return self.i - self.r
def divide(self):
if self.r == 0:
return 'Cannot divide by Zero'
else:
return self.i / self.r
def multiply(self):
return self.i * self.r
def add(self):
return self.i + self.r
| true
| true
|
1c3f82ffb5f32873bb236afb3842f2862405b217
| 2,219
|
py
|
Python
|
trustpay/__init__.py
|
RomanMelnyk113/trustpay-python
|
2cf67469edcae3059c78bff83db9408c54b93f7f
|
[
"MIT"
] | null | null | null |
trustpay/__init__.py
|
RomanMelnyk113/trustpay-python
|
2cf67469edcae3059c78bff83db9408c54b93f7f
|
[
"MIT"
] | null | null | null |
trustpay/__init__.py
|
RomanMelnyk113/trustpay-python
|
2cf67469edcae3059c78bff83db9408c54b93f7f
|
[
"MIT"
] | null | null | null |
class PaymentException(Exception):
pass
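# ISO 20022 pain.001.001.03 payment-initiation template for a single TrustPay credit
# transfer; the {Placeholder} fields are intended to be filled in with str.format(),
# e.g. order_xml_string.format(MessageId=..., Amount=..., Currency=..., ...).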
order_xml_string = '''<?xml version="1.0" encoding="UTF-8"?>
<Document xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:iso:std:iso:20022:tech:xsd:pain.001.001.03">
<CstmrCdtTrfInitn>
<GrpHdr>
<MsgId>{MessageId}</MsgId>
<CreDtTm>{CreationDateTime}</CreDtTm>
<NbOfTxs>1</NbOfTxs>
<InitgPty/>
</GrpHdr>
<PmtInf>
<PmtInfId>1</PmtInfId>
<PmtMtd>TRF</PmtMtd>
<PmtTpInf>
<LclInstrm>
<Prtry>BWT2/EU</Prtry>
</LclInstrm>
</PmtTpInf>
<ReqdExctnDt>{RequestedExecutionDate}</ReqdExctnDt>
<Dbtr>
<Nm>{DebtorName}</Nm>
</Dbtr>
<DbtrAcct>
<Id>
<Othr>
<Id>{DebtorAccount}</Id>
</Othr>
</Id>
</DbtrAcct>
<DbtrAgt>
<FinInstnId>
<BIC>TPAYSKBX</BIC>
</FinInstnId>
</DbtrAgt>
<CdtTrfTxInf>
<PmtId>
<EndToEndId>NOTPROVIDED</EndToEndId>
</PmtId>
<PmtTpInf>
<LclInstrm>
<Cd>010000</Cd>
</LclInstrm>
</PmtTpInf>
<Amt>
<InstdAmt Ccy="{Currency}">{Amount}</InstdAmt>
</Amt>
<CdtrAgt>
<FinInstnId>
<BIC>{CreditorBankBic}</BIC>
</FinInstnId>
</CdtrAgt>
<Cdtr>
<Nm>{CreditorName}</Nm>
</Cdtr>
<CdtrAcct>
<Id>
<IBAN>{CreditorAccount}</IBAN>
</Id>
</CdtrAcct>
<RmtInf>
<Ustrd>{Description}</Ustrd>
</RmtInf>
</CdtTrfTxInf>
</PmtInf>
</CstmrCdtTrfInitn>
</Document>'''
| 31.253521
| 66
| 0.394322
|
class PaymentException(Exception):
pass
order_xml_string = '''<?xml version="1.0" encoding="UTF-8"?>
<Document xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:iso:std:iso:20022:tech:xsd:pain.001.001.03">
<CstmrCdtTrfInitn>
<GrpHdr>
<MsgId>{MessageId}</MsgId>
<CreDtTm>{CreationDateTime}</CreDtTm>
<NbOfTxs>1</NbOfTxs>
<InitgPty/>
</GrpHdr>
<PmtInf>
<PmtInfId>1</PmtInfId>
<PmtMtd>TRF</PmtMtd>
<PmtTpInf>
<LclInstrm>
<Prtry>BWT2/EU</Prtry>
</LclInstrm>
</PmtTpInf>
<ReqdExctnDt>{RequestedExecutionDate}</ReqdExctnDt>
<Dbtr>
<Nm>{DebtorName}</Nm>
</Dbtr>
<DbtrAcct>
<Id>
<Othr>
<Id>{DebtorAccount}</Id>
</Othr>
</Id>
</DbtrAcct>
<DbtrAgt>
<FinInstnId>
<BIC>TPAYSKBX</BIC>
</FinInstnId>
</DbtrAgt>
<CdtTrfTxInf>
<PmtId>
<EndToEndId>NOTPROVIDED</EndToEndId>
</PmtId>
<PmtTpInf>
<LclInstrm>
<Cd>010000</Cd>
</LclInstrm>
</PmtTpInf>
<Amt>
<InstdAmt Ccy="{Currency}">{Amount}</InstdAmt>
</Amt>
<CdtrAgt>
<FinInstnId>
<BIC>{CreditorBankBic}</BIC>
</FinInstnId>
</CdtrAgt>
<Cdtr>
<Nm>{CreditorName}</Nm>
</Cdtr>
<CdtrAcct>
<Id>
<IBAN>{CreditorAccount}</IBAN>
</Id>
</CdtrAcct>
<RmtInf>
<Ustrd>{Description}</Ustrd>
</RmtInf>
</CdtTrfTxInf>
</PmtInf>
</CstmrCdtTrfInitn>
</Document>'''
| true
| true
|
1c3f843b8d1393644c09337a3cb41a16c2b48573
| 1,048
|
py
|
Python
|
jupyter_contrib_nbextensions/__init__.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
jupyter_contrib_nbextensions/__init__.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
jupyter_contrib_nbextensions/__init__.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import jupyter_nbextensions_configurator
__version__ = '0.3.0'
def _jupyter_server_extension_paths():
"""Magically-named function for jupyter extension installations."""
return []
def _jupyter_nbextension_paths():
"""Magically-named function for jupyter extension installations."""
nbextension_dirs = [
os.path.join(os.path.dirname(__file__), 'nbextensions')]
specs = jupyter_nbextensions_configurator.get_configurable_nbextensions(
nbextension_dirs=nbextension_dirs)
return [dict(
section=nbext['Section'],
# src is a directory in which we assume the require file resides.
# the path is relative to the package directory
src=os.path.join(
'nbextensions',
os.path.dirname(nbext['require'])
),
# directory in the `nbextension/` namespace
dest=os.path.dirname(nbext['require']),
# _also_ in the `nbextension/` namespace
require=nbext['require'],
) for nbext in specs]
| 29.942857
| 76
| 0.669847
|
import os
import jupyter_nbextensions_configurator
__version__ = '0.3.0'
def _jupyter_server_extension_paths():
return []
def _jupyter_nbextension_paths():
nbextension_dirs = [
os.path.join(os.path.dirname(__file__), 'nbextensions')]
specs = jupyter_nbextensions_configurator.get_configurable_nbextensions(
nbextension_dirs=nbextension_dirs)
return [dict(
section=nbext['Section'],
src=os.path.join(
'nbextensions',
os.path.dirname(nbext['require'])
),
dest=os.path.dirname(nbext['require']),
require=nbext['require'],
) for nbext in specs]
| true
| true
|
1c3f85598d6d10f7ad9295935ad03bf15f07982d
| 5,117
|
py
|
Python
|
Pheme5MainResponseWAE4Early.py
|
zperfet/PathFake
|
fe09e5f6d872d682ef9e27384edabdb9e2ee27e9
|
[
"MIT"
] | null | null | null |
Pheme5MainResponseWAE4Early.py
|
zperfet/PathFake
|
fe09e5f6d872d682ef9e27384edabdb9e2ee27e9
|
[
"MIT"
] | null | null | null |
Pheme5MainResponseWAE4Early.py
|
zperfet/PathFake
|
fe09e5f6d872d682ef9e27384edabdb9e2ee27e9
|
[
"MIT"
] | 1
|
2022-01-03T15:26:06.000Z
|
2022-01-03T15:26:06.000Z
|
# Differences from MainPathVoting: random uses the full path text, while wae uses the response path, i.e. with the source text removed
# The two use different id encodings: each path maps to both random path ids and wae response path ids
from models.PathBased import ResponseWAE, ResponseCatWAE, ResponseWAECat
from torch import optim
import datetime
from evaluate import *
from get_args import _args, print_args
from data_io import *
import sys
from torch.optim.lr_scheduler import StepLR
def main():
print_args(_args)
# Fix the random seed
setup_seed(_args.seed)
print("Step1:processing data")
x_train_random, y_train_random, x_test_random, y_test_random = \
load_path_data_pheme(train_path, test_path, label_path,
path_random_id2paths_dict_path,
path_random_npz)
x_train_response, y_train_response, x_test_response, y_test_response = \
load_path_data_pheme(train_path, test_path, label_path,
response_id2paths_dict_path,
response_wae_npz)
# early detection code, still to be tested
# x_train_random, y_train_random, x_test_random, y_test_random = \
# load_path_data_for_early_detection(train_path, test_path, label_path,
# early_path_random_id2paths_dict_path,
# path_node_ids_dict_path,
# random_tweet2token_ids_dict_path,
# False,
# False)
#
# x_train_response, y_train_response, x_test_response, y_test_response = \
# load_path_data_for_early_detection(train_path, test_path, label_path,
# early_response_id2paths_dict_path,
# path_node_ids_dict_path,
# response_tweet2token_ids_dict_path,
# True,
# False)
print('Step2:build model')
model = ResponseWAE(_args.random_vocab_dim, _args.response_vocab_dim,
wae_best_encoder_path, _args.random_dim,
_args.vae_dim, _args.class_num)
model.to(device)
# 3. looping SGD
print('Step3:start training')
if _args.optim == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=_args.lr)
elif _args.optim == 'adam':
optimizer = optim.Adam(model.parameters(), lr=_args.lr)
else:
print('optim %s not correct' % _args.optim)
return
# scheduler = StepLR(optimizer, 10, 0.5, -1)
losses_5, losses = [], []
num_examples_seen = 0
indexs = list(range(len(y_train_random)))
highest_acc = 0
best_result = []
for epoch in range(1, _args.epoch + 1):
# print('epoch:', epoch, 'learning rate:', scheduler.get_lr())
# Shuffle the training order every epoch
random.shuffle(indexs)
# Train the model
for cnt, i in enumerate(indexs):
pred_y, loss = model.forward(torch.Tensor(x_train_random[i]).cuda(device).long(),
torch.Tensor(x_train_response[i]).cuda(device).long(),
torch.Tensor(y_train_random[i]).cuda(device),
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.data.cpu().tolist())
num_examples_seen += 1
# if (cnt + 1) % 500 == 0:
# break
# scheduler.step()
# calculate loss & evaluate
with torch.no_grad():
if epoch % 1 == 0:
losses_5.append((num_examples_seen, np.mean(losses)))
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if _args.verbose:
print("%s: Loss after num_examples_seen=%d epoch=%d: %f" %
(time, num_examples_seen, epoch, np.mean(losses)))
sys.stdout.flush()
prediction = []
# Because every tree is different, both the training and test batch sizes are 1; to be improved later
for j in range(len(y_test_random)):
prediction.append(
model.predict_up(torch.Tensor(x_test_random[j]).cuda(device).long(),
torch.Tensor(x_test_response[j]).cuda(device).long())
.cpu().data.numpy().tolist())
res = evaluation_3class(prediction, y_test_random)
# highest_acc = max(highest_acc, res[1])
if res[1] > highest_acc:
best_result = res
highest_acc = res[1]
if _args.verbose:
print(res)
print()
sys.stdout.flush()
sys.stdout.flush()
losses = []
print('highest acc:', highest_acc)
print("best performance:", best_result)
print('#' * 80)
if __name__ == '__main__':
main()
| 43
| 96
| 0.522767
|
from models.PathBased import ResponseWAE, ResponseCatWAE, ResponseWAECat
from torch import optim
import datetime
from evaluate import *
from get_args import _args, print_args
from data_io import *
import sys
from torch.optim.lr_scheduler import StepLR
def main():
print_args(_args)
setup_seed(_args.seed)
print("Step1:processing data")
x_train_random, y_train_random, x_test_random, y_test_random = \
load_path_data_pheme(train_path, test_path, label_path,
path_random_id2paths_dict_path,
path_random_npz)
x_train_response, y_train_response, x_test_response, y_test_response = \
load_path_data_pheme(train_path, test_path, label_path,
response_id2paths_dict_path,
response_wae_npz)
print('Step2:build model')
model = ResponseWAE(_args.random_vocab_dim, _args.response_vocab_dim,
wae_best_encoder_path, _args.random_dim,
_args.vae_dim, _args.class_num)
model.to(device)
print('Step3:start training')
if _args.optim == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=_args.lr)
elif _args.optim == 'adam':
optimizer = optim.Adam(model.parameters(), lr=_args.lr)
else:
print('optim %s not correct' % _args.optim)
return
losses_5, losses = [], []
num_examples_seen = 0
indexs = list(range(len(y_train_random)))
highest_acc = 0
best_result = []
for epoch in range(1, _args.epoch + 1):
random.shuffle(indexs)
for cnt, i in enumerate(indexs):
pred_y, loss = model.forward(torch.Tensor(x_train_random[i]).cuda(device).long(),
torch.Tensor(x_train_response[i]).cuda(device).long(),
torch.Tensor(y_train_random[i]).cuda(device),
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.data.cpu().tolist())
num_examples_seen += 1
with torch.no_grad():
if epoch % 1 == 0:
losses_5.append((num_examples_seen, np.mean(losses)))
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if _args.verbose:
print("%s: Loss after num_examples_seen=%d epoch=%d: %f" %
(time, num_examples_seen, epoch, np.mean(losses)))
sys.stdout.flush()
prediction = []
for j in range(len(y_test_random)):
prediction.append(
model.predict_up(torch.Tensor(x_test_random[j]).cuda(device).long(),
torch.Tensor(x_test_response[j]).cuda(device).long())
.cpu().data.numpy().tolist())
res = evaluation_3class(prediction, y_test_random)
if res[1] > highest_acc:
best_result = res
highest_acc = res[1]
if _args.verbose:
print(res)
print()
sys.stdout.flush()
sys.stdout.flush()
losses = []
print('highest acc:', highest_acc)
print("best performance:", best_result)
print('#' * 80)
if __name__ == '__main__':
main()
| true
| true
|
1c3f85e1028366c5f6c8a26ba5ec68082c8dc435
| 602
|
py
|
Python
|
tools/gui_patch.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 55
|
2015-02-15T08:17:55.000Z
|
2022-03-11T11:55:39.000Z
|
tools/gui_patch.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 264
|
2015-01-29T20:27:40.000Z
|
2022-03-03T04:08:48.000Z
|
tools/gui_patch.py
|
facelessuser/Rummage
|
74f0ce1b078eef40c3ba683dbc4638112f3b9bb7
|
[
"MIT"
] | 12
|
2017-08-30T22:54:20.000Z
|
2022-03-21T01:05:50.000Z
|
"""Patch graphical user interface file."""
import codecs
import re
filename = 'rummage/lib/gui/gui.py'
with codecs.open(filename, 'r', encoding='utf-8') as f:
text = f.read()
# Add collapsible pane replacement
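# The multiline regex matches an import line (other than the collapsible_pane import
# itself) that is immediately followed by a blank line, and re-emits it with the
# replacement import plus a GUI_PATCHED marker appended.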
text = re.sub(
r'^((?:import|from)(?! \.controls\.collapsible_pane).*?)(\r?\n){2}',
r'\1\2from .controls.collapsible_pane import CollapsiblePane\2\2GUI_PATCHED = True\2\2',
text,
flags=re.M
)
# Replace old collapsible pane
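# The lookahead keeps "CollapsiblePane" itself and strips only the "wx." prefix,
# so existing references now resolve to the imported replacement class.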
text = re.sub(
r'\bwx\.(?=CollapsiblePane\b)',
'',
text
)
with codecs.open(filename, 'w', encoding='utf-8') as f:
f.write(text)
| 22.296296
| 92
| 0.646179
|
import codecs
import re
filename = 'rummage/lib/gui/gui.py'
with codecs.open(filename, 'r', encoding='utf-8') as f:
text = f.read()
text = re.sub(
r'^((?:import|from)(?! \.controls\.collapsible_pane).*?)(\r?\n){2}',
r'\1\2from .controls.collapsible_pane import CollapsiblePane\2\2GUI_PATCHED = True\2\2',
text,
flags=re.M
)
text = re.sub(
r'\bwx\.(?=CollapsiblePane\b)',
'',
text
)
with codecs.open(filename, 'w', encoding='utf-8') as f:
f.write(text)
| true
| true
|
1c3f860e591eb1e16d50348d24e86f6ad56138e5
| 5,434
|
py
|
Python
|
tests/test_parser.py
|
KyleRConway/knack
|
73d76bae6fe4996f3ffea4bd24cf487b0263da85
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
KyleRConway/knack
|
73d76bae6fe4996f3ffea4bd24cf487b0263da85
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
KyleRConway/knack
|
73d76bae6fe4996f3ffea4bd24cf487b0263da85
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from six import StringIO
from knack.parser import CLICommandParser
from knack.commands import CLICommand
from knack.arguments import enum_choice_list
from tests.util import MockContext
class TestParser(unittest.TestCase):
def setUp(self):
self.io = StringIO()
self.mock_ctx = MockContext()
def tearDown(self):
self.io.close()
def test_register_simple_commands(self):
def test_handler1():
pass
def test_handler2():
pass
command = CLICommand(self.mock_ctx, 'command the-name', test_handler1)
command2 = CLICommand(self.mock_ctx, 'sub-command the-second-name', test_handler2)
cmd_table = {'command the-name': command, 'sub-command the-second-name': command2}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('command the-name'.split())
self.assertIs(args.func, command)
args = parser.parse_args('sub-command the-second-name'.split())
self.assertIs(args.func, command2)
CLICommandParser.error = VerifyError(self)
parser.parse_args('sub-command'.split())
self.assertTrue(CLICommandParser.error.called)
def test_required_parameter(self):
def test_handler(args): # pylint: disable=unused-argument
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('req', '--req', required=True)
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('test command --req yep'.split())
self.assertIs(args.func, command)
CLICommandParser.error = VerifyError(self)
parser.parse_args('test command'.split())
self.assertTrue(CLICommandParser.error.called)
def test_nargs_parameter(self):
def test_handler():
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('req', '--req', required=True, nargs=2)
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('test command --req yep nope'.split())
self.assertIs(args.func, command)
CLICommandParser.error = VerifyError(self)
parser.parse_args('test command -req yep'.split())
self.assertTrue(CLICommandParser.error.called)
def test_case_insensitive_enum_choices(self):
from enum import Enum
class TestEnum(Enum): # pylint: disable=too-few-public-methods
opt1 = "ALL_CAPS"
opt2 = "camelCase"
opt3 = "snake_case"
def test_handler():
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('test command --opt alL_cAps'.split())
self.assertEqual(args.opt, 'ALL_CAPS')
args = parser.parse_args('test command --opt CAMELCASE'.split())
self.assertEqual(args.opt, 'camelCase')
args = parser.parse_args('test command --opt sNake_CASE'.split())
self.assertEqual(args.opt, 'snake_case')
def test_cli_ctx_type_error(self):
with self.assertRaises(TypeError):
CLICommandParser(cli_ctx=object())
def test_extra_nonargparse_parameters(self):
""" Add argument that has non argparse parameters.
'mycustomarg' should be filtered out and load_command_table
should complete successfully instead of throwing
TypeError: __init__() got an unexpected keyword argument 'mycustomarg'
"""
def test_handler():
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('req', '--req', required=True, mycustomarg=True)
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
class VerifyError(object): # pylint: disable=too-few-public-methods
def __init__(self, test, substr=None):
self.test = test
self.substr = substr
self.called = False
def __call__(self, message):
if self.substr:
self.test.assertTrue(message.find(self.substr) >= 0)
self.called = True
if __name__ == '__main__':
unittest.main()
| 35.986755
| 94
| 0.645749
|
import unittest
from six import StringIO
from knack.parser import CLICommandParser
from knack.commands import CLICommand
from knack.arguments import enum_choice_list
from tests.util import MockContext
class TestParser(unittest.TestCase):
def setUp(self):
self.io = StringIO()
self.mock_ctx = MockContext()
def tearDown(self):
self.io.close()
def test_register_simple_commands(self):
def test_handler1():
pass
def test_handler2():
pass
command = CLICommand(self.mock_ctx, 'command the-name', test_handler1)
command2 = CLICommand(self.mock_ctx, 'sub-command the-second-name', test_handler2)
cmd_table = {'command the-name': command, 'sub-command the-second-name': command2}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('command the-name'.split())
self.assertIs(args.func, command)
args = parser.parse_args('sub-command the-second-name'.split())
self.assertIs(args.func, command2)
        CLICommandParser.error = VerifyError(self)
parser.parse_args('sub-command'.split())
self.assertTrue(CLICommandParser.error.called)
def test_required_parameter(self):
def test_handler(args):
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('req', '--req', required=True)
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('test command --req yep'.split())
self.assertIs(args.func, command)
CLICommandParser.error = VerifyError(self)
parser.parse_args('test command'.split())
self.assertTrue(CLICommandParser.error.called)
def test_nargs_parameter(self):
def test_handler():
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('req', '--req', required=True, nargs=2)
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('test command --req yep nope'.split())
self.assertIs(args.func, command)
CLICommandParser.error = VerifyError(self)
parser.parse_args('test command -req yep'.split())
self.assertTrue(CLICommandParser.error.called)
def test_case_insensitive_enum_choices(self):
from enum import Enum
class TestEnum(Enum):
opt1 = "ALL_CAPS"
opt2 = "camelCase"
opt3 = "snake_case"
def test_handler():
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
args = parser.parse_args('test command --opt alL_cAps'.split())
self.assertEqual(args.opt, 'ALL_CAPS')
args = parser.parse_args('test command --opt CAMELCASE'.split())
self.assertEqual(args.opt, 'camelCase')
args = parser.parse_args('test command --opt sNake_CASE'.split())
self.assertEqual(args.opt, 'snake_case')
def test_cli_ctx_type_error(self):
with self.assertRaises(TypeError):
CLICommandParser(cli_ctx=object())
def test_extra_nonargparse_parameters(self):
def test_handler():
pass
command = CLICommand(self.mock_ctx, 'test command', test_handler)
command.add_argument('req', '--req', required=True, mycustomarg=True)
cmd_table = {'test command': command}
self.mock_ctx.commands_loader.command_table = cmd_table
parser = CLICommandParser()
parser.load_command_table(self.mock_ctx.commands_loader)
class VerifyError(object):
def __init__(self, test, substr=None):
self.test = test
self.substr = substr
self.called = False
def __call__(self, message):
if self.substr:
self.test.assertTrue(message.find(self.substr) >= 0)
self.called = True
if __name__ == '__main__':
unittest.main()
| true
| true
|
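The test_case_insensitive_enum_choices test above exercises knack's enum_choice_list helper, which normalizes user input back to the enum's canonical spelling. A minimal sketch of the same technique with plain argparse — the helper name and enum here are illustrative, not knack's actual implementation:
import argparse
from enum import Enum
class Fmt(Enum):  # hypothetical enum, mirroring TestEnum in the tests above
    opt1 = "ALL_CAPS"
    opt2 = "camelCase"
    opt3 = "snake_case"
def case_insensitive_choices(enum_cls):
    # Map lowercased member values to their canonical spelling, so any
    # casing the user types resolves to the value the handler expects.
    canonical = {m.value.lower(): m.value for m in enum_cls}
    def convert(raw):
        try:
            return canonical[raw.lower()]
        except KeyError:
            raise argparse.ArgumentTypeError("invalid choice: %s" % raw)
    # argparse applies `type` before checking `choices`, so listing the
    # canonical values here keeps the membership check consistent.
    return {"type": convert, "choices": sorted(canonical.values())}
parser = argparse.ArgumentParser()
parser.add_argument("--opt", **case_insensitive_choices(Fmt))
assert parser.parse_args(["--opt", "alL_cAps"]).opt == "ALL_CAPS"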
1c3f863d25b01378b7e621472637297beaefc32f
| 489
|
py
|
Python
|
fonolo/api/realtime.py
|
Fonolo/fonolo-python
|
2bc8b5bbdaea60f1fad34ead154a1804dcf73746
|
[
"MIT"
] | null | null | null |
fonolo/api/realtime.py
|
Fonolo/fonolo-python
|
2bc8b5bbdaea60f1fad34ead154a1804dcf73746
|
[
"MIT"
] | null | null | null |
fonolo/api/realtime.py
|
Fonolo/fonolo-python
|
2bc8b5bbdaea60f1fad34ead154a1804dcf73746
|
[
"MIT"
] | null | null | null |
#
# This file is part of the Fonolo Python Wrapper package.
#
# (c) Foncloud, Inc.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
import re
from .requesthandler import RequestHandler
from ..exception.exception import FonoloException
class Realtime(object):
def __init__(self, _handler):
        self.handler = _handler
    def get(self, _params=None):
        return self.handler.get('realtime')
| 22.227273
| 73
| 0.728016
|
import re
from .requesthandler import RequestHandler
from ..exception.exception import FonoloException
class Realtime(object):
def __init__(self, _handler):
        self.handler = _handler
    def get(self, _params=None):
        return self.handler.get('realtime')
| true
| true
|
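The Realtime wrapper above simply delegates to its handler's get() (note that _params is currently accepted but ignored). A self-contained sketch of that delegation with a stub handler — the stub is purely illustrative and performs no HTTP:
class Realtime(object):  # inlined copy of the wrapper above, for a standalone demo
    def __init__(self, _handler):
        self.handler = _handler
    def get(self, _params=None):
        return self.handler.get('realtime')
class _StubHandler(object):
    # Stand-in for RequestHandler; the real one performs authenticated
    # HTTP requests against the Fonolo API.
    def get(self, path):
        return {"endpoint": path}
assert Realtime(_StubHandler()).get() == {"endpoint": "realtime"}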
1c3f868dc109dce702dfbec5695442853f06239d
| 2,740
|
py
|
Python
|
exercises/4.0/ros2/src/myworkcell_support/launch/workcell.launch.py
|
Lrk114/industrial_training
|
82070ab18e64410abbe1b8e02dd3d183177ebd0e
|
[
"Apache-2.0"
] | 1
|
2021-08-22T15:25:10.000Z
|
2021-08-22T15:25:10.000Z
|
exercises/4.0/ros2/src/myworkcell_support/launch/workcell.launch.py
|
Lrk114/industrial_training
|
82070ab18e64410abbe1b8e02dd3d183177ebd0e
|
[
"Apache-2.0"
] | null | null | null |
exercises/4.0/ros2/src/myworkcell_support/launch/workcell.launch.py
|
Lrk114/industrial_training
|
82070ab18e64410abbe1b8e02dd3d183177ebd0e
|
[
"Apache-2.0"
] | null | null | null |
import os
import yaml
import launch
import launch_ros
from ament_index_python import get_package_share_directory
def get_package_file(package, file_path):
"""Get the location of a file installed in an ament package"""
package_path = get_package_share_directory(package)
absolute_file_path = os.path.join(package_path, file_path)
return absolute_file_path
def load_file(file_path):
"""Load the contents of a file into a string"""
try:
with open(file_path, 'r') as file:
return file.read()
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def load_yaml(file_path):
"""Load a yaml file into a dictionary"""
try:
with open(file_path, 'r') as file:
return yaml.safe_load(file)
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def run_xacro(xacro_file):
"""Run xacro and output a file in the same directory with the same name, w/o a .xacro suffix"""
urdf_file, ext = os.path.splitext(xacro_file)
if ext != '.xacro':
raise RuntimeError(f'Input file to xacro must have a .xacro extension, got {xacro_file}')
os.system(f'xacro {xacro_file} -o {urdf_file}')
return urdf_file
def generate_launch_description():
xacro_file = get_package_file('myworkcell_support', 'urdf/workcell.urdf.xacro')
urdf_file = run_xacro(xacro_file)
srdf_file = get_package_file('myworkcell_moveit_config', 'config/myworkcell.srdf')
kinematics_file = get_package_file('myworkcell_moveit_config', 'config/kinematics.yaml')
robot_description = load_file(urdf_file)
robot_description_semantic = load_file(srdf_file)
kinematics_config = load_yaml(kinematics_file)
return launch.LaunchDescription([
launch_ros.actions.Node(
name='fake_ar_publisher_node',
package='fake_ar_publisher',
executable='fake_ar_publisher_node',
output='screen',
),
launch_ros.actions.Node(
name='vision_node',
package='myworkcell_core',
executable='vision_node',
output='screen',
),
launch_ros.actions.Node(
name='myworkcell_node',
package='myworkcell_core',
executable='myworkcell_node',
output='screen',
parameters=[
{
'base_frame': 'world',
'robot_description': robot_description,
'robot_description_semantic': robot_description_semantic,
'robot_description_kinematics': kinematics_config,
},
],
),
])
| 36.052632
| 99
| 0.654745
|
import os
import yaml
import launch
import launch_ros
from ament_index_python import get_package_share_directory
def get_package_file(package, file_path):
package_path = get_package_share_directory(package)
absolute_file_path = os.path.join(package_path, file_path)
return absolute_file_path
def load_file(file_path):
try:
with open(file_path, 'r') as file:
return file.read()
except EnvironmentError:
return None
def load_yaml(file_path):
try:
with open(file_path, 'r') as file:
return yaml.safe_load(file)
except EnvironmentError:
return None
def run_xacro(xacro_file):
urdf_file, ext = os.path.splitext(xacro_file)
if ext != '.xacro':
raise RuntimeError(f'Input file to xacro must have a .xacro extension, got {xacro_file}')
os.system(f'xacro {xacro_file} -o {urdf_file}')
return urdf_file
def generate_launch_description():
xacro_file = get_package_file('myworkcell_support', 'urdf/workcell.urdf.xacro')
urdf_file = run_xacro(xacro_file)
srdf_file = get_package_file('myworkcell_moveit_config', 'config/myworkcell.srdf')
kinematics_file = get_package_file('myworkcell_moveit_config', 'config/kinematics.yaml')
robot_description = load_file(urdf_file)
robot_description_semantic = load_file(srdf_file)
kinematics_config = load_yaml(kinematics_file)
return launch.LaunchDescription([
launch_ros.actions.Node(
name='fake_ar_publisher_node',
package='fake_ar_publisher',
executable='fake_ar_publisher_node',
output='screen',
),
launch_ros.actions.Node(
name='vision_node',
package='myworkcell_core',
executable='vision_node',
output='screen',
),
launch_ros.actions.Node(
name='myworkcell_node',
package='myworkcell_core',
executable='myworkcell_node',
output='screen',
parameters=[
{
'base_frame': 'world',
'robot_description': robot_description,
'robot_description_semantic': robot_description_semantic,
'robot_description_kinematics': kinematics_config,
},
],
),
])
| true
| true
|
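run_xacro above shells out with os.system, which silently discards a non-zero exit status. A sketch of the same conversion step using subprocess.run with check=True — an alternative under the assumption that the xacro CLI is on PATH, not a change to the exercise code:
import os
import subprocess
def run_xacro_checked(xacro_file):
    """Convert a .xacro file to a URDF next to it, raising on failure."""
    urdf_file, ext = os.path.splitext(xacro_file)
    if ext != '.xacro':
        raise RuntimeError(f'Input file to xacro must have a .xacro extension, got {xacro_file}')
    # check=True raises CalledProcessError when xacro exits non-zero,
    # instead of returning an ignored status code like os.system does.
    subprocess.run(['xacro', xacro_file, '-o', urdf_file], check=True)
    return urdf_file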
1c3f86e294b56fda20f934bafc73878611a8c271
| 1,585
|
py
|
Python
|
kiosk/views.py
|
leonrenkema/makerspaceleiden-crm
|
36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c
|
[
"Apache-2.0"
] | 5
|
2019-03-12T21:38:32.000Z
|
2021-11-06T15:26:56.000Z
|
kiosk/views.py
|
leonrenkema/makerspaceleiden-crm
|
36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c
|
[
"Apache-2.0"
] | 33
|
2019-01-21T15:54:50.000Z
|
2021-05-18T17:54:52.000Z
|
kiosk/views.py
|
leonrenkema/makerspaceleiden-crm
|
36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c
|
[
"Apache-2.0"
] | 5
|
2019-01-21T15:47:26.000Z
|
2021-09-22T07:14:34.000Z
|
import os
import logging
import json
from django.shortcuts import render
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.contrib.auth.tokens import default_token_generator
from django.template import loader
from django.http import HttpResponse
from django.conf import settings
from django.shortcuts import redirect
from django.views.generic import ListView, CreateView, UpdateView
from django.urls import reverse_lazy
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django import forms
from django.contrib.auth import login, authenticate
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMessage
from makerspaceleiden.decorators import user_or_kiosk_required
from selfservice.aggregator_adapter import get_aggregator_adapter
@user_or_kiosk_required
def kiosk(request):
aggregator_adapter = get_aggregator_adapter()
if not aggregator_adapter:
return HttpResponse(
"No aggregator configuration found", status=500, content_type="text/plain"
)
context = aggregator_adapter.fetch_state_space()
context["user"] = None
return render(request, "kiosk.html", context)
| 34.456522
| 86
| 0.828391
|
import os
import logging
import json
from django.shortcuts import render
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.contrib.auth.tokens import default_token_generator
from django.template import loader
from django.http import HttpResponse
from django.conf import settings
from django.shortcuts import redirect
from django.views.generic import ListView, CreateView, UpdateView
from django.urls import reverse_lazy
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django import forms
from django.contrib.auth import login, authenticate
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMessage
from makerspaceleiden.decorators import user_or_kiosk_required
from selfservice.aggregator_adapter import get_aggregator_adapter
@user_or_kiosk_required
def kiosk(request):
aggregator_adapter = get_aggregator_adapter()
if not aggregator_adapter:
return HttpResponse(
"No aggregator configuration found", status=500, content_type="text/plain"
)
context = aggregator_adapter.fetch_state_space()
context["user"] = None
return render(request, "kiosk.html", context)
| true
| true
|
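The kiosk view above degrades to a plain-text 500 when no aggregator is configured. A hedged test sketch of that branch — it assumes a working Django test setup, a pytest-django client fixture, a /kiosk/ URL route, and that the user_or_kiosk_required decorator lets the request through:
from unittest import mock
def test_kiosk_without_aggregator(client):
    # Patch the name as imported into kiosk.views, not at its source module.
    with mock.patch("kiosk.views.get_aggregator_adapter", return_value=None):
        response = client.get("/kiosk/")  # URL is an assumption
        assert response.status_code == 500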
1c3f87d26f8903e3b82e12e48ff535ce57b1bf43
| 4,964
|
py
|
Python
|
api/generated/python/azure-iiot-opc-registry/models/application_info_api_model.py
|
jaz230/Industrial-IoT
|
bd4c5abfe579cbb7086a621e8381978e6c70a563
|
[
"MIT"
] | 1
|
2020-01-22T12:03:08.000Z
|
2020-01-22T12:03:08.000Z
|
api/generated/python/azure-iiot-opc-registry/models/application_info_api_model.py
|
likithadt/Industrial-IoT
|
d4ea7b330eff08455ca0556fed76aa74d2034da5
|
[
"MIT"
] | null | null | null |
api/generated/python/azure-iiot-opc-registry/models/application_info_api_model.py
|
likithadt/Industrial-IoT
|
d4ea7b330eff08455ca0556fed76aa74d2034da5
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationInfoApiModel(Model):
"""Application info model.
:param application_id: Unique application id
:type application_id: str
:param application_type: Possible values include: 'Server', 'Client',
'ClientAndServer', 'DiscoveryServer'
:type application_type: str or
~azure-iiot-opc-registry.models.ApplicationType
:param application_uri: Unique application uri
:type application_uri: str
:param product_uri: Product uri
:type product_uri: str
:param application_name: Default name of application
:type application_name: str
:param locale: Locale of default name - defaults to "en"
:type locale: str
:param localized_names: Localized Names of application keyed on locale
:type localized_names: dict[str, str]
:param certificate: Application public cert
:type certificate: bytearray
:param capabilities: The capabilities advertised by the server.
:type capabilities: list[str]
:param discovery_urls: Discovery urls of the server
:type discovery_urls: list[str]
:param discovery_profile_uri: Discovery profile uri
:type discovery_profile_uri: str
:param gateway_server_uri: Gateway server uri
:type gateway_server_uri: str
:param host_addresses: Host addresses of server application or null
:type host_addresses: list[str]
:param site_id: Site of the application
:type site_id: str
:param discoverer_id: Discoverer that registered the application
:type discoverer_id: str
:param not_seen_since: Last time application was seen
:type not_seen_since: datetime
:param created:
:type created: ~azure-iiot-opc-registry.models.RegistryOperationApiModel
:param updated:
:type updated: ~azure-iiot-opc-registry.models.RegistryOperationApiModel
"""
_validation = {
'capabilities': {'unique': True},
'discovery_urls': {'unique': True},
'host_addresses': {'unique': True},
}
_attribute_map = {
'application_id': {'key': 'applicationId', 'type': 'str'},
'application_type': {'key': 'applicationType', 'type': 'ApplicationType'},
'application_uri': {'key': 'applicationUri', 'type': 'str'},
'product_uri': {'key': 'productUri', 'type': 'str'},
'application_name': {'key': 'applicationName', 'type': 'str'},
'locale': {'key': 'locale', 'type': 'str'},
'localized_names': {'key': 'localizedNames', 'type': '{str}'},
'certificate': {'key': 'certificate', 'type': 'bytearray'},
'capabilities': {'key': 'capabilities', 'type': '[str]'},
'discovery_urls': {'key': 'discoveryUrls', 'type': '[str]'},
'discovery_profile_uri': {'key': 'discoveryProfileUri', 'type': 'str'},
'gateway_server_uri': {'key': 'gatewayServerUri', 'type': 'str'},
'host_addresses': {'key': 'hostAddresses', 'type': '[str]'},
'site_id': {'key': 'siteId', 'type': 'str'},
'discoverer_id': {'key': 'discovererId', 'type': 'str'},
'not_seen_since': {'key': 'notSeenSince', 'type': 'iso-8601'},
'created': {'key': 'created', 'type': 'RegistryOperationApiModel'},
'updated': {'key': 'updated', 'type': 'RegistryOperationApiModel'},
}
def __init__(self, application_id=None, application_type=None, application_uri=None, product_uri=None, application_name=None, locale=None, localized_names=None, certificate=None, capabilities=None, discovery_urls=None, discovery_profile_uri=None, gateway_server_uri=None, host_addresses=None, site_id=None, discoverer_id=None, not_seen_since=None, created=None, updated=None):
super(ApplicationInfoApiModel, self).__init__()
self.application_id = application_id
self.application_type = application_type
self.application_uri = application_uri
self.product_uri = product_uri
self.application_name = application_name
self.locale = locale
self.localized_names = localized_names
self.certificate = certificate
self.capabilities = capabilities
self.discovery_urls = discovery_urls
self.discovery_profile_uri = discovery_profile_uri
self.gateway_server_uri = gateway_server_uri
self.host_addresses = host_addresses
self.site_id = site_id
self.discoverer_id = discoverer_id
self.not_seen_since = not_seen_since
self.created = created
self.updated = updated
| 47.27619
| 380
| 0.6666
|
from msrest.serialization import Model
class ApplicationInfoApiModel(Model):
_validation = {
'capabilities': {'unique': True},
'discovery_urls': {'unique': True},
'host_addresses': {'unique': True},
}
_attribute_map = {
'application_id': {'key': 'applicationId', 'type': 'str'},
'application_type': {'key': 'applicationType', 'type': 'ApplicationType'},
'application_uri': {'key': 'applicationUri', 'type': 'str'},
'product_uri': {'key': 'productUri', 'type': 'str'},
'application_name': {'key': 'applicationName', 'type': 'str'},
'locale': {'key': 'locale', 'type': 'str'},
'localized_names': {'key': 'localizedNames', 'type': '{str}'},
'certificate': {'key': 'certificate', 'type': 'bytearray'},
'capabilities': {'key': 'capabilities', 'type': '[str]'},
'discovery_urls': {'key': 'discoveryUrls', 'type': '[str]'},
'discovery_profile_uri': {'key': 'discoveryProfileUri', 'type': 'str'},
'gateway_server_uri': {'key': 'gatewayServerUri', 'type': 'str'},
'host_addresses': {'key': 'hostAddresses', 'type': '[str]'},
'site_id': {'key': 'siteId', 'type': 'str'},
'discoverer_id': {'key': 'discovererId', 'type': 'str'},
'not_seen_since': {'key': 'notSeenSince', 'type': 'iso-8601'},
'created': {'key': 'created', 'type': 'RegistryOperationApiModel'},
'updated': {'key': 'updated', 'type': 'RegistryOperationApiModel'},
}
def __init__(self, application_id=None, application_type=None, application_uri=None, product_uri=None, application_name=None, locale=None, localized_names=None, certificate=None, capabilities=None, discovery_urls=None, discovery_profile_uri=None, gateway_server_uri=None, host_addresses=None, site_id=None, discoverer_id=None, not_seen_since=None, created=None, updated=None):
super(ApplicationInfoApiModel, self).__init__()
self.application_id = application_id
self.application_type = application_type
self.application_uri = application_uri
self.product_uri = product_uri
self.application_name = application_name
self.locale = locale
self.localized_names = localized_names
self.certificate = certificate
self.capabilities = capabilities
self.discovery_urls = discovery_urls
self.discovery_profile_uri = discovery_profile_uri
self.gateway_server_uri = gateway_server_uri
self.host_addresses = host_addresses
self.site_id = site_id
self.discoverer_id = discoverer_id
self.not_seen_since = not_seen_since
self.created = created
self.updated = updated
| true
| true
|
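The _attribute_map above is what msrest uses to translate between the model's snake_case attributes and the camelCase wire keys. A small serialization sketch with a toy model — illustrative only, not part of the generated SDK, and it assumes the msrest package is installed:
from msrest.serialization import Model
class TinyModel(Model):
    # Minimal illustrative model following the same mapping convention.
    _attribute_map = {
        'site_id': {'key': 'siteId', 'type': 'str'},
        'discovery_urls': {'key': 'discoveryUrls', 'type': '[str]'},
    }
    def __init__(self, site_id=None, discovery_urls=None):
        super(TinyModel, self).__init__()
        self.site_id = site_id
        self.discovery_urls = discovery_urls
m = TinyModel(site_id='site-1', discovery_urls=['opc.tcp://a:4840'])
print(m.serialize())  # -> {'siteId': 'site-1', 'discoveryUrls': ['opc.tcp://a:4840']}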
1c3f89924bdc6392502c884a1fd372283dfdd904
| 9,069
|
py
|
Python
|
yt/frontends/nc4_cm1/data_structures.py
|
Ronan-Hix/yt
|
5ca4ab65e7486ee392577b0f24dbf2b56b892679
|
[
"BSD-3-Clause-Clear"
] | 360
|
2017-04-24T05:06:04.000Z
|
2022-03-31T10:47:07.000Z
|
yt/frontends/nc4_cm1/data_structures.py
|
chrishavlin/yt
|
023680e3a7bd1000d601727e02a55e72b4cbdc75
|
[
"BSD-3-Clause-Clear"
] | 2,077
|
2017-04-20T20:36:07.000Z
|
2022-03-31T16:39:43.000Z
|
yt/frontends/nc4_cm1/data_structures.py
|
chrishavlin/yt
|
023680e3a7bd1000d601727e02a55e72b4cbdc75
|
[
"BSD-3-Clause-Clear"
] | 257
|
2017-04-19T20:52:28.000Z
|
2022-03-29T12:23:52.000Z
|
import os
import stat
import weakref
from collections import OrderedDict
import numpy as np
from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset
from yt.geometry.grid_geometry_handler import GridIndex
from yt.utilities.file_handler import NetCDF4FileHandler, warn_netcdf
from yt.utilities.logger import ytLogger as mylog
from .fields import CM1FieldInfo
class CM1Grid(AMRGridPatch):
_id_offset = 0
def __init__(self, id, index, level, dimensions):
super().__init__(id, filename=index.index_filename, index=index)
self.Parent = None
self.Children = []
self.Level = level
self.ActiveDimensions = dimensions
def __repr__(self):
return f"CM1Grid_{self.id:d} ({self.ActiveDimensions})"
class CM1Hierarchy(GridIndex):
grid = CM1Grid
def __init__(self, ds, dataset_type="cm1"):
self.dataset_type = dataset_type
self.dataset = weakref.proxy(ds)
# for now, the index file is the dataset!
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
# float type for the simulation edges and must be float64 now
self.float_type = np.float64
super().__init__(ds, dataset_type)
def _detect_output_fields(self):
# build list of on-disk fields for dataset_type 'cm1'
vnames = self.dataset.parameters["variable_names"]
self.field_list = [("cm1", vname) for vname in vnames]
def _count_grids(self):
# This needs to set self.num_grids
self.num_grids = 1
def _parse_index(self):
self.grid_left_edge[0][:] = self.ds.domain_left_edge[:]
self.grid_right_edge[0][:] = self.ds.domain_right_edge[:]
self.grid_dimensions[0][:] = self.ds.domain_dimensions[:]
self.grid_particle_count[0][0] = 0
self.grid_levels[0][0] = 1
self.max_level = 1
def _populate_grid_objects(self):
self.grids = np.empty(self.num_grids, dtype="object")
for i in range(self.num_grids):
g = self.grid(i, self, self.grid_levels.flat[i], self.grid_dimensions[i])
g._prepare_grid()
g._setup_dx()
self.grids[i] = g
class CM1Dataset(Dataset):
_index_class = CM1Hierarchy
_field_info_class = CM1FieldInfo
def __init__(
self,
filename,
dataset_type="cm1",
storage_filename=None,
units_override=None,
unit_system="mks",
):
self.fluid_types += ("cm1",)
self._handle = NetCDF4FileHandler(filename)
# refinement factor between a grid and its subgrid
self.refine_by = 1
super().__init__(
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
)
self.storage_filename = storage_filename
self.filename = filename
def _setup_coordinate_handler(self):
# ensure correct ordering of axes so plots aren't rotated (z should always be
# on the vertical axis).
super()._setup_coordinate_handler()
self.coordinates._x_pairs = (("x", "y"), ("y", "x"), ("z", "x"))
self.coordinates._y_pairs = (("x", "z"), ("y", "z"), ("z", "y"))
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
# on-disk units. These are the currently available quantities which
# should be set, along with examples of how to set them to standard
# values.
with self._handle.open_ds() as _handle:
length_unit = _handle.variables["xh"].units
self.length_unit = self.quan(1.0, length_unit)
self.mass_unit = self.quan(1.0, "kg")
self.time_unit = self.quan(1.0, "s")
self.velocity_unit = self.quan(1.0, "m/s")
def _parse_parameter_file(self):
# This needs to set up the following items. Note that these are all
# assumed to be in code units; domain_left_edge and domain_right_edge
# will be converted to YTArray automatically at a later time.
# This includes the cosmological parameters.
#
# self.unique_identifier <= unique identifier for the dataset
# being read (e.g., UUID or ST_CTIME)
self.unique_identifier = int(os.stat(self.parameter_filename)[stat.ST_CTIME])
self.parameters = {} # code-specific items
with self._handle.open_ds() as _handle:
# _handle here is a netcdf Dataset object, we need to parse some metadata
# for constructing our yt ds.
# TO DO: generalize this to be coordinate variable name agnostic in order to
# make useful for WRF or climate data. For now, we're hard coding for CM1
# specifically and have named the classes appropriately. Additionally, we
# are only handling the cell-centered grid ("xh","yh","zh") at present.
# The cell-centered grid contains scalar fields and interpolated velocities.
dims = [_handle.dimensions[i].size for i in ["xh", "yh", "zh"]]
xh, yh, zh = (_handle.variables[i][:] for i in ["xh", "yh", "zh"])
self.domain_left_edge = np.array(
[xh.min(), yh.min(), zh.min()], dtype="float64"
)
self.domain_right_edge = np.array(
[xh.max(), yh.max(), zh.max()], dtype="float64"
)
# loop over the variable names in the netCDF file, record only those on the
# "zh","yh","xh" grid.
varnames = []
for key, var in _handle.variables.items():
if all(x in var.dimensions for x in ["time", "zh", "yh", "xh"]):
varnames.append(key)
self.parameters["variable_names"] = varnames
self.parameters["lofs_version"] = _handle.cm1_lofs_version
self.parameters["is_uniform"] = _handle.uniform_mesh
self.current_time = _handle.variables["time"][:][0]
# record the dimension metadata: __handle.dimensions contains netcdf
# objects so we need to manually copy over attributes.
dim_info = OrderedDict()
for dim, meta in _handle.dimensions.items():
dim_info[dim] = meta.size
self.parameters["dimensions"] = dim_info
self.dimensionality = 3
self.domain_dimensions = np.array(dims, dtype="int64")
self._periodicity = (False, False, False)
# Set cosmological information to zero for non-cosmological.
self.cosmological_simulation = 0
self.current_redshift = 0.0
self.omega_lambda = 0.0
self.omega_matter = 0.0
self.hubble_constant = 0.0
@classmethod
def _is_valid(cls, filename, *args, **kwargs):
# This accepts a filename or a set of arguments and returns True or
# False depending on if the file is of the type requested.
warn_netcdf(filename)
try:
nc4_file = NetCDF4FileHandler(filename)
with nc4_file.open_ds(keepweakref=True) as _handle:
is_cm1_lofs = hasattr(_handle, "cm1_lofs_version")
is_cm1 = hasattr(_handle, "cm1 version") # not a typo, it is a space...
# ensure coordinates of each variable array exists in the dataset
coords = _handle.dimensions # get the dataset wide coordinates
failed_vars = [] # list of failed variables
for var in _handle.variables: # iterate over the variables
vcoords = _handle[var].dimensions # get the dims for the variable
ncoords = len(vcoords) # number of coordinates in variable
# number of coordinates that pass for a variable
coordspassed = sum(vc in coords for vc in vcoords)
if coordspassed != ncoords:
failed_vars.append(var)
if failed_vars:
mylog.warning(
"Trying to load a cm1_lofs netcdf file but the "
"coordinates of the following fields do not match the "
"coordinates of the dataset: %s",
failed_vars,
)
return False
if not is_cm1_lofs:
if is_cm1:
mylog.warning(
"It looks like you are trying to load a cm1 netcdf file, "
"but at present yt only supports cm1_lofs output. Until"
" support is added, you can likely use"
" yt.load_uniform_grid() to load your cm1 file manually."
)
return False
except (OSError, AttributeError, ImportError):
return False
return True
| 41.792627
| 88
| 0.603705
|
import os
import stat
import weakref
from collections import OrderedDict
import numpy as np
from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset
from yt.geometry.grid_geometry_handler import GridIndex
from yt.utilities.file_handler import NetCDF4FileHandler, warn_netcdf
from yt.utilities.logger import ytLogger as mylog
from .fields import CM1FieldInfo
class CM1Grid(AMRGridPatch):
_id_offset = 0
def __init__(self, id, index, level, dimensions):
super().__init__(id, filename=index.index_filename, index=index)
self.Parent = None
self.Children = []
self.Level = level
self.ActiveDimensions = dimensions
def __repr__(self):
return f"CM1Grid_{self.id:d} ({self.ActiveDimensions})"
class CM1Hierarchy(GridIndex):
grid = CM1Grid
def __init__(self, ds, dataset_type="cm1"):
self.dataset_type = dataset_type
self.dataset = weakref.proxy(ds)
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
self.float_type = np.float64
super().__init__(ds, dataset_type)
def _detect_output_fields(self):
vnames = self.dataset.parameters["variable_names"]
self.field_list = [("cm1", vname) for vname in vnames]
def _count_grids(self):
self.num_grids = 1
def _parse_index(self):
self.grid_left_edge[0][:] = self.ds.domain_left_edge[:]
self.grid_right_edge[0][:] = self.ds.domain_right_edge[:]
self.grid_dimensions[0][:] = self.ds.domain_dimensions[:]
self.grid_particle_count[0][0] = 0
self.grid_levels[0][0] = 1
self.max_level = 1
def _populate_grid_objects(self):
self.grids = np.empty(self.num_grids, dtype="object")
for i in range(self.num_grids):
g = self.grid(i, self, self.grid_levels.flat[i], self.grid_dimensions[i])
g._prepare_grid()
g._setup_dx()
self.grids[i] = g
class CM1Dataset(Dataset):
_index_class = CM1Hierarchy
_field_info_class = CM1FieldInfo
def __init__(
self,
filename,
dataset_type="cm1",
storage_filename=None,
units_override=None,
unit_system="mks",
):
self.fluid_types += ("cm1",)
self._handle = NetCDF4FileHandler(filename)
self.refine_by = 1
super().__init__(
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
)
self.storage_filename = storage_filename
self.filename = filename
def _setup_coordinate_handler(self):
        # ensure correct ordering of axes so plots aren't rotated (z should always be
        # on the vertical axis).
super()._setup_coordinate_handler()
self.coordinates._x_pairs = (("x", "y"), ("y", "x"), ("z", "x"))
self.coordinates._y_pairs = (("x", "z"), ("y", "z"), ("z", "y"))
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
# on-disk units. These are the currently available quantities which
# should be set, along with examples of how to set them to standard
# values.
with self._handle.open_ds() as _handle:
length_unit = _handle.variables["xh"].units
self.length_unit = self.quan(1.0, length_unit)
self.mass_unit = self.quan(1.0, "kg")
self.time_unit = self.quan(1.0, "s")
self.velocity_unit = self.quan(1.0, "m/s")
def _parse_parameter_file(self):
# This needs to set up the following items. Note that these are all
# assumed to be in code units; domain_left_edge and domain_right_edge
# will be converted to YTArray automatically at a later time.
# This includes the cosmological parameters.
#
# self.unique_identifier <= unique identifier for the dataset
# being read (e.g., UUID or ST_CTIME)
self.unique_identifier = int(os.stat(self.parameter_filename)[stat.ST_CTIME])
self.parameters = {} # code-specific items
with self._handle.open_ds() as _handle:
# _handle here is a netcdf Dataset object, we need to parse some metadata
# for constructing our yt ds.
# TO DO: generalize this to be coordinate variable name agnostic in order to
# make useful for WRF or climate data. For now, we're hard coding for CM1
dims = [_handle.dimensions[i].size for i in ["xh", "yh", "zh"]]
xh, yh, zh = (_handle.variables[i][:] for i in ["xh", "yh", "zh"])
self.domain_left_edge = np.array(
[xh.min(), yh.min(), zh.min()], dtype="float64"
)
self.domain_right_edge = np.array(
[xh.max(), yh.max(), zh.max()], dtype="float64"
)
varnames = []
for key, var in _handle.variables.items():
if all(x in var.dimensions for x in ["time", "zh", "yh", "xh"]):
varnames.append(key)
self.parameters["variable_names"] = varnames
self.parameters["lofs_version"] = _handle.cm1_lofs_version
self.parameters["is_uniform"] = _handle.uniform_mesh
self.current_time = _handle.variables["time"][:][0]
dim_info = OrderedDict()
for dim, meta in _handle.dimensions.items():
dim_info[dim] = meta.size
self.parameters["dimensions"] = dim_info
self.dimensionality = 3
self.domain_dimensions = np.array(dims, dtype="int64")
self._periodicity = (False, False, False)
self.cosmological_simulation = 0
self.current_redshift = 0.0
self.omega_lambda = 0.0
self.omega_matter = 0.0
self.hubble_constant = 0.0
@classmethod
def _is_valid(cls, filename, *args, **kwargs):
warn_netcdf(filename)
try:
nc4_file = NetCDF4FileHandler(filename)
with nc4_file.open_ds(keepweakref=True) as _handle:
is_cm1_lofs = hasattr(_handle, "cm1_lofs_version")
is_cm1 = hasattr(_handle, "cm1 version")
coords = _handle.dimensions
failed_vars = []
for var in _handle.variables:
vcoords = _handle[var].dimensions
ncoords = len(vcoords)
coordspassed = sum(vc in coords for vc in vcoords)
if coordspassed != ncoords:
failed_vars.append(var)
if failed_vars:
mylog.warning(
"Trying to load a cm1_lofs netcdf file but the "
"coordinates of the following fields do not match the "
"coordinates of the dataset: %s",
failed_vars,
)
return False
if not is_cm1_lofs:
if is_cm1:
mylog.warning(
"It looks like you are trying to load a cm1 netcdf file, "
"but at present yt only supports cm1_lofs output. Until"
" support is added, you can likely use"
" yt.load_uniform_grid() to load your cm1 file manually."
)
return False
except (OSError, AttributeError, ImportError):
return False
return True
| true
| true
|
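When _is_valid above rejects a plain (non-LOFS) cm1 file, its warning points users at yt.load_uniform_grid. A hedged sketch of that fallback with fabricated in-memory data — the field name, shape, and units are placeholders, not values read from a real cm1 output:
import numpy as np
import yt
shape = (16, 16, 16)
data = {"density": np.random.random(shape)}  # stand-in for a netCDF variable
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
ds = yt.load_uniform_grid(data, shape, length_unit="m", bbox=bbox)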
1c3f8a0bddd83d58c92ba7505f302d4e5c9c1774
| 353
|
py
|
Python
|
python/example_from_ray_website.py
|
viper7882/binance-public-data
|
94c77de455338b9a6b9bd03aeacbfd637e36c38a
|
[
"MIT"
] | null | null | null |
python/example_from_ray_website.py
|
viper7882/binance-public-data
|
94c77de455338b9a6b9bd03aeacbfd637e36c38a
|
[
"MIT"
] | null | null | null |
python/example_from_ray_website.py
|
viper7882/binance-public-data
|
94c77de455338b9a6b9bd03aeacbfd637e36c38a
|
[
"MIT"
] | null | null | null |
import ray
import pandas as pd
import dask.dataframe as dd
# Create a Dataset from a list of Pandas DataFrame objects.
pdf = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
ds = ray.data.from_pandas([ray.put(pdf)])
# Create a Dataset from a Dask-on-Ray DataFrame.
dask_df = dd.from_pandas(pdf, npartitions=10)
ds = ray.data.from_dask(dask_df)
| 32.090909
| 62
| 0.70255
|
import ray
import pandas as pd
import dask.dataframe as dd
pdf = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
ds = ray.data.from_pandas([ray.put(pdf)])
dask_df = dd.from_pandas(pdf, npartitions=10)
ds = ray.data.from_dask(dask_df)
| true
| true
|
1c3f8cb06461a4156bf8d23f4172c14cff4e6bf2
| 598
|
py
|
Python
|
src/jk_utils/RandomStateID.py
|
jkpubsrc/python-module-jk-utils
|
6bf97b3dcde7a970c20ca43323e2eb0dda8fbfb3
|
[
"Apache-1.1"
] | null | null | null |
src/jk_utils/RandomStateID.py
|
jkpubsrc/python-module-jk-utils
|
6bf97b3dcde7a970c20ca43323e2eb0dda8fbfb3
|
[
"Apache-1.1"
] | null | null | null |
src/jk_utils/RandomStateID.py
|
jkpubsrc/python-module-jk-utils
|
6bf97b3dcde7a970c20ca43323e2eb0dda8fbfb3
|
[
"Apache-1.1"
] | null | null | null |
import os
import string
import random
class RandomStateID(object):
def __init__(self):
self.__randomStateID = self.__randomString()
#
def __randomString(self, stringLength:int = 32) -> str:
letters = string.ascii_letters + string.digits
return "".join(random.choice(letters) for i in range(stringLength))
#
def touch(self) -> str:
self.__randomStateID = self.__randomString()
return self.__randomStateID
#
@property
def randomStateID(self) -> str:
return self.__randomStateID
#
def __str__(self):
return self.__randomStateID
#
#
| 8.794118
| 69
| 0.687291
|
import os
import string
import random
class RandomStateID(object):
def __init__(self):
self.__randomStateID = self.__randomString()
def __randomString(self, stringLength:int = 32) -> str:
letters = string.ascii_letters + string.digits
return "".join(random.choice(letters) for i in range(stringLength))
def touch(self) -> str:
self.__randomStateID = self.__randomString()
return self.__randomStateID
@property
def randomStateID(self) -> str:
return self.__randomStateID
def __str__(self):
return self.__randomStateID
| true
| true
|
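A short usage sketch of the class above, run alongside its definition: the property reads the current ID, while touch() replaces it and returns the new value.
state = RandomStateID()
first = state.randomStateID   # current 32-character ID
second = state.touch()        # rotate: generate, store, and return a new ID
assert str(state) == second
assert first != second        # collision odds are ~62**-32, effectively zero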
1c3f8dbdef82b91ec5509d0b222966b281d8b193
| 3,943
|
py
|
Python
|
ugrd-assistance-tools_win/[mr]-11_string-data-too-large.py
|
jungheum/cfreds-2017-winreg
|
ebdfc6a60849f1b8f0e9f2d35551cc1dfc012036
|
[
"Apache-2.0"
] | 1
|
2020-04-11T15:33:13.000Z
|
2020-04-11T15:33:13.000Z
|
ugrd-assistance-tools_win/[mr]-11_string-data-too-large.py
|
jungheum/cfreds-2017-winreg
|
ebdfc6a60849f1b8f0e9f2d35551cc1dfc012036
|
[
"Apache-2.0"
] | null | null | null |
ugrd-assistance-tools_win/[mr]-11_string-data-too-large.py
|
jungheum/cfreds-2017-winreg
|
ebdfc6a60849f1b8f0e9f2d35551cc1dfc012036
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
=============================================================================
* Description
MR (manipulated registry hives) category
> Primary class 3
: Invalid data size
> Secondary class 3.1
: String data too large
* Author
Jungheum Park (jungheum.park@nist.gov)
* Organization
Software and Systems Division
Information Technology Laboratory
National Institue of Standards and Technology
U.S. Department of Commerce
* Project @ NIST
CFTT (Computer Forensic Tool Testing) www.cftt.nist.gov
CFReDS (Computer Forensic Reference Data Sets) www.cfreds.nist.gov
* License
Apache License 2.0
* Tested Environment
Windows 7 Enterprise SP1 64-bits English
Python 3.4.3
=============================================================================
"""
import sys
import os
from cftt_hive import *
assert len(sys.argv) == 2
BASE_ADJUST = 2 # factor by which the original data_size is inflated
def mr_pc3_sc3_1(buffer, filesize):
# utilize the CFTT's hive class
hive = cftt_hive(buffer)
# get the offset of root cell (0x24 from the beginning of hive file)
header = hive.get_header(0)
offset = hive.calc_hive_offset(header.root_cell_offset)
# goto the root cell
root_cell = hive.get_nk(offset)
# get the 1st subkey offset of the original root key cell
offset = hive.calc_hive_offset(root_cell['base'].subkey_list_cell_offset)
sl_cell = hive.get_sl(offset)
# traverse all subkeys
stack = list(reversed(sl_cell['offsets']))
nk_cell = None
vk_cell = None
found = False
    while True:
if len(stack) == 0: break
nk_cell = hive.get_nk(hive.calc_hive_offset(stack.pop()))
sl_cell = hive.get_sl(hive.calc_hive_offset(nk_cell['base'].subkey_list_cell_offset))
if nk_cell['base'].number_of_values > 0:
# goto the value-list cell offset
offset = hive.calc_hive_offset(nk_cell['base'].value_list_cell_offset)
vl_cell = hive.get_vl(offset)
for offset in vl_cell['offsets']:
vk_cell = hive.get_vk(hive.calc_hive_offset(offset))
data_size = vk_cell['base'].data_size
if data_size & 0x80000000 == 0x80000000:
data_size = data_size & 0x7FFFFFFF
# [condition] select a value cell which has SZ type data
if 4 < data_size and \
vk_cell['base'].data_type == 0x00000001: # REG_SZ
found = True
break
if found is True:
break
stack.extend(list(reversed(sl_cell['offsets'])))
if nk_cell is None or vk_cell is None or found is False:
return []
# build manipulated item list (m_list)
m_list = []
m_item = {}
m_item['offset'] = vk_cell['self'] + 0x08 # 'data size' in the vk cell
edited_value = vk_cell['base'].data_size*BASE_ADJUST
m_item['data'] = edited_value.to_bytes(4, byteorder='little')
m_item['info'] = "[original size] 0x%08X (@ 0x%08X) --> [manipulated size] 0x%08X (@ 0x%08X)" \
% (vk_cell['base'].data_size, m_item['offset'], edited_value, m_item['offset'])
m_item['info']+= "\n[Target 'vk_cell']\n: {}\n".format(vk_cell)
m_list.append(m_item)
return m_list
def manipulate(fi, filename, m_list):
fl = open(filename + '.txt', 'w', encoding='utf-8')
if len(m_list) == 0:
fl.write('There is no manipulated item.\n')
for m_item in m_list:
fi.seek(m_item['offset'], os.SEEK_SET)
fi.write(m_item['data'])
fl.write(m_item['info'] + '\n')
fl.close()
f = open(sys.argv[1], 'rb+')
buffer = f.read()
manipulate(f, sys.argv[1], mr_pc3_sc3_1(buffer, len(buffer)))
f.flush()
f.close()
| 36.509259
| 100
| 0.57824
|
import sys
import os
from cftt_hive import *
assert len(sys.argv) == 2
BASE_ADJUST = 2
def mr_pc3_sc3_1(buffer, filesize):
hive = cftt_hive(buffer)
# get the offset of root cell (0x24 from the beginning of hive file)
header = hive.get_header(0)
offset = hive.calc_hive_offset(header.root_cell_offset)
# goto the root cell
root_cell = hive.get_nk(offset)
# get the 1st subkey offset of the original root key cell
offset = hive.calc_hive_offset(root_cell['base'].subkey_list_cell_offset)
sl_cell = hive.get_sl(offset)
# traverse all subkeys
stack = list(reversed(sl_cell['offsets']))
nk_cell = None
vk_cell = None
found = False
    while True:
if len(stack) == 0: break
nk_cell = hive.get_nk(hive.calc_hive_offset(stack.pop()))
sl_cell = hive.get_sl(hive.calc_hive_offset(nk_cell['base'].subkey_list_cell_offset))
if nk_cell['base'].number_of_values > 0:
# goto the value-list cell offset
offset = hive.calc_hive_offset(nk_cell['base'].value_list_cell_offset)
vl_cell = hive.get_vl(offset)
for offset in vl_cell['offsets']:
vk_cell = hive.get_vk(hive.calc_hive_offset(offset))
data_size = vk_cell['base'].data_size
if data_size & 0x80000000 == 0x80000000:
data_size = data_size & 0x7FFFFFFF
# [condition] select a value cell which has SZ type data
if 4 < data_size and \
vk_cell['base'].data_type == 0x00000001: # REG_SZ
found = True
break
if found is True:
break
stack.extend(list(reversed(sl_cell['offsets'])))
if nk_cell is None or vk_cell is None or found is False:
return []
# build manipulated item list (m_list)
m_list = []
m_item = {}
m_item['offset'] = vk_cell['self'] + 0x08 # 'data size' in the vk cell
edited_value = vk_cell['base'].data_size*BASE_ADJUST
m_item['data'] = edited_value.to_bytes(4, byteorder='little')
m_item['info'] = "[original size] 0x%08X (@ 0x%08X) --> [manipulated size] 0x%08X (@ 0x%08X)" \
% (vk_cell['base'].data_size, m_item['offset'], edited_value, m_item['offset'])
m_item['info']+= "\n[Target 'vk_cell']\n: {}\n".format(vk_cell)
m_list.append(m_item)
return m_list
def manipulate(fi, filename, m_list):
fl = open(filename + '.txt', 'w', encoding='utf-8')
if len(m_list) == 0:
fl.write('There is no manipulated item.\n')
for m_item in m_list:
fi.seek(m_item['offset'], os.SEEK_SET)
fi.write(m_item['data'])
fl.write(m_item['info'] + '\n')
fl.close()
f = open(sys.argv[1], 'rb+')
buffer = f.read()
manipulate(f, sys.argv[1], mr_pc3_sc3_1(buffer, len(buffer)))
f.flush()
f.close()
| true
| true
|
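mr_pc3_sc3_1 above masks off the top bit of a vk cell's data_size before comparing sizes; in the registry hive format that bit flags data stored inline in the cell itself rather than in a separate data cell. A standalone sketch of the same decoding — a general hive-format detail, independent of this tool:
def decode_vk_data_size(raw_size):
    """Split a vk cell's raw data_size field into (size, is_inline)."""
    INLINE_FLAG = 0x80000000
    is_inline = bool(raw_size & INLINE_FLAG)
    return raw_size & 0x7FFFFFFF, is_inline
assert decode_vk_data_size(0x80000004) == (4, True)    # 4 bytes stored inline
assert decode_vk_data_size(0x0000000C) == (12, False)  # 12 bytes in a data cell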
1c3f8e91170c92d95f17968976a23f206f129654
| 633
|
py
|
Python
|
backend/manage.py
|
crowdbotics-apps/trouble-32410
|
969cdd6acba7c469990763e99022c635463c0551
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/trouble-32410
|
969cdd6acba7c469990763e99022c635463c0551
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/trouble-32410
|
969cdd6acba7c469990763e99022c635463c0551
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trouble_32410.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.772727
| 77
| 0.685624
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trouble_32410.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
1c3f8ead23fae09ca5bdafd4306caae855595ccc
| 3,551
|
py
|
Python
|
src/cryptocom/exchange/coins.py
|
stopkaya/cryptocom-exchange
|
7518767682d574474f1ba90578e88044dde416a0
|
[
"MIT"
] | null | null | null |
src/cryptocom/exchange/coins.py
|
stopkaya/cryptocom-exchange
|
7518767682d574474f1ba90578e88044dde416a0
|
[
"MIT"
] | null | null | null |
src/cryptocom/exchange/coins.py
|
stopkaya/cryptocom-exchange
|
7518767682d574474f1ba90578e88044dde416a0
|
[
"MIT"
] | null | null | null |
from .structs import Coin
AAVE = Coin("AAVE")
ADA = Coin("ADA")
AGLD = Coin("AGLD")
ALGO = Coin("ALGO")
ALICE = Coin("ALICE")
AMP = Coin("AMP")
ANKR = Coin("ANKR")
ANY = Coin("ANY")
AR = Coin("AR")
ARPA = Coin("ARPA")
ATOM = Coin("ATOM")
AUDIO = Coin("AUDIO")
AVAX = Coin("AVAX")
AXS = Coin("AXS")
BADGER = Coin("BADGER")
BAL = Coin("BAL")
BAND = Coin("BAND")
BAT = Coin("BAT")
BCH = Coin("BCH")
BICO = Coin("BICO")
BNT = Coin("BNT")
BOBA = Coin("BOBA")
BOND = Coin("BOND")
BOSON = Coin("BOSON")
BRZ = Coin("BRZ")
BTC = Coin("BTC")
BUSD = Coin("BUSD")
C98 = Coin("C98")
CELR = Coin("CELR")
CHR = Coin("CHR")
CHZ = Coin("CHZ")
CKB = Coin("CKB")
COMP = Coin("COMP")
COTI = Coin("COTI")
CQT = Coin("CQT")
CRO = Coin("CRO")
CRV = Coin("CRV")
CSPR = Coin("CSPR")
CTSI = Coin("CTSI")
DAI = Coin("DAI")
DAR = Coin("DAR")
DERC = Coin("DERC")
DINO = Coin("DINO")
DOGE = Coin("DOGE")
DOT = Coin("DOT")
DYDX = Coin("DYDX")
EFI = Coin("EFI")
EGLD = Coin("EGLD")
ELON = Coin("ELON")
ENJ = Coin("ENJ")
ENS = Coin("ENS")
EOS = Coin("EOS")
EPS = Coin("EPS")
ETC = Coin("ETC")
ETH = Coin("ETH")
FARM = Coin("FARM")
FET = Coin("FET")
FIL = Coin("FIL")
FLOW = Coin("FLOW")
FORTH = Coin("FORTH")
FTM = Coin("FTM")
FXS = Coin("FXS")
GALA = Coin("GALA")
GHST = Coin("GHST")
GLM = Coin("GLM")
GODS = Coin("GODS")
GRT = Coin("GRT")
GTC = Coin("GTC")
GUSD = Coin("GUSD")
HBAR = Coin("HBAR")
HNT = Coin("HNT")
HOD = Coin("HOD")
HOT = Coin("HOT")
HUSD = Coin("HUSD")
ICP = Coin("ICP")
ICX = Coin("ICX")
ILV = Coin("ILV")
IMX = Coin("IMX")
INJ = Coin("INJ")
IOTX = Coin("IOTX")
IQ = Coin("IQ")
JASMY = Coin("JASMY")
KAVA = Coin("KAVA")
KEEP = Coin("KEEP")
KLAY = Coin("KLAY")
KNC = Coin("KNC")
KSM = Coin("KSM")
LINK = Coin("LINK")
LPT = Coin("LPT")
LRC = Coin("LRC")
LTC = Coin("LTC")
LUNA = Coin("LUNA")
MANA = Coin("MANA")
MASK = Coin("MASK")
MATIC = Coin("MATIC")
MCO = Coin("MCO")
MIR = Coin("MIR")
MKR = Coin("MKR")
MLN = Coin("MLN")
MOVR = Coin("MOVR")
NANO = Coin("NANO")
NEAR = Coin("NEAR")
NEO = Coin("NEO")
NKN = Coin("NKN")
NMR = Coin("NMR")
NU = Coin("NU")
OCEAN = Coin("OCEAN")
OGN = Coin("OGN")
OMG = Coin("OMG")
ONE = Coin("ONE")
ONEINCH = Coin("1INCH")
ONT = Coin("ONT")
OXT = Coin("OXT")
PAXG = Coin("PAXG")
PENDLE = Coin("PENDLE")
PERP = Coin("PERP")
PLA = Coin("PLA")
POLS = Coin("POLS")
POLY = Coin("POLY")
POWR = Coin("POWR")
QI = Coin("QI")
QNT = Coin("QNT")
QTUM = Coin("QTUM")
QUICK = Coin("QUICK")
RAD = Coin("RAD")
RADAR = Coin("RADAR")
RARI = Coin("RARI")
REN = Coin("REN")
REQ = Coin("REQ")
RGT = Coin("RGT")
RLC = Coin("RLC")
RLY = Coin("RLY")
RNDR = Coin("RNDR")
RSR = Coin("RSR")
RUNE = Coin("RUNE")
RVN = Coin("RVN")
SAND = Coin("SAND")
SC = Coin("SC")
SDN = Coin("SDN")
SGB = Coin("SGB")
SHIB = Coin("SHIB")
SKL = Coin("SKL")
SLP = Coin("SLP")
SNX = Coin("SNX")
SOL = Coin("SOL")
SPELL = Coin("SPELL")
SRM = Coin("SRM")
STORJ = Coin("STORJ")
STX = Coin("STX")
SUPER = Coin("SUPER")
SUSHI = Coin("SUSHI")
TFUEL = Coin("TFUEL")
THETA = Coin("THETA")
TONIC = Coin("TONIC")
TRB = Coin("TRB")
TRIBE = Coin("TRIBE")
TRU = Coin("TRU")
TUSD = Coin("TUSD")
UMA = Coin("UMA")
UNI = Coin("UNI")
USDC = Coin("USDC")
USDP = Coin("USDP")
USDT = Coin("USDT")
VET = Coin("VET")
VTHO = Coin("VTHO")
VVS = Coin("VVS")
WAVE = Coin("WAVE")
WAVES = Coin("WAVES")
WAXP = Coin("WAXP")
WBTC = Coin("WBTC")
XLM = Coin("XLM")
XRP = Coin("XRP")
XTZ = Coin("XTZ")
XYO = Coin("XYO")
YFI = Coin("YFI")
YGG = Coin("YGG")
ZIL = Coin("ZIL")
ZRX = Coin("ZRX")
def all():
return [
value for name, value in globals().items()
if isinstance(value, Coin)
]
| 18.888298
| 50
| 0.58209
|
from .structs import Coin
AAVE = Coin("AAVE")
ADA = Coin("ADA")
AGLD = Coin("AGLD")
ALGO = Coin("ALGO")
ALICE = Coin("ALICE")
AMP = Coin("AMP")
ANKR = Coin("ANKR")
ANY = Coin("ANY")
AR = Coin("AR")
ARPA = Coin("ARPA")
ATOM = Coin("ATOM")
AUDIO = Coin("AUDIO")
AVAX = Coin("AVAX")
AXS = Coin("AXS")
BADGER = Coin("BADGER")
BAL = Coin("BAL")
BAND = Coin("BAND")
BAT = Coin("BAT")
BCH = Coin("BCH")
BICO = Coin("BICO")
BNT = Coin("BNT")
BOBA = Coin("BOBA")
BOND = Coin("BOND")
BOSON = Coin("BOSON")
BRZ = Coin("BRZ")
BTC = Coin("BTC")
BUSD = Coin("BUSD")
C98 = Coin("C98")
CELR = Coin("CELR")
CHR = Coin("CHR")
CHZ = Coin("CHZ")
CKB = Coin("CKB")
COMP = Coin("COMP")
COTI = Coin("COTI")
CQT = Coin("CQT")
CRO = Coin("CRO")
CRV = Coin("CRV")
CSPR = Coin("CSPR")
CTSI = Coin("CTSI")
DAI = Coin("DAI")
DAR = Coin("DAR")
DERC = Coin("DERC")
DINO = Coin("DINO")
DOGE = Coin("DOGE")
DOT = Coin("DOT")
DYDX = Coin("DYDX")
EFI = Coin("EFI")
EGLD = Coin("EGLD")
ELON = Coin("ELON")
ENJ = Coin("ENJ")
ENS = Coin("ENS")
EOS = Coin("EOS")
EPS = Coin("EPS")
ETC = Coin("ETC")
ETH = Coin("ETH")
FARM = Coin("FARM")
FET = Coin("FET")
FIL = Coin("FIL")
FLOW = Coin("FLOW")
FORTH = Coin("FORTH")
FTM = Coin("FTM")
FXS = Coin("FXS")
GALA = Coin("GALA")
GHST = Coin("GHST")
GLM = Coin("GLM")
GODS = Coin("GODS")
GRT = Coin("GRT")
GTC = Coin("GTC")
GUSD = Coin("GUSD")
HBAR = Coin("HBAR")
HNT = Coin("HNT")
HOD = Coin("HOD")
HOT = Coin("HOT")
HUSD = Coin("HUSD")
ICP = Coin("ICP")
ICX = Coin("ICX")
ILV = Coin("ILV")
IMX = Coin("IMX")
INJ = Coin("INJ")
IOTX = Coin("IOTX")
IQ = Coin("IQ")
JASMY = Coin("JASMY")
KAVA = Coin("KAVA")
KEEP = Coin("KEEP")
KLAY = Coin("KLAY")
KNC = Coin("KNC")
KSM = Coin("KSM")
LINK = Coin("LINK")
LPT = Coin("LPT")
LRC = Coin("LRC")
LTC = Coin("LTC")
LUNA = Coin("LUNA")
MANA = Coin("MANA")
MASK = Coin("MASK")
MATIC = Coin("MATIC")
MCO = Coin("MCO")
MIR = Coin("MIR")
MKR = Coin("MKR")
MLN = Coin("MLN")
MOVR = Coin("MOVR")
NANO = Coin("NANO")
NEAR = Coin("NEAR")
NEO = Coin("NEO")
NKN = Coin("NKN")
NMR = Coin("NMR")
NU = Coin("NU")
OCEAN = Coin("OCEAN")
OGN = Coin("OGN")
OMG = Coin("OMG")
ONE = Coin("ONE")
ONEINCH = Coin("1INCH")
ONT = Coin("ONT")
OXT = Coin("OXT")
PAXG = Coin("PAXG")
PENDLE = Coin("PENDLE")
PERP = Coin("PERP")
PLA = Coin("PLA")
POLS = Coin("POLS")
POLY = Coin("POLY")
POWR = Coin("POWR")
QI = Coin("QI")
QNT = Coin("QNT")
QTUM = Coin("QTUM")
QUICK = Coin("QUICK")
RAD = Coin("RAD")
RADAR = Coin("RADAR")
RARI = Coin("RARI")
REN = Coin("REN")
REQ = Coin("REQ")
RGT = Coin("RGT")
RLC = Coin("RLC")
RLY = Coin("RLY")
RNDR = Coin("RNDR")
RSR = Coin("RSR")
RUNE = Coin("RUNE")
RVN = Coin("RVN")
SAND = Coin("SAND")
SC = Coin("SC")
SDN = Coin("SDN")
SGB = Coin("SGB")
SHIB = Coin("SHIB")
SKL = Coin("SKL")
SLP = Coin("SLP")
SNX = Coin("SNX")
SOL = Coin("SOL")
SPELL = Coin("SPELL")
SRM = Coin("SRM")
STORJ = Coin("STORJ")
STX = Coin("STX")
SUPER = Coin("SUPER")
SUSHI = Coin("SUSHI")
TFUEL = Coin("TFUEL")
THETA = Coin("THETA")
TONIC = Coin("TONIC")
TRB = Coin("TRB")
TRIBE = Coin("TRIBE")
TRU = Coin("TRU")
TUSD = Coin("TUSD")
UMA = Coin("UMA")
UNI = Coin("UNI")
USDC = Coin("USDC")
USDP = Coin("USDP")
USDT = Coin("USDT")
VET = Coin("VET")
VTHO = Coin("VTHO")
VVS = Coin("VVS")
WAVE = Coin("WAVE")
WAVES = Coin("WAVES")
WAXP = Coin("WAXP")
WBTC = Coin("WBTC")
XLM = Coin("XLM")
XRP = Coin("XRP")
XTZ = Coin("XTZ")
XYO = Coin("XYO")
YFI = Coin("YFI")
YGG = Coin("YGG")
ZIL = Coin("ZIL")
ZRX = Coin("ZRX")
def all():
return [
value for name, value in globals().items()
if isinstance(value, Coin)
]
| true
| true
|
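The all() helper above gathers every module-level Coin by filtering globals(); note that it shadows the builtin all() within this module. A usage sketch, assuming the package import path implied by the file location:
from cryptocom.exchange import coins
available = coins.all()
print(len(available))          # one entry per Coin constant defined above
assert coins.BTC in available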
1c3f8ede1e1aadd1f25a41230d9299e775ebf67c
| 4,257
|
py
|
Python
|
src/commercetools/platform/client/in_store/by_project_key_in_store_key_by_store_key_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 15
|
2018-11-02T14:35:52.000Z
|
2022-03-16T07:51:44.000Z
|
src/commercetools/platform/client/in_store/by_project_key_in_store_key_by_store_key_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 84
|
2018-11-02T12:50:32.000Z
|
2022-03-22T01:25:54.000Z
|
src/commercetools/platform/client/in_store/by_project_key_in_store_key_by_store_key_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 13
|
2019-01-03T09:16:50.000Z
|
2022-02-15T18:37:19.000Z
|
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import typing
import warnings
from ..carts.by_project_key_in_store_key_by_store_key_carts_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyCartsRequestBuilder,
)
from ..customers.by_project_key_in_store_key_by_store_key_customers_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyCustomersRequestBuilder,
)
from ..login.by_project_key_in_store_key_by_store_key_login_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyLoginRequestBuilder,
)
from ..me.by_project_key_in_store_key_by_store_key_me_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyMeRequestBuilder,
)
from ..orders.by_project_key_in_store_key_by_store_key_orders_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyOrdersRequestBuilder,
)
from ..shipping_methods.by_project_key_in_store_key_by_store_key_shipping_methods_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyShippingMethodsRequestBuilder,
)
from ..shopping_lists.by_project_key_in_store_key_by_store_key_shopping_lists_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyShoppingListsRequestBuilder,
)
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyInStoreKeyByStoreKeyRequestBuilder:
_client: "BaseClient"
_project_key: str
_store_key: str
def __init__(
self,
project_key: str,
store_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._store_key = store_key
self._client = client
def carts(self) -> ByProjectKeyInStoreKeyByStoreKeyCartsRequestBuilder:
"""A shopping cart holds product variants and can be ordered."""
return ByProjectKeyInStoreKeyByStoreKeyCartsRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def orders(self) -> ByProjectKeyInStoreKeyByStoreKeyOrdersRequestBuilder:
"""An order can be created from a cart, usually after a checkout process has been completed."""
return ByProjectKeyInStoreKeyByStoreKeyOrdersRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def me(self) -> ByProjectKeyInStoreKeyByStoreKeyMeRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyMeRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def customers(self) -> ByProjectKeyInStoreKeyByStoreKeyCustomersRequestBuilder:
"""A customer is a person purchasing products. customers, Orders,
Comments and Reviews can be associated to a customer.
"""
return ByProjectKeyInStoreKeyByStoreKeyCustomersRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def login(self) -> ByProjectKeyInStoreKeyByStoreKeyLoginRequestBuilder:
"""Retrieves the authenticated customer."""
return ByProjectKeyInStoreKeyByStoreKeyLoginRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def shipping_methods(
self,
) -> ByProjectKeyInStoreKeyByStoreKeyShippingMethodsRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyShippingMethodsRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def shopping_lists(
self,
) -> ByProjectKeyInStoreKeyByStoreKeyShoppingListsRequestBuilder:
"""shopping-lists e.g. for wishlist support"""
return ByProjectKeyInStoreKeyByStoreKeyShoppingListsRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
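# Illustrative usage sketch (added for clarity, not generated code; assumes a
# configured BaseClient instance named `client`):
#   builder = ByProjectKeyInStoreKeyByStoreKeyRequestBuilder(
#       project_key="my-project", store_key="my-store", client=client
#   )
#   carts = builder.carts()  # -> ByProjectKeyInStoreKeyByStoreKeyCartsRequestBuilder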
| 38.008929
| 106
| 0.742072
|
import typing
import warnings
from ..carts.by_project_key_in_store_key_by_store_key_carts_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyCartsRequestBuilder,
)
from ..customers.by_project_key_in_store_key_by_store_key_customers_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyCustomersRequestBuilder,
)
from ..login.by_project_key_in_store_key_by_store_key_login_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyLoginRequestBuilder,
)
from ..me.by_project_key_in_store_key_by_store_key_me_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyMeRequestBuilder,
)
from ..orders.by_project_key_in_store_key_by_store_key_orders_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyOrdersRequestBuilder,
)
from ..shipping_methods.by_project_key_in_store_key_by_store_key_shipping_methods_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyShippingMethodsRequestBuilder,
)
from ..shopping_lists.by_project_key_in_store_key_by_store_key_shopping_lists_request_builder import (
ByProjectKeyInStoreKeyByStoreKeyShoppingListsRequestBuilder,
)
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyInStoreKeyByStoreKeyRequestBuilder:
_client: "BaseClient"
_project_key: str
_store_key: str
def __init__(
self,
project_key: str,
store_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._store_key = store_key
self._client = client
def carts(self) -> ByProjectKeyInStoreKeyByStoreKeyCartsRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyCartsRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def orders(self) -> ByProjectKeyInStoreKeyByStoreKeyOrdersRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyOrdersRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def me(self) -> ByProjectKeyInStoreKeyByStoreKeyMeRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyMeRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def customers(self) -> ByProjectKeyInStoreKeyByStoreKeyCustomersRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyCustomersRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def login(self) -> ByProjectKeyInStoreKeyByStoreKeyLoginRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyLoginRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def shipping_methods(
self,
) -> ByProjectKeyInStoreKeyByStoreKeyShippingMethodsRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyShippingMethodsRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
def shopping_lists(
self,
) -> ByProjectKeyInStoreKeyByStoreKeyShoppingListsRequestBuilder:
return ByProjectKeyInStoreKeyByStoreKeyShoppingListsRequestBuilder(
project_key=self._project_key,
store_key=self._store_key,
client=self._client,
)
| true
| true
|
1c3f8ee1dd9402086a363d8ccd669aa80cb0fa94
| 9,101
|
py
|
Python
|
test/functional/wallet_import_rescan.py
|
foxdproject/foxdcoin
|
9db505f6f32bd3e51bd2b2da533744c98cee23af
|
[
"MIT"
] | 7
|
2020-06-19T20:49:02.000Z
|
2022-01-31T09:12:18.000Z
|
test/functional/wallet_import_rescan.py
|
foxdproject/foxdcoin
|
9db505f6f32bd3e51bd2b2da533744c98cee23af
|
[
"MIT"
] | 1
|
2021-02-26T19:14:11.000Z
|
2021-02-26T19:14:11.000Z
|
test/functional/wallet_import_rescan.py
|
foxdproject/foxdcoin
|
9db505f6f32bd3e51bd2b2da533744c98cee23af
|
[
"MIT"
] | 12
|
2020-05-02T20:01:44.000Z
|
2022-03-03T11:02:13.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends FOXD to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more FOXD to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
import collections
import enum
import itertools
from test_framework.test_framework import FoxdcoinTestFramework
from test_framework.util import assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times
# noinspection PyArgumentList
Call = enum.Enum("Call", "single multi")
# noinspection PyArgumentList
Data = enum.Enum("Data", "address pub priv")
# noinspection PyArgumentList
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
# noinspection PyUnboundLocalVariable,PyUnresolvedReferences
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
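# Illustrative note (added, not part of the original test): the product of
# 2 Call values, 3 Data values, 3 Rescan values and 2 prune flags yields
# 2 * 3 * 3 * 2 = 36 variants, e.g.
# Variant(call=Call.single, data=Data.address, rescan=Rescan.no, prune=False).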
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(FoxdcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| 46.912371
| 116
| 0.659048
|
import collections
import enum
import itertools
from test_framework.test_framework import FoxdcoinTestFramework
from test_framework.util import assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(FoxdcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| true
| true
|
1c3f8f46878befe5b6837e93e3a1642a73da1eb5
| 30
|
py
|
Python
|
mplib/__init__.py
|
0000duck/MPlib
|
f5111db952a9ff1f712ef04b6c5ac3b8da8f1184
|
[
"MIT"
] | 20
|
2021-09-15T05:06:25.000Z
|
2022-03-21T13:37:44.000Z
|
mplib/__init__.py
|
0000duck/MPlib
|
f5111db952a9ff1f712ef04b6c5ac3b8da8f1184
|
[
"MIT"
] | 3
|
2021-09-27T22:04:41.000Z
|
2022-03-07T05:15:34.000Z
|
mplib/__init__.py
|
haosulab/MPlib
|
f5111db952a9ff1f712ef04b6c5ac3b8da8f1184
|
[
"MIT"
] | 4
|
2021-08-02T05:33:37.000Z
|
2021-12-15T02:23:51.000Z
|
from .planner import Planner
| 10
| 28
| 0.8
|
from .planner import Planner
| true
| true
|
1c3f8f46ce106fcbd00ff7ff44983e1404f53e64
| 5,922
|
py
|
Python
|
kit_django/userAccount/models.py
|
safakoner/kit
|
aec36a70137febfb5f3e3a9205ea58879736eea4
|
[
"MIT"
] | 6
|
2020-06-29T20:36:15.000Z
|
2021-09-08T23:34:01.000Z
|
kit_django/userAccount/models.py
|
safakoner/kit
|
aec36a70137febfb5f3e3a9205ea58879736eea4
|
[
"MIT"
] | 9
|
2021-03-30T13:46:29.000Z
|
2022-03-12T00:38:27.000Z
|
kit_django/userAccount/models.py
|
safakoner/kit
|
aec36a70137febfb5f3e3a9205ea58879736eea4
|
[
"MIT"
] | 1
|
2020-07-20T18:40:24.000Z
|
2020-07-20T18:40:24.000Z
|
#
# ----------------------------------------------------------------------------------------------------
# DESCRIPTION
# ----------------------------------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------------------------------
# IMPORTS
# ----------------------------------------------------------------------------------------------------
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from rest_framework.authtoken.models import Token
from core.randomValue import createRandomFileName
from userAccount.apps import UserAccountConfig
from userAccount.managers import UserAccountManager
#
# ----------------------------------------------------------------------------------------------------
# CODE
# ----------------------------------------------------------------------------------------------------
#
## @brief Get random file name for user account avatar image.
#
#  @param instance [ userAccount.models.UserAccount | None | in ] - UserAccount model class instance.
# @param fileName [ str | None | in ] - File name.
#
# @exception N/A
#
# @return str - File path.
def getUserAccountAvatarImageFieldUploadTo(instance, fileName):
fileRelativePath = '{}/{}/{}'.format(UserAccountConfig.name,
instance.folder_name,
createRandomFileName(fileName))
return fileRelativePath
#
## @brief [ DJANGO MODEL CLASS ] - Django model class.
class UserAccount(AbstractBaseUser, PermissionsMixin):
## [ userAccount.managers.UserAccountManager ] - Objects.
objects = UserAccountManager()
## [ str ] - Username field.
USERNAME_FIELD = 'email'
#
# ------------------------------------------------------------------------------------------------
# CLASSES
# ------------------------------------------------------------------------------------------------
#
## @brief [ DJANGO MODEL META CLASS ] - Meta class of django.db.models.Model class.
class Meta:
## [ str ] - Verbose name of the model class.
verbose_name = 'User Account'
## [ str ] - Verbose plural name of the model class.
verbose_name_plural = 'User Account'
## [ tuple ] - Ordering of the model class.
ordering = ('email',)
#
# ------------------------------------------------------------------------------------------------
# FIELDS
# ------------------------------------------------------------------------------------------------
## [ django.db.models.NullBooleanField ] - Whether this user is super user.
is_superuser = models.NullBooleanField(default=False, null=True, blank=True)
## [ django.db.models.NullBooleanField ] - Whether this user is admin.
is_staff = models.NullBooleanField(default=False, null=True, blank=True)
## [ django.db.models.NullBooleanField ] - Whether this instance is active.
is_active = models.NullBooleanField(default=True, null=True, blank=True)
## [ django.db.models.EmailField ] - Email.
email = models.EmailField(verbose_name='email', max_length=255, unique=True, blank=False, null=False)
## [ django.db.models.CharField ] - First name.
first_name = models.CharField(max_length=20, blank=True, null=True)
## [ django.db.models.CharField ] - Last name.
last_name = models.CharField(max_length=20, blank=True, null=True)
## [ django.db.models.ImageField ] - Avatar.
avatar = models.ImageField(upload_to=getUserAccountAvatarImageFieldUploadTo, blank=True)
## [ django.db.models.CharField ] - Folder name.
folder_name = models.CharField(max_length=200, null=False, blank=False)
## [ django.db.models.DateTimeField ] - Last login.
last_login = models.DateTimeField(auto_now=True)
## [ django.db.models.DateTimeField ] - Registration.
registration_date_time = models.DateTimeField(auto_now=False, auto_now_add=True)
    ## [ django.db.models.NullBooleanField ] - Has account been verified.
    has_account_been_verified = models.NullBooleanField(default=False, null=True, blank=True)
    ## [ django.db.models.SlugField ] - Account verification id.
account_verification_id = models.SlugField(max_length=300, unique=True, null=False, blank=False,)
#
# ------------------------------------------------------------------------------------------------
# BUILT-IN METHODS
# ------------------------------------------------------------------------------------------------
#
## @brief String representation.
#
# @exception N/A
#
# @return str - String representation.
def __str__(self):
return self.email
#
# ------------------------------------------------------------------------------------------------
# PROTECTED METHODS
# ------------------------------------------------------------------------------------------------
#
## @brief Property.
#
# @exception N/A
#
# @return rest_framework.authtoken.models.Token - Django model class instance.
def api_token(self):
return Token.objects.get(user=self)
#
## @brief Property.
#
# @exception N/A
#
    # @return str - Full name.
def get_full_name(self):
return '{} {}'.format(self.first_name, self.last_name)
#
## @brief Property.
#
# @exception N/A
#
    # @return str - First name.
def get_short_name(self):
return self.first_name
| 37.245283
| 127
| 0.465552
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from rest_framework.authtoken.models import Token
from core.randomValue import createRandomFileName
from userAccount.apps import UserAccountConfig
from userAccount.managers import UserAccountManager
def getUserAccountAvatarImageFieldUploadTo(instance, fileName):
    fileRelativePath = '{}/{}/{}'.format(UserAccountConfig.name,
                                         instance.folder_name,
                                         createRandomFileName(fileName))
    return fileRelativePath
class UserAccount(AbstractBaseUser, PermissionsMixin):
    objects = UserAccountManager()
    USERNAME_FIELD = 'email'
    class Meta:
        verbose_name = 'User Account'
        verbose_name_plural = 'User Account'
        ordering = ('email',)
    is_superuser = models.NullBooleanField(default=False, null=True, blank=True)
    is_staff = models.NullBooleanField(default=False, null=True, blank=True)
    is_active = models.NullBooleanField(default=True, null=True, blank=True)
    email = models.EmailField(verbose_name='email', max_length=255, unique=True, blank=False, null=False)
    first_name = models.CharField(max_length=20, blank=True, null=True)
    last_name = models.CharField(max_length=20, blank=True, null=True)
    avatar = models.ImageField(upload_to=getUserAccountAvatarImageFieldUploadTo, blank=True)
    folder_name = models.CharField(max_length=200, null=False, blank=False)
    last_login = models.DateTimeField(auto_now=True)
    registration_date_time = models.DateTimeField(auto_now=False, auto_now_add=True)
    has_account_been_verified = models.NullBooleanField(default=False, null=True, blank=True)
    account_verification_id = models.SlugField(max_length=300, unique=True, null=False, blank=False,)
    def __str__(self):
        return self.email
def api_token(self):
return Token.objects.get(user=self)
def get_full_name(self):
return '{} {}'.format(self.first_name, self.last_name)
def get_short_name(self):
return self.first_name
| true
| true
|
1c3f8fbf9d1244edc2a9631eb825360a52a8b713
| 920
|
py
|
Python
|
forms-flow-data-analysis-api/tests/unit/utils/test_logging.py
|
sreehari-aot/forms-flow-ai
|
11e2fdd6da792aaa9dd46c0cec38564fe5916b58
|
[
"Apache-2.0"
] | null | null | null |
forms-flow-data-analysis-api/tests/unit/utils/test_logging.py
|
sreehari-aot/forms-flow-ai
|
11e2fdd6da792aaa9dd46c0cec38564fe5916b58
|
[
"Apache-2.0"
] | null | null | null |
forms-flow-data-analysis-api/tests/unit/utils/test_logging.py
|
sreehari-aot/forms-flow-ai
|
11e2fdd6da792aaa9dd46c0cec38564fe5916b58
|
[
"Apache-2.0"
] | null | null | null |
"""Tests to assure the logging utilities.
Test-Suite to ensure that the logging setup is working as expected.
"""
import os
from api.utils.logging import log_error, log_info, setup_logging
def test_logging_with_file(capsys):
"""Assert that logging is setup with the configuration file."""
file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "logging.conf")
setup_logging(file_path) # important to do this first
captured = capsys.readouterr()
assert captured.out.startswith("Configure logging, from conf")
log_info("log info")
log_error("log error")
def test_logging_with_missing_file(capsys):
"""Assert that a message is sent to STDERR when the configuration doesn't exist."""
file_path = None
setup_logging(file_path) # important to do this first
captured = capsys.readouterr()
assert captured.err.startswith("Unable to configure logging")
| 28.75
| 88
| 0.736957
|
import os
from api.utils.logging import log_error, log_info, setup_logging
def test_logging_with_file(capsys):
file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "logging.conf")
setup_logging(file_path)
captured = capsys.readouterr()
assert captured.out.startswith("Configure logging, from conf")
log_info("log info")
log_error("log error")
def test_logging_with_missing_file(capsys):
file_path = None
setup_logging(file_path)
captured = capsys.readouterr()
assert captured.err.startswith("Unable to configure logging")
| true
| true
|
1c3f8fd38b0665f47857eca6f549cdb202bf5080
| 5,283
|
py
|
Python
|
Lib/ctypes/macholib/dyld.py
|
pxeger/cpython
|
959580bd9ff8824590e8b24895bc2276f3f10b35
|
[
"0BSD"
] | 52,316
|
2015-01-01T15:56:25.000Z
|
2022-03-31T23:19:01.000Z
|
Lib/ctypes/macholib/dyld.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 25,286
|
2015-03-03T23:18:02.000Z
|
2022-03-31T23:17:27.000Z
|
Lib/ctypes/macholib/dyld.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 31,623
|
2015-01-01T13:29:37.000Z
|
2022-03-31T19:55:06.000Z
|
"""
dyld emulation
"""
import os
from ctypes.macholib.framework import framework_info
from ctypes.macholib.dylib import dylib_info
from itertools import *
try:
from _ctypes import _dyld_shared_cache_contains_path
except ImportError:
def _dyld_shared_cache_contains_path(*args):
raise NotImplementedError
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
]
# These are the defaults as per man dyld(1)
#
DEFAULT_FRAMEWORK_FALLBACK = [
os.path.expanduser("~/Library/Frameworks"),
"/Library/Frameworks",
"/Network/Library/Frameworks",
"/System/Library/Frameworks",
]
DEFAULT_LIBRARY_FALLBACK = [
os.path.expanduser("~/lib"),
"/usr/local/lib",
"/lib",
"/usr/lib",
]
def dyld_env(env, var):
if env is None:
env = os.environ
rval = env.get(var)
if rval is None:
return []
return rval.split(':')
def dyld_image_suffix(env=None):
if env is None:
env = os.environ
return env.get('DYLD_IMAGE_SUFFIX')
def dyld_framework_path(env=None):
return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
def dyld_library_path(env=None):
return dyld_env(env, 'DYLD_LIBRARY_PATH')
def dyld_fallback_framework_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
def dyld_fallback_library_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
def dyld_image_suffix_search(iterator, env=None):
"""For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
suffix = dyld_image_suffix(env)
if suffix is None:
return iterator
def _inject(iterator=iterator, suffix=suffix):
for path in iterator:
if path.endswith('.dylib'):
yield path[:-len('.dylib')] + suffix + '.dylib'
else:
yield path + suffix
yield path
return _inject()
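# Example (assuming DYLD_IMAGE_SUFFIX="_debug"): "/usr/lib/libfoo.dylib" yields
# "/usr/lib/libfoo_debug.dylib" and then the original path; a non-.dylib entry
# such as "Foo.framework/Foo" yields "Foo.framework/Foo_debug" then the original.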
def dyld_override_search(name, env=None):
# If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
# framework name, use the first file that exists in the framework
# path if any. If there is none go on to search the DYLD_LIBRARY_PATH
# if any.
framework = framework_info(name)
if framework is not None:
for path in dyld_framework_path(env):
yield os.path.join(path, framework['name'])
# If DYLD_LIBRARY_PATH is set then use the first file that exists
# in the path. If none use the original name.
for path in dyld_library_path(env):
yield os.path.join(path, os.path.basename(name))
def dyld_executable_path_search(name, executable_path=None):
# If we haven't done any searching and found a library and the
# dylib_name starts with "@executable_path/" then construct the
# library name.
if name.startswith('@executable_path/') and executable_path is not None:
yield os.path.join(executable_path, name[len('@executable_path/'):])
def dyld_default_search(name, env=None):
yield name
framework = framework_info(name)
if framework is not None:
fallback_framework_path = dyld_fallback_framework_path(env)
for path in fallback_framework_path:
yield os.path.join(path, framework['name'])
fallback_library_path = dyld_fallback_library_path(env)
for path in fallback_library_path:
yield os.path.join(path, os.path.basename(name))
if framework is not None and not fallback_framework_path:
for path in DEFAULT_FRAMEWORK_FALLBACK:
yield os.path.join(path, framework['name'])
if not fallback_library_path:
for path in DEFAULT_LIBRARY_FALLBACK:
yield os.path.join(path, os.path.basename(name))
def dyld_find(name, executable_path=None, env=None):
"""
Find a library or framework using dyld semantics
"""
for path in dyld_image_suffix_search(chain(
dyld_override_search(name, env),
dyld_executable_path_search(name, executable_path),
dyld_default_search(name, env),
), env):
if os.path.isfile(path):
return path
try:
if _dyld_shared_cache_contains_path(path):
return path
except NotImplementedError:
pass
raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
"""
Find a framework using dyld semantics in a very loose manner.
Will take input such as:
Python
Python.framework
Python.framework/Versions/Current
"""
error = None
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError as e:
error = e
fmwk_index = fn.rfind('.framework')
if fmwk_index == -1:
fmwk_index = len(fn)
fn += '.framework'
fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError:
raise error
finally:
error = None
def test_dyld_find():
env = {}
assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
if __name__ == '__main__':
test_dyld_find()
| 30.362069
| 103
| 0.671588
|
import os
from ctypes.macholib.framework import framework_info
from ctypes.macholib.dylib import dylib_info
from itertools import *
try:
from _ctypes import _dyld_shared_cache_contains_path
except ImportError:
def _dyld_shared_cache_contains_path(*args):
raise NotImplementedError
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
]
DEFAULT_FRAMEWORK_FALLBACK = [
os.path.expanduser("~/Library/Frameworks"),
"/Library/Frameworks",
"/Network/Library/Frameworks",
"/System/Library/Frameworks",
]
DEFAULT_LIBRARY_FALLBACK = [
os.path.expanduser("~/lib"),
"/usr/local/lib",
"/lib",
"/usr/lib",
]
def dyld_env(env, var):
if env is None:
env = os.environ
rval = env.get(var)
if rval is None:
return []
return rval.split(':')
def dyld_image_suffix(env=None):
if env is None:
env = os.environ
return env.get('DYLD_IMAGE_SUFFIX')
def dyld_framework_path(env=None):
return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
def dyld_library_path(env=None):
return dyld_env(env, 'DYLD_LIBRARY_PATH')
def dyld_fallback_framework_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
def dyld_fallback_library_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
def dyld_image_suffix_search(iterator, env=None):
suffix = dyld_image_suffix(env)
if suffix is None:
return iterator
def _inject(iterator=iterator, suffix=suffix):
for path in iterator:
if path.endswith('.dylib'):
yield path[:-len('.dylib')] + suffix + '.dylib'
else:
yield path + suffix
yield path
return _inject()
def dyld_override_search(name, env=None):
framework = framework_info(name)
if framework is not None:
for path in dyld_framework_path(env):
yield os.path.join(path, framework['name'])
for path in dyld_library_path(env):
yield os.path.join(path, os.path.basename(name))
def dyld_executable_path_search(name, executable_path=None):
if name.startswith('@executable_path/') and executable_path is not None:
yield os.path.join(executable_path, name[len('@executable_path/'):])
def dyld_default_search(name, env=None):
yield name
framework = framework_info(name)
if framework is not None:
fallback_framework_path = dyld_fallback_framework_path(env)
for path in fallback_framework_path:
yield os.path.join(path, framework['name'])
fallback_library_path = dyld_fallback_library_path(env)
for path in fallback_library_path:
yield os.path.join(path, os.path.basename(name))
if framework is not None and not fallback_framework_path:
for path in DEFAULT_FRAMEWORK_FALLBACK:
yield os.path.join(path, framework['name'])
if not fallback_library_path:
for path in DEFAULT_LIBRARY_FALLBACK:
yield os.path.join(path, os.path.basename(name))
def dyld_find(name, executable_path=None, env=None):
for path in dyld_image_suffix_search(chain(
dyld_override_search(name, env),
dyld_executable_path_search(name, executable_path),
dyld_default_search(name, env),
), env):
if os.path.isfile(path):
return path
try:
if _dyld_shared_cache_contains_path(path):
return path
except NotImplementedError:
pass
raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
error = None
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError as e:
error = e
fmwk_index = fn.rfind('.framework')
if fmwk_index == -1:
fmwk_index = len(fn)
fn += '.framework'
fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError:
raise error
finally:
error = None
def test_dyld_find():
env = {}
assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
if __name__ == '__main__':
test_dyld_find()
| true
| true
|
1c3f90839acedeb4b9a5d478f2e55f5b2a78c281
| 45,047
|
py
|
Python
|
proofwallet.py
|
RCasatta/proof-wallet
|
7dc27a7618ce8a1124975562ed8fa679eadee6b4
|
[
"MIT"
] | 1
|
2021-01-30T19:42:09.000Z
|
2021-01-30T19:42:09.000Z
|
proofwallet.py
|
RCasatta/proof-wallet
|
7dc27a7618ce8a1124975562ed8fa679eadee6b4
|
[
"MIT"
] | null | null | null |
proofwallet.py
|
RCasatta/proof-wallet
|
7dc27a7618ce8a1124975562ed8fa679eadee6b4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
################################################################################################
#
# GlacierScript: Part of the Glacier Protocol (http://glacierprotocol.org)
#
# GlacierScript is designed specifically for use in the context of executing the broader Glacier
# Protocol, a step-by-step procedure for high-security cold storage of Bitcoin. It is not
# intended to be used as standalone software.
#
# GlacierScript primarily replaces tasks that users would otherwise be doing manually, such as
# typing things on the command line, copying-and-pasting strings, and hand-editing JSON. It
# mostly consists of print statements, user input, string & JSON manipulation, and command-line
# wrappers around Bitcoin Core and other applications (e.g. those involved in reading and writing
# QR codes.)
#
# GlacierScript avoids cryptographic and other security-sensitive operations as much as possible.
#
# GlacierScript depends on the following command-line applications:
# - Bitcoin Core (http://bitcoincore.org)
# - qrencode (QR code writer: http://packages.ubuntu.com/xenial/qrencode)
# - zbarimg (QR code reader: http://packages.ubuntu.com/xenial/zbar-tools)
#
################################################################################################
# standard Python libraries
import argparse
import json
import os
import shlex
import subprocess
import sys
import time
import re
import glob
from decimal import Decimal
from hashlib import sha256, md5, new as hashlib_new
from binascii import unhexlify, hexlify
from mnemonic import Mnemonic
# Taken from https://github.com/keis/base58
from base58 import b58encode_check, b58decode
SATOSHI_PLACES = Decimal("0.00000001")
verbose_mode = 0
FEE_RATE_MULTIPLIER = 10**5 # BTC/kB -> sat/byte
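# (1 BTC/kB = 10**8 sat per 10**3 bytes = 10**5 sat/byte, hence the multiplier.)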
LINE_BREAK = "=" * 80
FINGERPRINT_PATTERN = "(?P<fng>[a-fA-F0-9]{8})"
PATH_PATTERN = "(?P<path>(?:(?:/)(?:\d+)(?:['h]{0,1}))*)"
XPUB_PATTERN = "(?P<xpub>\w{110,112})"
DESCRIPTOR_KEY_PATTERN = re.compile("^\[" + FINGERPRINT_PATTERN + PATH_PATTERN + "\]" + XPUB_PATTERN + "$")
UNHARDENED_PATH_PATTERN = "^/([01])/(0|[1-9][0-9]*)$" # match /{change}/{idx} and prevent leading zeros
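# Illustrative example (hypothetical values): "[0f056943/48'/1'/0'/2']tpubDF..."
# matches DESCRIPTOR_KEY_PATTERN with fng="0f056943" and path="/48'/1'/0'/2'",
# while "/0/15" matches UNHARDENED_PATH_PATTERN with change=0 and idx=15.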
################################################################################################
#
# Minor helper functions
#
################################################################################################
def hash_sha256(s):
"""A thin wrapper around the hashlib SHA256 library to provide a more functional interface"""
m = sha256()
m.update(s.encode('ascii'))
return m.hexdigest()
def hash_md5(s):
"""A thin wrapper around the hashlib md5 library to provide a more functional interface"""
m = md5()
m.update(s.encode('ascii'))
return m.hexdigest()
def hash160(string):
"""A thin wrapper around hashlib to compute the hash160 (SHA256 followed by RIPEMD160)"""
intermed = sha256(string).digest()
return hashlib_new('ripemd160', intermed).digest()
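# For example, the first 4 bytes of hash160 over a compressed public key form the
# BIP32 fingerprint used by get_fingerprint_from_xkey below.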
################################################################################################
#
# Subprocess helper functions
#
################################################################################################
def verbose(content):
if verbose_mode: print(content)
def run_subprocess(exe, *args):
"""
Run a subprocess (bitcoind or bitcoin-cli)
Returns => (command, return code, output)
exe: executable file name (e.g. bitcoin-cli)
args: arguments to exe
"""
cmd_list = [exe] + cli_args + list(args)
verbose("bitcoin cli call:\n {0}\n".format(" ".join(shlex.quote(x) for x in cmd_list)))
with subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1) as pipe:
output, _ = pipe.communicate()
output = output.decode('ascii')
retcode = pipe.returncode
verbose("bitcoin cli call return code: {0} output:\n {1}\n".format(retcode, output))
return (cmd_list, retcode, output)
def bitcoin_cli_call(*args):
"""
Run `bitcoin-cli`, return OS return code
"""
_, retcode, _ = run_subprocess("bitcoin-cli", *args)
return retcode
def bitcoin_cli_checkoutput(*args):
"""
Run `bitcoin-cli`, fail if OS return code nonzero, return output
"""
cmd_list, retcode, output = run_subprocess("bitcoin-cli", *args)
if retcode != 0: raise subprocess.CalledProcessError(retcode, cmd_list, output=output)
return output
def bitcoin_cli_json(*args):
"""
Run `bitcoin-cli`, parse output as JSON
"""
return json.loads(bitcoin_cli_checkoutput(*args))
def bitcoind_call(*args):
"""
Run `bitcoind`, return OS return code
"""
_, retcode, _ = run_subprocess("bitcoind", *args)
return retcode
################################################################################################
#
# Read & validate random data from the user
#
################################################################################################
def validate_rng_seed(seed, min_length):
"""
Validates random hexadecimal seed
returns => <boolean>
seed: <string> hex string to be validated
min_length: <int> number of characters required. > 0
"""
if len(seed) < min_length:
print("Error: Computer entropy must be at least {0} characters long.".format(min_length))
return False
if len(seed) % 2 != 0:
print("Error: Computer entropy must contain an even number of characters.")
return False
try:
int(seed, 16)
except ValueError:
print("Error: Illegal character. Computer entropy must be composed of hexadecimal characters only (0-9, a-f).")
return False
return True
def read_rng_seed_interactive(min_length):
"""
Reads random seed (of at least min_length hexadecimal characters) from standard input
returns => string
min_length: <int> minimum number of bytes in the seed.
"""
char_length = min_length * 2
def ask_for_rng_seed(length):
print("Enter at least {0} characters of computer entropy. Spaces are OK, and will be ignored:".format(length))
ask_for_rng_seed(char_length)
seed = input()
seed = unchunk(seed)
while not validate_rng_seed(seed, char_length):
ask_for_rng_seed(char_length)
seed = input()
seed = unchunk(seed)
return seed
def validate_dice_seed(dice, min_length):
"""
Validates dice data (i.e. ensures all digits are between 1 and 6).
returns => <boolean>
dice: <string> representing list of dice rolls (e.g. "5261435236...")
"""
if len(dice) < min_length:
print("Error: You must provide at least {0} dice rolls.".format(min_length))
return False
for die in dice:
try:
i = int(die)
if i < 1 or i > 6:
print("Error: Dice rolls must be between 1 and 6.")
return False
except ValueError:
print("Error: Dice rolls must be numbers between 1 and 6.")
return False
return True
def read_dice_seed_interactive(min_length):
"""
Reads min_length dice rolls from standard input, as a string of consecutive integers
Returns a string representing the dice rolls
returns => <string>
min_length: <int> number of dice rolls required. > 0.
"""
def ask_for_dice_seed(x):
print("Enter {0} dice rolls (example: 62543 16325 21341...) Spaces are OK, and will be ignored:".format(x))
ask_for_dice_seed(min_length)
dice = input()
dice = unchunk(dice)
while not validate_dice_seed(dice, min_length):
ask_for_dice_seed(min_length)
dice = input()
dice = unchunk(dice)
return dice
################################################################################################
#
# private key generation
#
################################################################################################
def xor_hex_strings(str1, str2):
"""
Return xor of two hex strings.
An XOR of two pieces of data will be as random as the input with the most randomness.
We can thus combine two entropy sources in this way as a safeguard against one source being
compromised in some way.
For details, see http://crypto.stackexchange.com/a/17660
returns => <string> in hex format
"""
if len(str1) != len(str2):
raise Exception("tried to xor strings of unequal length")
str1_dec = int(str1, 16)
str2_dec = int(str2, 16)
xored = str1_dec ^ str2_dec
return "{:0{}x}".format(xored, len(str1))
################################################################################################
#
# Bitcoin helper functions
#
################################################################################################
def ensure_bitcoind_running():
"""
Start bitcoind (if it's not already running) and ensure it's functioning properly
"""
# start bitcoind. If another bitcoind process is already running, this will just print an error
# message (to /dev/null) and exit.
#
# -connect=0.0.0.0 because we're doing local operations only (and have no network connection anyway)
bitcoind_call("-daemon", "-connect=0.0.0.0")
# verify bitcoind started up and is functioning correctly
times = 0
while times <= 20:
times += 1
if bitcoin_cli_call("getnetworkinfo") == 0:
return
time.sleep(0.5)
raise Exception("Timeout while starting bitcoin server")
def require_minimum_bitcoind_version(min_version):
"""
Fail if the bitcoind version in use is older than required
<min_version> - required minimum version in format of getnetworkinfo, i.e. 150100 for v0.15.1
"""
networkinfo = bitcoin_cli_json("getnetworkinfo")
if int(networkinfo["version"]) < min_version:
print("ERROR: Your bitcoind version is too old. You have {}, I need {} or newer. Exiting...".format(networkinfo["version"], min_version))
sys.exit()
def get_xpub_from_xkey(xkey):
"""
Returns the xpub for a given xkey
xkey: <string> base58 encoded extended key
"""
descriptor = "pk({})".format(xkey)
out = bitcoin_cli_json("getdescriptorinfo", descriptor)
public_descriptor = out['descriptor'] # example: 'pk(XPUB)#checksum'
return public_descriptor[3:-10] # slice off 'pk(' prefix and ')#checksum' suffix
def bip32_deserialize(data):
"""
Deserialize a string into a BIP32 extended key (assumes string is valid)
See the bip32 implementation to validate correctness:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format
Parameters:
        data (str): a serialized bip32 extended key
"""
PRIVATE = [b'\x04\x88\xAD\xE4', b'\x04\x35\x83\x94'] # mainnet and testnet private version bits
dbin = b58decode(data)
vbytes = dbin[0:4]
depth = dbin[4]
fingerprint = dbin[5:9]
i = dbin[9:13]
chaincode = dbin[13:45]
key = dbin[46:78] + b'\x01' if vbytes in PRIVATE else dbin[45:78]
return (vbytes, depth, fingerprint, i, chaincode, key)
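# Layout note (per BIP32): bytes 0-3 version, byte 4 depth, 5-8 parent
# fingerprint, 9-12 child number, 13-44 chain code, 45-77 key material; for
# private versions the 0x00 pad byte at offset 45 is skipped and b'\x01' is
# appended, following the compressed-key convention.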
def get_fingerprint_from_xkey(xkey):
"""
Returns the BIP32 fingerprint for the given extended key
xkey: <string> valid bip32 extended key
"""
xpub = get_xpub_from_xkey(xkey)
vbytes, depth, fingerprint, i, chaincode, key = bip32_deserialize(xpub)
fp_bytes = hash160(key)[:4]
return hexlify(fp_bytes).decode('ascii')
def is_valid_xpub(xpub):
"""
Returns whether the string is a valid xpub
xpub (str): potential xpub
"""
try:
descriptor = "pk({})".format(xpub)
# validate that the input string is an xpub
bitcoin_cli_checkoutput("getdescriptorinfo", descriptor)
return True
except subprocess.CalledProcessError:
print("Error: The provided xpub is invalid. Exiting.")
sys.exit(1)
def parse_descriptor_key(key):
regex_match = re.match(DESCRIPTOR_KEY_PATTERN, key)
if regex_match is None:
print("Error: The provided descriptor key is invalid. Exiting.")
sys.exit(1)
fng, path, xpub = regex_match.group('fng'), regex_match.group('path'), regex_match.group('xpub')
# parse path
if path == "":
path = "m"
else:
path_arr = path.replace("h", "'").split("/")[1:]
for idx in path_arr:
if idx[-1] == "'":
idx = idx[:-1]
if str(int(idx)) != idx: # check for leading zeros
print("Error: The provided descriptor key contains leading zeros. Exiting.")
sys.exit(1)
path = "m/" + "/".join(path_arr)
# validate xpub
is_valid_xpub(xpub)
return fng, path, xpub
def parse_mnemonic_to_master_key(string):
mnemo = Mnemonic("english")
words = string.split()
mnemonic = " ".join(words)
if len(words) not in {12, 24}:
print("Error: Mnemonic phrase must be either 12 or 24 words long. Exiting.")
sys.exit(1)
    if not mnemo.check(mnemonic):
print("Error: The mnemonic phrase is invalid. Exiting.")
sys.exit(1)
seed = mnemo.to_seed(mnemonic)
return mnemo.to_hd_master_key(seed, network in {"testnet", "regtest"})
def get_mnemonic_interactive():
"""
Prompts the user for a valid (12 or 24 word) BIP39 mnemonic phrase
return => <string> xprv derived from the mnemonic (and empty passphrase)
"""
string = input("\nEnter your BIP39 mnemonic phrase (separate the words with whitespace): ")
return parse_mnemonic_to_master_key(string)
def get_descriptor_keys_interactive(n):
"""
Prompts the user for n unique and valid descriptor keys (must include fingerprint)
n: <int> the number of descriptor keys to import
returns: List<string> the list of validated descriptor keys
"""
keys = []
print("\nInput {} valid descriptor keys".format(n))
for idx in range(n):
key_str = input("\nEnter descriptor key #{}: ".format(idx+1))
key = parse_descriptor_key(key_str)
keys.append(key)
unique_fingerprints = set(map(lambda key: key[0], keys))
unique_xpubs = set(map(lambda key: key[2], keys))
if len(unique_fingerprints) != n or len(unique_xpubs) != n:
print("Expected {} unique descriptor keys. Exiting".format(n))
sys.exit(1)
return keys
def wsh_descriptor(dkeys, m, change = 0):
"""
Creates the desired Bitcoin Core sortedmulti wsh descriptor for
the provided descriptor keys
dkeys: List<string> wallet descriptor keys
m: <int> number of multisig keys required for withdrawal
change: <int> internal or external descriptor
"""
# create descriptor without checksum
dkeys_str = ",".join([
"[{}{}]{}/{}/*".format(fng, path[1:], dkey, str(change))
for fng, path, dkey
in dkeys
])
descriptor = "wsh(sortedmulti({},{}))".format(str(m), dkeys_str)
# getdescriptorinfo and append checksum
output = bitcoin_cli_json("getdescriptorinfo", descriptor)
return descriptor + "#" + output["checksum"]
def importmulti(idxs, dkeys, m):
"""
Imports private key data for (external and internal) addresses at the given
indices into Bitcoin Core
idxs: Set<int> address indices to perform the import
dkeys: List<string> wallet descriptor keys
m: <int> number of multisig keys required for withdrawal
"""
for change in {0, 1}:
desc = wsh_descriptor(dkeys, m, change)
args = []
for i in idxs:
args.append({
"desc": desc,
"internal": True if change == 1 else False,
"range": [i, i],
"timestamp": "now",
"keypool": False,
"watchonly": False
})
bitcoin_cli_json("importmulti", json.dumps(args))
def deriveaddresses(dkeys, m, start, end, change=0):
"""
Derives wallet addresses based on the requested parameters
dkeys: List<string> wallet descriptor keys
m: <int> number of multisig keys required for withdrawal
start: <int> first index to derive address of
end: <int> last index to derive address of
change: <int> internal or external address
"""
desc = wsh_descriptor(dkeys, m, change)
return bitcoin_cli_json("deriveaddresses", desc, json.dumps([start, end]))
def walletprocesspsbt(psbt, idxs, dkeys, m):
"""
Signs a psbt after importing the necessary key data
psbt: <str> base64 encoded psbt
idxs: Set<int> indices to import into Bitcoin Core to sign the psbt
dkeys: List<string> wallet descriptor keys (includes 1 xprv)
m: <int> number of multisig keys required for withdrawal
"""
# import the descriptors necessary to process the provided psbt
importmulti(idxs, dkeys, m)
return bitcoin_cli_json("walletprocesspsbt", psbt, "true", "ALL")
def validate_psbt_bip32_derivs(dkeys, psbt_in_or_out, i, what):
# Ensure input contains BIP32 derivations
if "bip32_derivs" not in psbt_in_or_out:
return ("Tx {} {} is missing bip32 metadata.".format(what, i), None, None)
bip32_derivs = psbt_in_or_out["bip32_derivs"]
    # Ensure the bip32 derivations specified in the psbt input/output are consistent with our wallet's
expected_fps = set(map(lambda dkey: dkey[0], dkeys))
actual_fps = set(map(lambda bip32_deriv: bip32_deriv["master_fingerprint"], bip32_derivs))
if expected_fps != actual_fps or len(dkeys) != len(bip32_derivs):
return ("Tx {} {} has the wrong set of fingerprints.".format(what, i), None, None)
# Ensure each public key derives from the correct hardened path for its master fingerprint, and
# the _same_, _allowed_ unhardened path
input_paths = set()
for fng, expected_path, xpub in dkeys:
# get corresponding bip32 derivation by master fingerprint (guaranteed to succeed)
bip32_deriv = list(filter(lambda bip32_deriv: bip32_deriv["master_fingerprint"] == fng, bip32_derivs)).pop()
# check that hardened path matches cosigner hardened derivation path
path_arr = bip32_deriv["path"].split(expected_path)
if len(path_arr) != 2 and path_arr[0] != "":
return ("Tx {} {} contains an invalid hardened derivation path for cosigner {}.".format(what, i, fng), None, None)
input_paths.add(path_arr[1])
if len(input_paths) != 1:
return ("Tx {} {} contains different unhardened derivation paths.".format(what, i), None, None)
input_path = input_paths.pop()
match_object = re.match(UNHARDENED_PATH_PATTERN, input_path)
if match_object is None:
return ("Tx {} {} contains an unsupported derivation path: '..{}'.".format(what, i, input_path), None, None)
change, idx = map(int, match_object.groups())
return (None, change, idx)
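# Example return values (illustrative): ("Tx input 0 is missing bip32 metadata.",
# None, None) on failure, or (None, 0, 15) for a key set deriving at unhardened
# path /0/15.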
def validate_psbt_in(dkeys, m, _input, i, response):
# Ensure input spends a witness UTXO
if not ("non_witness_utxo" in _input and "witness_utxo" in _input):
return "Tx input {} must include both PSBT_IN_WITNESS_UTXO and PSBT_IN_NON_WITNESS_UTXO fields.".format(i)
# Ensure the witness utxo is the expected type: witness_v0_scripthash
scriptpubkey_type = _input["witness_utxo"]["scriptPubKey"]["type"]
if scriptpubkey_type != "witness_v0_scripthash":
return "Tx input {} contains an incorrect scriptPubKey type.".format(i)
# Ensure input contains a witness script
if "witness_script" not in _input:
return "Tx input {} must include a PSBT_IN_WITNESS_SCRIPT field.".format(i)
# Ensure that the witness script hash equals the scriptPubKey
witness_script = _input["witness_script"]["hex"]
witness_script_hash = hexlify(sha256(unhexlify(witness_script)).digest()).decode()
scriptPubKeyParts = _input["witness_utxo"]["scriptPubKey"]["asm"].split(" ")
if witness_script_hash != scriptPubKeyParts[1]:
return "The SHA256 of PSBT_IN_WITNESS_SCRIPT and PSBT_IN_WITNESS_UTXO don't match for Tx input {}.".format(i)
# Validate psbt input bip32 derivations
(bip32_derivs_err, change, idx) = validate_psbt_bip32_derivs(dkeys, _input, i, "input")
if bip32_derivs_err is not None:
return bip32_derivs_err
# Ensure that the actual address contained in the witness_utxo matches our
# expectations given the BIP32 derivations provided
actual_address = _input["witness_utxo"]["scriptPubKey"]["address"]
# Ensure expected address implied by metadata matches actual address supplied
[expected_address] = deriveaddresses(dkeys, m, idx, idx, change)
if expected_address != actual_address:
return "Tx input {} contains an incorrect address based on the provided bip32 metadata.".format(i)
# Ensure sighash is not set at all or set correctly
if "sighash" in _input and _input["sighash"] != "ALL":
return "Tx input {} contains an unsupported PSBT_IN_SIGHASH_TYPE type: {}".format(i, _input["sighash"])
# validation successful (update importmulti indices)
response["importmulti_idxs"].add(idx)
return None
def validate_psbt_out(dkeys, m, tx, output, i, response):
    # Get the corresponding Tx output
tx_out = tx["vout"][i]
# The output can't be change if it doesn't spend back to the `witness_v0_scripthash` script type
scriptpubkey_type = tx_out["scriptPubKey"]["type"]
if scriptpubkey_type != "witness_v0_scripthash":
return None
# The output can't be change if it doesn't contain bip32 metadata
if "bip32_derivs" not in output:
return None
# Get the actual Tx address from the scriptPubKey
[actual_address] = tx_out["scriptPubKey"]["addresses"]
# Validate psbt output bip32 derivations
(bip32_derivs_err, change, idx) = validate_psbt_bip32_derivs(dkeys, output, i, "output")
if bip32_derivs_err is not None:
return bip32_derivs_err
# Ensure the actual address in the Tx output matches the expected address given
# the BIP32 derivation paths
[expected_address] = deriveaddresses(dkeys, m, idx, idx, change)
if expected_address != actual_address:
return "Tx output {} spends bitcoin to an incorrect address based on the provided bip32 metadata.".format(i)
# Ensure that the witness script hash maps to the transaction output's scriptPubKey
if "witness_script" not in output:
return "Tx output {} must include a PSBT_OUT_WITNESS_SCRIPT field.".format(i)
witness_script = output["witness_script"]["hex"]
witness_script_hash = hexlify(sha256(unhexlify(witness_script)).digest()).decode()
scriptPubKeyParts = tx_out["scriptPubKey"]["asm"].split(" ")
if witness_script_hash != scriptPubKeyParts[1]:
return "The SHA256 of PSBT_OUT_WITNESS_SCRIPT and the scriptPubKey of Tx output {} don't match.".format(i)
response["change_idxs"].append(i) # change validations pass
return None
def validate_psbt(psbt_raw, dkeys, m):
"""
******************************************************************
******************** SECURITY CRITICAL *************************
******************************************************************
Validate whether the psbt is safe to sign based on exhaustive checks
psbt_raw: <string> base64 encoded psbt
dkeys: List<string> wallet descriptor keys (including our xprv)
m: <int> number of multisig keys required for withdrawal
returns: dict
error: <str> an error if one is found
warning: List<str> warnings about psbt
psbt: <dict> python dict loaded from decodepsbt RPC call
change_idxs: List<int> list of change indices
importmulti_idxs: Set<int> set of indices to pass to the importmulti RPC call
analysis: <dict> python dict loaded from analyzepsbt RPC call
"""
response = {
"error": None,
"warning": [],
"psbt": None,
"change_idxs": [],
"importmulti_idxs": set(),
"analysis": None
}
try:
# attempt to decode psbt
psbt = bitcoin_cli_json("decodepsbt", psbt_raw)
# attempt to analyze psbt (should always succeed if decode succeeds)
response["analysis"] = bitcoin_cli_json("analyzepsbt", psbt_raw)
# validate all inputs
for i, _input in enumerate(psbt["inputs"]):
psbt_in_validation_err = validate_psbt_in(dkeys, m, _input, i, response)
if psbt_in_validation_err is not None:
response["error"] = psbt_in_validation_err
return response
# validate all outputs
tx = psbt["tx"]
for i, output in enumerate(psbt["outputs"]):
psbt_out_validation_err = validate_psbt_out(dkeys, m, tx, output, i, response)
if psbt_out_validation_err is not None:
response["error"] = psbt_out_validation_err
return response
# Display a warning to the user if we can't recognize any change (suspicious)
if len(response["change_idxs"]) == 0:
no_change_warning = "No change outputs were identified in this transaction. "
no_change_warning += "If you intended to send bitcoin back to your wallet as change, "
no_change_warning += "abort this signing process. Otherwise, you can safely ignore this warning."
response["warning"].append(no_change_warning)
# Validations succeeded!
response["psbt"] = psbt
# Catches exceptions in decoding or analyzing PSBT
except subprocess.CalledProcessError:
response["error"] = "The provided base64 encoded input is NOT a valid PSBT"
# Catch any other unexpected exception that may occur
except Exception:
response["error"] = "An unexpected error occurred during the PSBT validation process"
return response
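# Hedged usage sketch: on success a caller might observe something like
#   res = validate_psbt(psbt_b64, dkeys, m)   # psbt_b64/dkeys/m supplied elsewhere
#   res["error"]            -> None
#   res["change_idxs"]      -> e.g. [1]
#   res["importmulti_idxs"] -> e.g. {0, 4}
# and would abort if res["error"] is not None (see sign_psbt_interactive below).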
################################################################################################
#
# QR code helper functions
#
################################################################################################
def decode_one_qr(filename):
"""
Decode a QR code from an image file, and return the decoded string.
"""
zresults = subprocess.run(["zbarimg", "--set", "*.enable=0", "--set", "qr.enable=1",
"--quiet", "--raw", filename], check=True, stdout=subprocess.PIPE)
return zresults.stdout.decode('ascii').strip()
def decode_qr(filenames):
"""
Decode a (series of) QR codes from a (series of) image file(s), and return the decoded string.
"""
return ''.join(decode_one_qr(f) for f in filenames)
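# e.g. decode_qr(["transaction-01.png", "transaction-02.png"]) concatenates the
# per-image payloads back into the original string (filenames hypothetical; see
# the multi-part naming scheme in write_and_verify_qr_code below).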
def write_qr_code(filename, data):
"""
Write one QR code.
"""
subprocess.run(["qrencode", "-o", filename, data], check=True)
def write_and_verify_qr_code(name, filename, data):
"""
Write a QR code and then read it back to try and detect any tricksy malware tampering with it.
name: <string> short description of the data
filename: <string> filename for storing the QR code
data: <string> the data to be encoded
If data fits in a single QR code, we use filename directly. Otherwise
we add "-%02d" to each filename; e.g. transaction-01.png transaction-02.png.
The `qrencode` program can do this directly using "structured symbols" with
its -S option, but `zbarimg` doesn't recognize those at all. See:
https://github.com/mchehab/zbar/issues/66
It's also possible that some mobile phone QR scanners won't recognize such
codes. So we split it up manually here.
The theoretical limit of alphanumeric QR codes is 4296 bytes, though
somehow qrencode can do up to 4302.
"""
# Remove any stale files, so we don't confuse the user if a previous
# withdrawal created 3 files (or 1 file) and this one only has 2
base, ext = os.path.splitext(filename)
for deleteme in glob.glob("{}*{}".format(base, ext)):
os.remove(deleteme)
MAX_QR_LEN = 1000
if len(data) <= MAX_QR_LEN:
write_qr_code(filename, data)
filenames = [filename]
else:
idx = 1
filenames = []
intdata = data
while len(intdata) > 0:
thisdata = intdata[0:MAX_QR_LEN]
intdata = intdata[MAX_QR_LEN:]
thisfile = "{}-{:02d}{}".format(base, idx, ext)
filenames.append(thisfile)
write_qr_code(thisfile, thisdata)
idx += 1
qrdata = decode_qr(filenames)
if qrdata != data:
print("********************************************************************")
print("WARNING: {} QR code could not be verified properly. This could be a sign of a security breach.".format(name))
print("********************************************************************")
print("QR code for {0} written to {1}".format(name, ','.join(filenames)))
################################################################################################
#
# User sanity checking
#
################################################################################################
def safety_checklist():
checks = [
"Are you running this on a computer WITHOUT a network connection of any kind?",
"Have the wireless cards in this computer been physically removed?",
"Are you running on battery power?",
"Are you running on an operating system booted from a USB drive?",
"Is your screen hidden from view of windows, cameras, and other people?",
"Are smartphones and all other nearby devices turned off and in a Faraday bag?"]
for check in checks:
answer = input(check + " (y/n)?")
if answer.upper() != "Y":
print("\nError: Safety check failed. Exiting.")
sys.exit(1)
################################################################################################
#
# Main "entropy" function
#
################################################################################################
def unchunk(string):
"""
Remove spaces in string
"""
return string.replace(" ", "")
def chunk_string(string, length):
"""
Splits a string into chunks of [length] characters, for easy human readability
Source: https://stackoverflow.com/a/18854817/11031317
"""
return (string[0+i:length+i] for i in range(0, len(string), length))
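# e.g. list(chunk_string("deadbeef", 4)) == ["dead", "beef"] and
# unchunk("dead beef") == "deadbeef", so the two undo each other for the
# space-grouped strings printed by entropy() below.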
def entropy(length):
"""
Generate 1 random string for the user from /dev/random
"""
safety_checklist()
print("\nMaking a random data string....")
print("(If the string doesn't appear right away, please continually move your mouse cursor. These movements generate entropy which is used to create random data.)\n")
seed = subprocess.check_output("xxd -l {} -p /dev/random".format(length), shell=True)
seed = seed.decode('ascii').replace('\n', '')
print("Computer entropy: {}".format(" ".join(chunk_string(seed, 4))))
################################################################################################
#
# main "create wallet" function
#
################################################################################################
def create_wallet_interactive(dice_seed_length=100, rng_seed_length=32, data_length=32):
"""
Generate data for a new cold storage multisignature signatory (mnemonic phrase, xpub)
dice_seed_length: <int> minimum number of dice rolls required
rng_seed_length: <int> minimum length of random seed required
data_length: <int> number of bytes used to construct mnemonic (32 => 24 words, 16 => 12 words)
"""
safety_checklist()
ensure_bitcoind_running()
require_minimum_bitcoind_version(200100)
print("\nCreating cold storage private key data.\n")
dice_seed_string = read_dice_seed_interactive(dice_seed_length)
dice_seed_hash = hash_sha256(dice_seed_string)
rng_seed_string = read_rng_seed_interactive(rng_seed_length)
rng_seed_hash = hash_sha256(rng_seed_string)
# back to hex string
hex_private_key = xor_hex_strings(dice_seed_hash, rng_seed_hash)
bin_private_key = unhexlify(hex_private_key)[:data_length]
# convert private key to BIP39 mnemonic phrase
mnemo = Mnemonic("english")
mnemonic = mnemo.to_mnemonic(bin_private_key)
seed = mnemo.to_seed(mnemonic)
xprv = mnemo.to_hd_master_key(seed, network in {"testnet", "regtest"})
print("\nBIP39 Mnemonic Phrase: ")
words = mnemonic.split(" ")
for i, word in enumerate(words):
print("{}. {}".format(i + 1, word))
xpub = get_xpub_from_xkey(xprv)
fng = get_fingerprint_from_xkey(xpub)
key_origin = "[{}]{}".format(fng, xpub)
print("\npublic key metadata:\n{}\n".format(key_origin))
write_and_verify_qr_code("public-key-metadata", "public-key-metadata.png", key_origin)
################################################################################################
#
# main "deposit" function
#
################################################################################################
def view_addresses_interactive(m, n, my_xprv = None, trust_xpubs = False):
"""
Show the addresses for a multisignature wallet with the user-provided policy
m: <int> number of multisig keys required for withdrawal
n: <int> total number of multisig keys
my_xprv: <string> master xprv parsed from --private-file, if provided (optional)
trust_xpubs: <boolean> only use xpubs to generate addresses
"""
safety_checklist()
ensure_bitcoind_running()
require_minimum_bitcoind_version(200100)
if trust_xpubs:
# only prompt user for xpubs
dkeys = get_descriptor_keys_interactive(n)
xkeys = list(map(lambda dkey: dkey[2], dkeys))
else:
# prompt user for mnemonic and all xpubs in the multisignature quorum
if my_xprv is None:
my_xprv = get_mnemonic_interactive()
my_xpub = get_xpub_from_xkey(my_xprv)
dkeys = get_descriptor_keys_interactive(n)
xpubs = list(map(lambda dkey: dkey[2], dkeys))
if my_xpub not in xpubs:
print("Error: No xpubs match the xpub of the provided mnemonic phrase. Exiting.")
sys.exit(1)
xkeys = [xpub if xpub != my_xpub else my_xprv for xpub in xpubs]
# first address index to show, change flag, number of addresses to show
start, change, N = 0, 0, 10
while True:
print(LINE_BREAK)
addresses = deriveaddresses(dkeys, m, start, start + N - 1, change)
print("Derivation Path, Address")
for i, addr in enumerate(addresses):
idx = start + i
print("../{}/{}, {} (Enter {} to save as a QR code in address.png)".format(
str(change), idx, addr, str(i)))
print("\nControls:")
print(" 'NEXT' -- view next {} addresses".format(N))
print(" 'PREV' -- view previous {} addresses".format(N))
print(" 'CHANGE' -- toggle to/from change addresses")
print(" 'QUIT' -- quit proof wallet\n")
cmd = input("Enter your desired command: ")
if cmd == "NEXT":
start += N
elif cmd == "PREV" and start > 0:
start -= N
elif cmd == "CHANGE":
change = 1 if change == 0 else 0
elif cmd == "QUIT":
sys.exit()
elif cmd in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]:
write_and_verify_qr_code("address", "address.png", addresses[int(cmd)])
else:
print("Unsupported option.")
################################################################################################
#
# Main "withdraw" function
#
################################################################################################
def sign_psbt_interactive(m, n, my_xprv):
"""
Import, validate and sign a psbt to withdraw funds from cold storage.
All data required for this operation is input at the terminal
m: <int> number of multisig keys required for withdrawal
n: <int> total number of multisig keys
"""
safety_checklist()
ensure_bitcoind_running()
require_minimum_bitcoind_version(200100)
# prompt user for mnemonic and all xpubs in the multisignature quorum
if my_xprv is None:
my_xprv = get_mnemonic_interactive()
my_xpub = get_xpub_from_xkey(my_xprv)
dkeys = get_descriptor_keys_interactive(n)
xpubs = list(map(lambda dkey: dkey[2], dkeys))
if my_xpub not in xpubs:
print("Error: No xpubs match the xpub of the provided mnemonic phrase. Exiting.")
sys.exit(1)
dkeys = [
(fng, path, xpub if xpub != my_xpub else my_xprv)
for (fng, path, xpub)
in dkeys
]
# prompt user for base64 psbt string
psbt_raw = input("\nEnter the psbt for the transaction you wish to sign: ")
print("\nValidating the PSBT...")
psbt_validation = validate_psbt(psbt_raw, dkeys, m)
if psbt_validation["error"] is not None:
print("Error: {}".format(psbt_validation["error"]))
sys.exit(1)
psbt = psbt_validation["psbt"]
analysis = psbt_validation["analysis"]
change_idxs = psbt_validation["change_idxs"]
# Retrieve fields from decoded PSBT that need to be shown to user
tx = psbt["tx"]
txid = tx["txid"]
num_vin = len(tx["vin"])
num_vout = len(tx["vout"])
fee = Decimal(psbt["fee"]).quantize(SATOSHI_PLACES)
fee_rate_raw = Decimal(analysis["estimated_feerate"]).quantize(SATOSHI_PLACES)
fee_rate = round(FEE_RATE_MULTIPLIER * fee_rate_raw, 1) # convert and round BTC/kB to sat/byte
vsize = analysis["estimated_vsize"]
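# Worked example (assuming FEE_RATE_MULTIPLIER = 10**8 / 1000 = 100000, defined
# earlier in this file): an analyzepsbt estimated_feerate of 0.00020000 BTC/kB
# converts to 0.00020000 * 100000 = 20.0 sat/byte.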
# Render transaction inputs
def parse_input(psbt, idx):
txid = psbt["tx"]["vin"][idx]["txid"]
vout = psbt["tx"]["vin"][idx]["vout"]
addr = psbt["inputs"][idx]["witness_utxo"]["scriptPubKey"]["address"]
amount = Decimal(psbt["inputs"][idx]["witness_utxo"]["amount"]).quantize(SATOSHI_PLACES)
return (txid, vout, addr, amount)
inputs = list(map(lambda i: parse_input(psbt, i), range(num_vin)))
inputs_str = "Inputs ({})\n".format(num_vin)
for txin, vout, addr, amount in inputs:
txid_formatted = txin[:10] + "..." + txin[-10:]
inputs_str += "{}:{}\t{}\t{}\n".format(
txid_formatted,
vout,
addr,
amount
)
# Render transaction outputs
def parse_output(psbt, idx):
change = idx in change_idxs
[addr] = psbt["tx"]["vout"][idx]["scriptPubKey"]["addresses"]
value = Decimal(psbt["tx"]["vout"][idx]["value"]).quantize(SATOSHI_PLACES)
return (addr, value, change)
outputs = list(map(lambda i: parse_output(psbt, i), range(num_vout)))
outputs_str = "Outputs ({})\n".format(num_vout)
for addr, value, change in outputs:
change_str = "CHANGE" if change else "NOT CHANGE"
outputs_str += "[{}] {}\t{}\n".format(change_str, addr, value)
while True:
print(LINE_BREAK)
if len(psbt_validation["warning"]) > 0:
print("PSBT validation was successful, but note the following warnings before signing the transaction:")
for warning in psbt_validation["warning"]:
print("* {}".format(warning))
else:
print("PSBT validation was successful.")
print("\n+-----------------------+")
print("| |")
print("| Transaction Summary |")
print("| |")
print("+-----------------------+")
print("Transaction ID: {}".format(txid))
print("Virtual size: {} vbyte".format(vsize))
print("Fee (total): {}".format(fee))
print("Fee (rate): {} sat/byte".format(fee_rate))
print("\n{}".format(inputs_str))
print("{}".format(outputs_str))
print("Controls:")
print(" 'SIGN' -- sign the transaction")
print(" 'QUIT' -- quit proof wallet without signing the transaction")
cmd = input("\nEnter your desired command: ")
if cmd == "SIGN":
# sign psbt and write QR code(s)
psbt_signed = walletprocesspsbt(psbt_raw, psbt_validation["importmulti_idxs"], dkeys, m)
# show text of signed PSBT
print("\nSigned psbt (base64):")
print(psbt_signed["psbt"])
# show PSBT md5 fingerprint
print("\nPSBT fingerprint (md5):")
print(hash_md5(psbt_signed["psbt"]))
print()
# write qr codes of signed psbt
write_and_verify_qr_code("signed psbt", "psbt-signed.png", psbt_signed["psbt"])
sys.exit()
elif cmd == "QUIT":
print("Exiting...")
sys.exit(0)
else:
print("Unsupported option.\n")
################################################################################################
#
# main function
#
# Show help, or execute one of the four main routines: entropy, create-wallet, view-addresses,
# and sign-psbt
#
################################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(epilog="For more help, include a subcommand, e.g. `./proofwallet.py entropy --help`")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
subs = parser.add_subparsers(title='Subcommands', dest='program')
def add_networks(parser):
parser.add_argument('--testnet', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--regtest', action='store_true', help=argparse.SUPPRESS)
def add_rng(parser):
"""Add the --rng option to the parser."""
help_text = "Minimum number of 8-bit bytes to use for computer entropy when generating private keys (default: 32)"
parser.add_argument("-r", "--rng", type=int, help=help_text, default=32)
def add_m(parser):
"""Add the -m option to the parser."""
help_text = "Number of signing keys required in an m-of-n multisig wallet (default m-of-n = 1-of-2)"
parser.add_argument("-m", type=int, help=help_text, default=1)
def add_n(parser):
"""Add the -n option to the parser."""
help_text = "Number of total keys required in an m-of-n multisig wallet (default m-of-n = 1-of-2)"
parser.add_argument("-n", type=int, help=help_text, default=2)
def add_private_file(parser):
"""Add the --private-file option to the parser"""
help_text = "Filepath to a one line text file containing a BIP39 mnemonic"
parser.add_argument("--private-file", type=argparse.FileType('r'), help=help_text)
def get_xprv(args):
my_xprv = None
if args.private_file:
lines = args.private_file.readlines()
if len(lines) != 1:
print("Error: the private-file must contain 1 line with a BIP39 mnemonic.")
sys.exit(1)
my_xprv = parse_mnemonic_to_master_key(lines[0])
return my_xprv
# Entropy parser
parser_entropy = subs.add_parser('entropy', help="Generate computer entropy")
add_rng(parser_entropy)
add_networks(parser_entropy)
# Create wallet parser
parser_create_wallet = subs.add_parser('create-wallet', help="Create a BIP39 HD wallet")
add_rng(parser_create_wallet)
dice_help = "Minimum number of dice rolls to use for entropy when generating private keys (default: 100)"
parser_create_wallet.add_argument("-d", "--dice", type=int, help=dice_help, default=100)
parser_create_wallet.add_argument("--num-words", type=int, help="Number of words in BIP39 mnemonic", choices=[12, 24], default=24)
add_networks(parser_create_wallet)
# View addresses parser
parser_view_addresses = subs.add_parser('view-addresses', help="View deposit addresses")
add_m(parser_view_addresses)
add_n(parser_view_addresses)
add_networks(parser_view_addresses)
add_private_file(parser_view_addresses)
parser_view_addresses.add_argument("--trust-xpubs", action="store_true", help="Only prompts user for xpubs")
# Sign psbt parser
parser_sign_psbt = subs.add_parser('sign-psbt', help="Sign a PSBT")
add_m(parser_sign_psbt)
add_n(parser_sign_psbt)
add_networks(parser_sign_psbt)
add_private_file(parser_sign_psbt)
args = parser.parse_args()
verbose_mode = args.verbose
global network, cli_args
network = "testnet" if args.testnet else ("regtest" if args.regtest else "mainnet")
cli_args = {
'mainnet': [],
'testnet': ["-testnet"],
'regtest': ["-regtest"],
}[network]
if args.program == "entropy":
entropy(args.rng)
if args.program == "create-wallet":
seed_length = 32 if args.num_words == 24 else 16 # in bytes
create_wallet_interactive(args.dice, args.rng, seed_length)
if args.program == "view-addresses":
view_addresses_interactive(args.m, args.n, get_xprv(args), args.trust_xpubs)
if args.program == "sign-psbt":
sign_psbt_interactive(args.m, args.n, get_xprv(args))
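# Representative invocations (illustrative; flags per the parsers above, and
# the mnemonic filename is hypothetical):
#   ./proofwallet.py entropy -r 32
#   ./proofwallet.py create-wallet -d 100 -r 32 --num-words 24
#   ./proofwallet.py view-addresses -m 2 -n 3 --trust-xpubs
#   ./proofwallet.py sign-psbt -m 2 -n 3 --private-file mnemonic.txt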
hexsha: 1c3f91818350f40735892d2d47b1ccb5f5d2f5d9 | size: 359 | ext: py | lang: Python
max_stars: tests/import/import02.py | ktok07b6/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | stars: 83 | 2015-11-30T09:59:13.000Z - 2021-08-03T09:12:28.000Z
max_issues: tests/import/import02.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | issues: 4 | 2017-02-10T01:43:11.000Z - 2020-07-14T03:52:25.000Z
max_forks: tests/import/import02.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | ["MIT"] | forks: 11 | 2016-11-18T14:39:15.000Z - 2021-02-23T10:05:20.000Z
import polyphony
from polyphony import io as io_
from polyphony import timing as ti_
from polyphony import typing as ty_
def import02():
return (io_.__name__ == 'polyphony.io' and
ti_.__name__ == 'polyphony.timing' and
ty_.__name__ == 'polyphony.typing')
@polyphony.testbench
def test():
assert True == import02()
test()
hexsha: 1c3f91d806b5845c36c6292a0a25f9d4cbab9821 | size: 28266 | ext: py | lang: Python
max_stars: sdk/python/pulumi_azure/datafactory/factory.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | ["ECL-2.0", "Apache-2.0"] | stars: null
max_issues: sdk/python/pulumi_azure/datafactory/factory.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | ["ECL-2.0", "Apache-2.0"] | issues: null
max_forks: sdk/python/pulumi_azure/datafactory/factory.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | ["ECL-2.0", "Apache-2.0"] | forks: null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FactoryArgs', 'Factory']
@pulumi.input_type
class FactoryArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input['FactoryGithubConfigurationArgs']] = None,
identity: Optional[pulumi.Input['FactoryIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input['FactoryVstsConfigurationArgs']] = None):
"""
The set of arguments for constructing a Factory resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory.
:param pulumi.Input[str] customer_managed_key_id: Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity.
:param pulumi.Input['FactoryGithubConfigurationArgs'] github_configuration: A `github_configuration` block as defined below.
:param pulumi.Input['FactoryIdentityArgs'] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Data Factory. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[bool] public_network_enabled: Is the Data Factory visible to the public network? Defaults to `true`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input['FactoryVstsConfigurationArgs'] vsts_configuration: A `vsts_configuration` block as defined below.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if customer_managed_key_id is not None:
pulumi.set(__self__, "customer_managed_key_id", customer_managed_key_id)
if github_configuration is not None:
pulumi.set(__self__, "github_configuration", github_configuration)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if public_network_enabled is not None:
pulumi.set(__self__, "public_network_enabled", public_network_enabled)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vsts_configuration is not None:
pulumi.set(__self__, "vsts_configuration", vsts_configuration)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to create the Data Factory.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="customerManagedKeyId")
def customer_managed_key_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity.
"""
return pulumi.get(self, "customer_managed_key_id")
@customer_managed_key_id.setter
def customer_managed_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_managed_key_id", value)
@property
@pulumi.getter(name="githubConfiguration")
def github_configuration(self) -> Optional[pulumi.Input['FactoryGithubConfigurationArgs']]:
"""
A `github_configuration` block as defined below.
"""
return pulumi.get(self, "github_configuration")
@github_configuration.setter
def github_configuration(self, value: Optional[pulumi.Input['FactoryGithubConfigurationArgs']]):
pulumi.set(self, "github_configuration", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['FactoryIdentityArgs']]:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['FactoryIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Data Factory. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="publicNetworkEnabled")
def public_network_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Is the Data Factory visible to the public network? Defaults to `true`.
"""
return pulumi.get(self, "public_network_enabled")
@public_network_enabled.setter
def public_network_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "public_network_enabled", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vstsConfiguration")
def vsts_configuration(self) -> Optional[pulumi.Input['FactoryVstsConfigurationArgs']]:
"""
A `vsts_configuration` block as defined below.
"""
return pulumi.get(self, "vsts_configuration")
@vsts_configuration.setter
def vsts_configuration(self, value: Optional[pulumi.Input['FactoryVstsConfigurationArgs']]):
pulumi.set(self, "vsts_configuration", value)
@pulumi.input_type
class _FactoryState:
def __init__(__self__, *,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input['FactoryGithubConfigurationArgs']] = None,
identity: Optional[pulumi.Input['FactoryIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input['FactoryVstsConfigurationArgs']] = None):
"""
Input properties used for looking up and filtering Factory resources.
:param pulumi.Input[str] customer_managed_key_id: Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity.
:param pulumi.Input['FactoryGithubConfigurationArgs'] github_configuration: A `github_configuration` block as defined below.
:param pulumi.Input['FactoryIdentityArgs'] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Data Factory. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[bool] public_network_enabled: Is the Data Factory visible to the public network? Defaults to `true`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input['FactoryVstsConfigurationArgs'] vsts_configuration: A `vsts_configuration` block as defined below.
"""
if customer_managed_key_id is not None:
pulumi.set(__self__, "customer_managed_key_id", customer_managed_key_id)
if github_configuration is not None:
pulumi.set(__self__, "github_configuration", github_configuration)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if public_network_enabled is not None:
pulumi.set(__self__, "public_network_enabled", public_network_enabled)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vsts_configuration is not None:
pulumi.set(__self__, "vsts_configuration", vsts_configuration)
@property
@pulumi.getter(name="customerManagedKeyId")
def customer_managed_key_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity.
"""
return pulumi.get(self, "customer_managed_key_id")
@customer_managed_key_id.setter
def customer_managed_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_managed_key_id", value)
@property
@pulumi.getter(name="githubConfiguration")
def github_configuration(self) -> Optional[pulumi.Input['FactoryGithubConfigurationArgs']]:
"""
A `github_configuration` block as defined below.
"""
return pulumi.get(self, "github_configuration")
@github_configuration.setter
def github_configuration(self, value: Optional[pulumi.Input['FactoryGithubConfigurationArgs']]):
pulumi.set(self, "github_configuration", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['FactoryIdentityArgs']]:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['FactoryIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Data Factory. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="publicNetworkEnabled")
def public_network_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Is the Data Factory visible to the public network? Defaults to `true`.
"""
return pulumi.get(self, "public_network_enabled")
@public_network_enabled.setter
def public_network_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "public_network_enabled", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to create the Data Factory.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vstsConfiguration")
def vsts_configuration(self) -> Optional[pulumi.Input['FactoryVstsConfigurationArgs']]:
"""
A `vsts_configuration` block as defined below.
"""
return pulumi.get(self, "vsts_configuration")
@vsts_configuration.setter
def vsts_configuration(self, value: Optional[pulumi.Input['FactoryVstsConfigurationArgs']]):
pulumi.set(self, "vsts_configuration", value)
class Factory(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['FactoryIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']]] = None,
__props__=None):
"""
Manages an Azure Data Factory (Version 2).
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_factory = azure.datafactory.Factory("exampleFactory",
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
```
## Import
Data Factory can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:datafactory/factory:Factory example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.DataFactory/factories/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] customer_managed_key_id: Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity.
:param pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']] github_configuration: A `github_configuration` block as defined below.
:param pulumi.Input[pulumi.InputType['FactoryIdentityArgs']] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Data Factory. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[bool] public_network_enabled: Is the Data Factory visible to the public network? Defaults to `true`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']] vsts_configuration: A `vsts_configuration` block as defined below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FactoryArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Azure Data Factory (Version 2).
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_factory = azure.datafactory.Factory("exampleFactory",
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
```
## Import
Data Factory can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:datafactory/factory:Factory example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.DataFactory/factories/example
```
:param str resource_name: The name of the resource.
:param FactoryArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FactoryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['FactoryIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FactoryArgs.__new__(FactoryArgs)
__props__.__dict__["customer_managed_key_id"] = customer_managed_key_id
__props__.__dict__["github_configuration"] = github_configuration
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["public_network_enabled"] = public_network_enabled
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["vsts_configuration"] = vsts_configuration
super(Factory, __self__).__init__(
'azure:datafactory/factory:Factory',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['FactoryIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']]] = None) -> 'Factory':
"""
Get an existing Factory resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] customer_managed_key_id: Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity.
:param pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']] github_configuration: A `github_configuration` block as defined below.
:param pulumi.Input[pulumi.InputType['FactoryIdentityArgs']] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Data Factory. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[bool] public_network_enabled: Is the Data Factory visible to the public network? Defaults to `true`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']] vsts_configuration: A `vsts_configuration` block as defined below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FactoryState.__new__(_FactoryState)
__props__.__dict__["customer_managed_key_id"] = customer_managed_key_id
__props__.__dict__["github_configuration"] = github_configuration
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["public_network_enabled"] = public_network_enabled
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["vsts_configuration"] = vsts_configuration
return Factory(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="customerManagedKeyId")
def customer_managed_key_id(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity.
"""
return pulumi.get(self, "customer_managed_key_id")
@property
@pulumi.getter(name="githubConfiguration")
def github_configuration(self) -> pulumi.Output[Optional['outputs.FactoryGithubConfiguration']]:
"""
A `github_configuration` block as defined below.
"""
return pulumi.get(self, "github_configuration")
@property
@pulumi.getter
def identity(self) -> pulumi.Output['outputs.FactoryIdentity']:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Data Factory. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicNetworkEnabled")
def public_network_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Is the Data Factory visible to the public network? Defaults to `true`.
"""
return pulumi.get(self, "public_network_enabled")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the Data Factory.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="vstsConfiguration")
def vsts_configuration(self) -> pulumi.Output[Optional['outputs.FactoryVstsConfiguration']]:
"""
A `vsts_configuration` block as defined below.
"""
return pulumi.get(self, "vsts_configuration")
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="publicNetworkEnabled")
def public_network_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "public_network_enabled")
@public_network_enabled.setter
def public_network_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "public_network_enabled", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vstsConfiguration")
def vsts_configuration(self) -> Optional[pulumi.Input['FactoryVstsConfigurationArgs']]:
return pulumi.get(self, "vsts_configuration")
@vsts_configuration.setter
def vsts_configuration(self, value: Optional[pulumi.Input['FactoryVstsConfigurationArgs']]):
pulumi.set(self, "vsts_configuration", value)
class Factory(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['FactoryIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: FactoryArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FactoryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['FactoryIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FactoryArgs.__new__(FactoryArgs)
__props__.__dict__["customer_managed_key_id"] = customer_managed_key_id
__props__.__dict__["github_configuration"] = github_configuration
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["public_network_enabled"] = public_network_enabled
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["vsts_configuration"] = vsts_configuration
super(Factory, __self__).__init__(
'azure:datafactory/factory:Factory',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
customer_managed_key_id: Optional[pulumi.Input[str]] = None,
github_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryGithubConfigurationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['FactoryIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vsts_configuration: Optional[pulumi.Input[pulumi.InputType['FactoryVstsConfigurationArgs']]] = None) -> 'Factory':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FactoryState.__new__(_FactoryState)
__props__.__dict__["customer_managed_key_id"] = customer_managed_key_id
__props__.__dict__["github_configuration"] = github_configuration
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["public_network_enabled"] = public_network_enabled
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["vsts_configuration"] = vsts_configuration
return Factory(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="customerManagedKeyId")
def customer_managed_key_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "customer_managed_key_id")
@property
@pulumi.getter(name="githubConfiguration")
def github_configuration(self) -> pulumi.Output[Optional['outputs.FactoryGithubConfiguration']]:
return pulumi.get(self, "github_configuration")
@property
@pulumi.getter
def identity(self) -> pulumi.Output['outputs.FactoryIdentity']:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicNetworkEnabled")
def public_network_enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "public_network_enabled")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="vstsConfiguration")
def vsts_configuration(self) -> pulumi.Output[Optional['outputs.FactoryVstsConfiguration']]:
return pulumi.get(self, "vsts_configuration")
| true
| true
|
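A minimal usage sketch for the Factory resource above, assuming the pulumi-azure classic provider is installed; the resource group, location, and tag values are placeholders, not taken from the source.

import pulumi
import pulumi_azure as azure

# Hypothetical resource group to satisfy the required resource_group_name.
example_rg = azure.core.ResourceGroup("example-rg", location="West Europe")

# Instantiate the data factory; matches the 'azure:datafactory/factory:Factory'
# type token registered in __init__ above.
factory = azure.datafactory.Factory(
    "example-factory",
    resource_group_name=example_rg.name,
    location=example_rg.location,
    tags={"environment": "dev"},
)

pulumi.export("factory_name", factory.name)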
1c3f92052a683c5775b5e40afe7aeb1e5fe16842
| 1,786
|
py
|
Python
|
OSMtools/utils/configmanager.py
|
Benni5K/orstools-qgis-plugin
|
d255cccd4be71b63321c68178e7111d27f2660fa
|
[
"MIT"
] | null | null | null |
OSMtools/utils/configmanager.py
|
Benni5K/orstools-qgis-plugin
|
d255cccd4be71b63321c68178e7111d27f2660fa
|
[
"MIT"
] | null | null | null |
OSMtools/utils/configmanager.py
|
Benni5K/orstools-qgis-plugin
|
d255cccd4be71b63321c68178e7111d27f2660fa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
OSMtools
A QGIS plugin
falk
-------------------
begin : 2017-02-01
git sha : $Format:%H$
copyright : (C) 2017 by Nils Nolde
email : nils.nolde@gmail.com
***************************************************************************/
This plugin provides access to the various APIs from OpenRouteService
(https://openrouteservice.org), developed and
maintained by GIScience team at University of Heidelberg, Germany. By using
this plugin you agree to the ORS terms of service
(https://openrouteservice.org/terms-of-service/).
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os.path
import yaml
from OSMtools import BASE_DIR, CONFIG
def read():
with open(os.path.join(BASE_DIR, CONFIG)) as f:
doc = yaml.safe_load(f)
return doc
def write(key, value):
doc = read()
doc[key] = value
with open(os.path.join(BASE_DIR, CONFIG), 'w') as f:
yaml.safe_dump(doc, f)
| 37.208333
| 78
| 0.416573
|
import os.path
import yaml
from OSMtools import BASE_DIR, CONFIG
def read():
with open(os.path.join(BASE_DIR, CONFIG)) as f:
doc = yaml.safe_load(f)
return doc
def write(key, value):
doc = read()
doc[key] = value
with open(os.path.join(BASE_DIR, CONFIG), 'w') as f:
yaml.safe_dump(doc, f)
| true
| true
|
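A short sketch of how the read/write helpers above might be used from elsewhere in the plugin; the 'api_key' key is illustrative, not a key defined in this file.

from OSMtools.utils import configmanager

# Load the whole YAML config as a dict, then persist a single updated key.
config = configmanager.read()
print(config)
configmanager.write('api_key', 'my-new-key')  # 'api_key' is a hypothetical key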
1c3f920f5859c8fbcfe7d3f2e78549e10c915289
| 11,465
|
py
|
Python
|
qrl/services/PublicAPIService.py
|
michael-go/QRL
|
591dfa60acca68e3ef6b4a09e393907939ae92b0
|
[
"MIT"
] | null | null | null |
qrl/services/PublicAPIService.py
|
michael-go/QRL
|
591dfa60acca68e3ef6b4a09e393907939ae92b0
|
[
"MIT"
] | null | null | null |
qrl/services/PublicAPIService.py
|
michael-go/QRL
|
591dfa60acca68e3ef6b4a09e393907939ae92b0
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from grpc import StatusCode
from qrl.core import logger
from qrl.core.StakeValidator import StakeValidator
from qrl.core.Transaction import Transaction
from qrl.core.qrlnode import QRLNode
from qrl.generated import qrl_pb2
from qrl.generated.qrl_pb2_grpc import PublicAPIServicer
from qrl.services.grpcHelper import grpc_exception_wrapper
class PublicAPIService(PublicAPIServicer):
MAX_REQUEST_QUANTITY = 100
# TODO: Separate the Service from the node model
def __init__(self, qrlnode: QRLNode):
self.qrlnode = qrlnode
@grpc_exception_wrapper(qrl_pb2.GetNodeStateResp, StatusCode.UNKNOWN)
def GetNodeState(self, request: qrl_pb2.GetNodeStateReq, context) -> qrl_pb2.GetNodeStateResp:
return qrl_pb2.GetNodeStateResp(info=self.qrlnode.getNodeInfo())
@grpc_exception_wrapper(qrl_pb2.GetKnownPeersResp, StatusCode.UNKNOWN)
def GetKnownPeers(self, request: qrl_pb2.GetKnownPeersReq, context) -> qrl_pb2.GetKnownPeersResp:
response = qrl_pb2.GetKnownPeersResp()
response.node_info.CopyFrom(self.qrlnode.getNodeInfo())
response.known_peers.extend([qrl_pb2.Peer(ip=p) for p in self.qrlnode._peer_addresses])
return response
@grpc_exception_wrapper(qrl_pb2.GetStatsResp, StatusCode.UNKNOWN)
def GetStats(self, request: qrl_pb2.GetStatsReq, context) -> qrl_pb2.GetStatsResp:
response = qrl_pb2.GetStatsResp()
response.node_info.CopyFrom(self.qrlnode.getNodeInfo())
response.epoch = self.qrlnode.epoch
response.uptime_network = self.qrlnode.uptime_network
response.stakers_count = self.qrlnode.stakers_count
response.block_last_reward = self.qrlnode.block_last_reward
response.block_time_mean = self.qrlnode.block_time_mean
response.block_time_sd = self.qrlnode.block_time_sd
response.coins_total_supply = self.qrlnode.coin_supply_max
response.coins_emitted = self.qrlnode.coin_supply
response.coins_atstake = self.qrlnode.coin_atstake
return response
@grpc_exception_wrapper(qrl_pb2.GetAddressStateResp, StatusCode.UNKNOWN)
def GetAddressState(self, request: qrl_pb2.GetAddressStateReq, context) -> qrl_pb2.GetAddressStateResp:
address_state = self.qrlnode.get_address_state(request.address)
return qrl_pb2.GetAddressStateResp(state=address_state)
@grpc_exception_wrapper(qrl_pb2.TransferCoinsResp, StatusCode.UNKNOWN)
def TransferCoins(self, request: qrl_pb2.TransferCoinsReq, context) -> qrl_pb2.TransferCoinsResp:
logger.debug("[PublicAPI] TransferCoins")
tx = self.qrlnode.create_send_tx(addr_from=request.address_from,
addr_to=request.address_to,
amount=request.amount,
fee=request.fee,
xmss_pk=request.xmss_pk,
xmss_ots_index=request.xmss_ots_index)
return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
@grpc_exception_wrapper(qrl_pb2.TransferCoinsResp, StatusCode.UNKNOWN)
def PushTransaction(self, request: qrl_pb2.PushTransactionReq, context) -> qrl_pb2.PushTransactionResp:
logger.debug("[PublicAPI] PushTransaction")
tx = Transaction.from_pbdata(request.transaction_signed)
submitted = self.qrlnode.submit_send_tx(tx)
# FIXME: Improve response type
# Prepare response
answer = qrl_pb2.PushTransactionResp()
answer.some_response = str(submitted)
return answer
@grpc_exception_wrapper(qrl_pb2.UnsignedTransactionResp, StatusCode.UNKNOWN)
def GetUnsignedTransaction(self, request: qrl_pb2.UnsignedTransactionReq, context) -> qrl_pb2.UnsignedTransactionResp:
logger.debug("[PublicAPI] GetUnsignedTxn")
tx = self.qrlnode.create_unsigned_tx(request.transaction)
response = None
if tx:
response = tx.pbdata
return qrl_pb2.UnsignedTransactionResp(transaction=response)
@grpc_exception_wrapper(qrl_pb2.TransferCoinsResp, StatusCode.UNKNOWN)
def GetLatticePublicKeyTxn(self, request: qrl_pb2.LatticePublicKeyTxnReq, context) -> qrl_pb2.TransferCoinsResp:
logger.debug("[PublicAPI] GetLatticePublicKeyTxn")
tx = self.qrlnode.create_lt(addr_from=request.address_from,
kyber_pk=request.kyber_pk,
tesla_pk=request.tesla_pk,
xmss_pk=request.xmss_pk,
xmss_ots_index=request.xmss_ots_index)
return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
@grpc_exception_wrapper(qrl_pb2.GetObjectResp, StatusCode.UNKNOWN)
def GetObject(self, request: qrl_pb2.GetObjectReq, context) -> qrl_pb2.GetObjectResp:
logger.debug("[PublicAPI] GetObject")
answer = qrl_pb2.GetObjectResp()
answer.found = False
# FIXME: We need a unified way to access and validate data.
        query = bytes(request.query)  # the query arrives as a string; if a 'Q' address prefix is detected, convert, etc.
if self.qrlnode.address_is_valid(query):
if self.qrlnode.get_address_is_used(query):
address_state = self.qrlnode.get_address_state(query)
if address_state is not None:
answer.found = True
answer.address_state.CopyFrom(address_state)
return answer
transaction = self.qrlnode.get_transaction(query)
if transaction is not None:
answer.found = True
block_index = self.qrlnode.get_blockidx_from_txhash(transaction.txhash)
blockheader = None
if block_index:
block = self.qrlnode.get_block_from_index(block_index)
blockheader = block.blockheader.pbdata
txextended = qrl_pb2.TransactionExtended(header=blockheader,
tx=transaction.pbdata)
answer.transaction.CopyFrom(txextended)
return answer
block = self.qrlnode.get_block_from_hash(query)
if block is not None:
answer.found = True
answer.block.CopyFrom(block.pbdata)
return answer
# NOTE: This is temporary, indexes are accepted for blocks
try:
query_str = query.decode()
query_index = int(query_str)
block = self.qrlnode.get_block_from_index(query_index)
if block is not None:
answer.found = True
answer.block.CopyFrom(block.pbdata)
return answer
except Exception:
pass
return answer
@grpc_exception_wrapper(qrl_pb2.TokenDetailedList, StatusCode.UNKNOWN)
def GetTokenDetailedList(self, request: qrl_pb2.Empty, context) -> qrl_pb2.TokenDetailedList:
logger.debug("[PublicAPI] TokenDetailedList")
token_detailed_list = self.qrlnode.get_token_detailed_list()
return token_detailed_list
def _stake_validator_to_staker_data(self, stake_validator: StakeValidator) -> qrl_pb2.StakerData:
answer = qrl_pb2.StakerData()
answer.address_state.CopyFrom(self.qrlnode.get_address_state(stake_validator.address))
answer.terminator_hash = stake_validator.terminator_hash
return answer
@grpc_exception_wrapper(qrl_pb2.GetStakersResp, StatusCode.UNKNOWN)
def GetStakers(self, request: qrl_pb2.GetStakersReq, context) -> qrl_pb2.GetStakersResp:
logger.debug("[PublicAPI] GetStakers")
response = qrl_pb2.GetStakersResp()
quantity = min(request.quantity, self.MAX_REQUEST_QUANTITY)
if request.filter == qrl_pb2.GetStakersReq.CURRENT:
sv_list = self.qrlnode.get_current_stakers(offset=request.offset, count=quantity)
elif request.filter == qrl_pb2.GetStakersReq.NEXT:
sv_list = self.qrlnode.get_next_stakers(offset=request.offset, count=quantity)
else:
raise NotImplementedError("filter value is not supported")
sv_data = [self._stake_validator_to_staker_data(sv) for sv in sv_list]
response.stakers.extend(sv_data)
return response
@grpc_exception_wrapper(qrl_pb2.GetLatestDataResp, StatusCode.UNKNOWN)
def GetLatestData(self, request: qrl_pb2.GetLatestDataReq, context) -> qrl_pb2.GetLatestDataResp:
logger.debug("[PublicAPI] GetLatestData")
response = qrl_pb2.GetLatestDataResp()
all_requested = request.filter == qrl_pb2.GetLatestDataReq.ALL
quantity = min(request.quantity, self.MAX_REQUEST_QUANTITY)
if all_requested or request.filter == qrl_pb2.GetLatestDataReq.BLOCKHEADERS:
result = []
for blk in self.qrlnode.get_latest_blocks(offset=request.offset, count=quantity):
transaction_count = qrl_pb2.TransactionCount()
for tx in blk.transactions:
transaction_count.count[tx.type] += 1
for tx in blk.vote:
transaction_count.count[tx.type] += 1
for tx in blk.duplicate_transactions:
transaction_count.count[tx.type] += 1
voted_weight = 0
total_stake_weight = 0
if blk.block_number > 0:
voted_weight, total_stake_weight = self.qrlnode.get_vote_metadata(blk.block_number)
result.append(qrl_pb2.BlockHeaderExtended(header=blk.blockheader.pbdata,
transaction_count=transaction_count,
voted_weight=voted_weight,
total_stake_weight=total_stake_weight))
response.blockheaders.extend(result)
if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS:
result = []
for tx in self.qrlnode.get_latest_transactions(offset=request.offset, count=quantity):
# FIXME: Improve this once we have a proper database schema
block_index = self.qrlnode.get_blockidx_from_txhash(tx.txhash)
block = self.qrlnode.get_block_from_index(block_index)
txextended = qrl_pb2.TransactionExtended(header=block.blockheader.pbdata,
tx=tx.pbdata)
result.append(txextended)
response.transactions.extend(result)
if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS_UNCONFIRMED:
result = []
for tx in self.qrlnode.get_latest_transactions_unconfirmed(offset=request.offset, count=quantity):
txextended = qrl_pb2.TransactionExtended(header=None,
tx=tx.pbdata)
result.append(txextended)
response.transactions_unconfirmed.extend(result)
return response
| 48.172269
| 122
| 0.665329
|
from grpc import StatusCode
from qrl.core import logger
from qrl.core.StakeValidator import StakeValidator
from qrl.core.Transaction import Transaction
from qrl.core.qrlnode import QRLNode
from qrl.generated import qrl_pb2
from qrl.generated.qrl_pb2_grpc import PublicAPIServicer
from qrl.services.grpcHelper import grpc_exception_wrapper
class PublicAPIService(PublicAPIServicer):
MAX_REQUEST_QUANTITY = 100
def __init__(self, qrlnode: QRLNode):
self.qrlnode = qrlnode
@grpc_exception_wrapper(qrl_pb2.GetNodeStateResp, StatusCode.UNKNOWN)
def GetNodeState(self, request: qrl_pb2.GetNodeStateReq, context) -> qrl_pb2.GetNodeStateResp:
return qrl_pb2.GetNodeStateResp(info=self.qrlnode.getNodeInfo())
@grpc_exception_wrapper(qrl_pb2.GetKnownPeersResp, StatusCode.UNKNOWN)
def GetKnownPeers(self, request: qrl_pb2.GetKnownPeersReq, context) -> qrl_pb2.GetKnownPeersResp:
response = qrl_pb2.GetKnownPeersResp()
response.node_info.CopyFrom(self.qrlnode.getNodeInfo())
response.known_peers.extend([qrl_pb2.Peer(ip=p) for p in self.qrlnode._peer_addresses])
return response
@grpc_exception_wrapper(qrl_pb2.GetStatsResp, StatusCode.UNKNOWN)
def GetStats(self, request: qrl_pb2.GetStatsReq, context) -> qrl_pb2.GetStatsResp:
response = qrl_pb2.GetStatsResp()
response.node_info.CopyFrom(self.qrlnode.getNodeInfo())
response.epoch = self.qrlnode.epoch
response.uptime_network = self.qrlnode.uptime_network
response.stakers_count = self.qrlnode.stakers_count
response.block_last_reward = self.qrlnode.block_last_reward
response.block_time_mean = self.qrlnode.block_time_mean
response.block_time_sd = self.qrlnode.block_time_sd
response.coins_total_supply = self.qrlnode.coin_supply_max
response.coins_emitted = self.qrlnode.coin_supply
response.coins_atstake = self.qrlnode.coin_atstake
return response
@grpc_exception_wrapper(qrl_pb2.GetAddressStateResp, StatusCode.UNKNOWN)
def GetAddressState(self, request: qrl_pb2.GetAddressStateReq, context) -> qrl_pb2.GetAddressStateResp:
address_state = self.qrlnode.get_address_state(request.address)
return qrl_pb2.GetAddressStateResp(state=address_state)
@grpc_exception_wrapper(qrl_pb2.TransferCoinsResp, StatusCode.UNKNOWN)
def TransferCoins(self, request: qrl_pb2.TransferCoinsReq, context) -> qrl_pb2.TransferCoinsResp:
logger.debug("[PublicAPI] TransferCoins")
tx = self.qrlnode.create_send_tx(addr_from=request.address_from,
addr_to=request.address_to,
amount=request.amount,
fee=request.fee,
xmss_pk=request.xmss_pk,
xmss_ots_index=request.xmss_ots_index)
return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
@grpc_exception_wrapper(qrl_pb2.TransferCoinsResp, StatusCode.UNKNOWN)
def PushTransaction(self, request: qrl_pb2.PushTransactionReq, context) -> qrl_pb2.PushTransactionResp:
logger.debug("[PublicAPI] PushTransaction")
tx = Transaction.from_pbdata(request.transaction_signed)
submitted = self.qrlnode.submit_send_tx(tx)
answer = qrl_pb2.PushTransactionResp()
answer.some_response = str(submitted)
return answer
@grpc_exception_wrapper(qrl_pb2.UnsignedTransactionResp, StatusCode.UNKNOWN)
def GetUnsignedTransaction(self, request: qrl_pb2.UnsignedTransactionReq, context) -> qrl_pb2.UnsignedTransactionResp:
logger.debug("[PublicAPI] GetUnsignedTxn")
tx = self.qrlnode.create_unsigned_tx(request.transaction)
response = None
if tx:
response = tx.pbdata
return qrl_pb2.UnsignedTransactionResp(transaction=response)
@grpc_exception_wrapper(qrl_pb2.TransferCoinsResp, StatusCode.UNKNOWN)
def GetLatticePublicKeyTxn(self, request: qrl_pb2.LatticePublicKeyTxnReq, context) -> qrl_pb2.TransferCoinsResp:
logger.debug("[PublicAPI] GetLatticePublicKeyTxn")
tx = self.qrlnode.create_lt(addr_from=request.address_from,
kyber_pk=request.kyber_pk,
tesla_pk=request.tesla_pk,
xmss_pk=request.xmss_pk,
xmss_ots_index=request.xmss_ots_index)
return qrl_pb2.TransferCoinsResp(transaction_unsigned=tx.pbdata)
@grpc_exception_wrapper(qrl_pb2.GetObjectResp, StatusCode.UNKNOWN)
def GetObject(self, request: qrl_pb2.GetObjectReq, context) -> qrl_pb2.GetObjectResp:
logger.debug("[PublicAPI] GetObject")
answer = qrl_pb2.GetObjectResp()
answer.found = False
query = bytes(request.query)
if self.qrlnode.address_is_valid(query):
if self.qrlnode.get_address_is_used(query):
address_state = self.qrlnode.get_address_state(query)
if address_state is not None:
answer.found = True
answer.address_state.CopyFrom(address_state)
return answer
transaction = self.qrlnode.get_transaction(query)
if transaction is not None:
answer.found = True
block_index = self.qrlnode.get_blockidx_from_txhash(transaction.txhash)
blockheader = None
if block_index:
block = self.qrlnode.get_block_from_index(block_index)
blockheader = block.blockheader.pbdata
txextended = qrl_pb2.TransactionExtended(header=blockheader,
tx=transaction.pbdata)
answer.transaction.CopyFrom(txextended)
return answer
block = self.qrlnode.get_block_from_hash(query)
if block is not None:
answer.found = True
answer.block.CopyFrom(block.pbdata)
return answer
try:
query_str = query.decode()
query_index = int(query_str)
block = self.qrlnode.get_block_from_index(query_index)
if block is not None:
answer.found = True
answer.block.CopyFrom(block.pbdata)
return answer
except Exception:
pass
return answer
@grpc_exception_wrapper(qrl_pb2.TokenDetailedList, StatusCode.UNKNOWN)
def GetTokenDetailedList(self, request: qrl_pb2.Empty, context) -> qrl_pb2.TokenDetailedList:
logger.debug("[PublicAPI] TokenDetailedList")
token_detailed_list = self.qrlnode.get_token_detailed_list()
return token_detailed_list
def _stake_validator_to_staker_data(self, stake_validator: StakeValidator) -> qrl_pb2.StakerData:
answer = qrl_pb2.StakerData()
answer.address_state.CopyFrom(self.qrlnode.get_address_state(stake_validator.address))
answer.terminator_hash = stake_validator.terminator_hash
return answer
@grpc_exception_wrapper(qrl_pb2.GetStakersResp, StatusCode.UNKNOWN)
def GetStakers(self, request: qrl_pb2.GetStakersReq, context) -> qrl_pb2.GetStakersResp:
logger.debug("[PublicAPI] GetStakers")
response = qrl_pb2.GetStakersResp()
quantity = min(request.quantity, self.MAX_REQUEST_QUANTITY)
if request.filter == qrl_pb2.GetStakersReq.CURRENT:
sv_list = self.qrlnode.get_current_stakers(offset=request.offset, count=quantity)
elif request.filter == qrl_pb2.GetStakersReq.NEXT:
sv_list = self.qrlnode.get_next_stakers(offset=request.offset, count=quantity)
else:
raise NotImplementedError("filter value is not supported")
sv_data = [self._stake_validator_to_staker_data(sv) for sv in sv_list]
response.stakers.extend(sv_data)
return response
@grpc_exception_wrapper(qrl_pb2.GetLatestDataResp, StatusCode.UNKNOWN)
def GetLatestData(self, request: qrl_pb2.GetLatestDataReq, context) -> qrl_pb2.GetLatestDataResp:
logger.debug("[PublicAPI] GetLatestData")
response = qrl_pb2.GetLatestDataResp()
all_requested = request.filter == qrl_pb2.GetLatestDataReq.ALL
quantity = min(request.quantity, self.MAX_REQUEST_QUANTITY)
if all_requested or request.filter == qrl_pb2.GetLatestDataReq.BLOCKHEADERS:
result = []
for blk in self.qrlnode.get_latest_blocks(offset=request.offset, count=quantity):
transaction_count = qrl_pb2.TransactionCount()
for tx in blk.transactions:
transaction_count.count[tx.type] += 1
for tx in blk.vote:
transaction_count.count[tx.type] += 1
for tx in blk.duplicate_transactions:
transaction_count.count[tx.type] += 1
voted_weight = 0
total_stake_weight = 0
if blk.block_number > 0:
voted_weight, total_stake_weight = self.qrlnode.get_vote_metadata(blk.block_number)
result.append(qrl_pb2.BlockHeaderExtended(header=blk.blockheader.pbdata,
transaction_count=transaction_count,
voted_weight=voted_weight,
total_stake_weight=total_stake_weight))
response.blockheaders.extend(result)
if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS:
result = []
for tx in self.qrlnode.get_latest_transactions(offset=request.offset, count=quantity):
block_index = self.qrlnode.get_blockidx_from_txhash(tx.txhash)
block = self.qrlnode.get_block_from_index(block_index)
txextended = qrl_pb2.TransactionExtended(header=block.blockheader.pbdata,
tx=tx.pbdata)
result.append(txextended)
response.transactions.extend(result)
if all_requested or request.filter == qrl_pb2.GetLatestDataReq.TRANSACTIONS_UNCONFIRMED:
result = []
for tx in self.qrlnode.get_latest_transactions_unconfirmed(offset=request.offset, count=quantity):
txextended = qrl_pb2.TransactionExtended(header=None,
tx=tx.pbdata)
result.append(txextended)
response.transactions_unconfirmed.extend(result)
return response
| true
| true
|
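A minimal client-side sketch for the service above, assuming grpcio-generated stubs (PublicAPIStub is the conventional counterpart of the PublicAPIServicer imported there) and a node listening on a placeholder address.

import grpc
from qrl.generated import qrl_pb2, qrl_pb2_grpc

# Host and port are assumptions; substitute the node's actual public API address.
channel = grpc.insecure_channel('127.0.0.1:9009')
stub = qrl_pb2_grpc.PublicAPIStub(channel)

# Mirrors PublicAPIService.GetNodeState above.
response = stub.GetNodeState(qrl_pb2.GetNodeStateReq())
print(response.info)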
1c3f92cfca5a1aef1e8f513f32a79c2defedb9f5
| 570
|
py
|
Python
|
load_cifarlt.py
|
caisarl76/classifier-balancing
|
b381279dc29539afb92fe40f7ca917e352aff9c6
|
[
"BSD-3-Clause"
] | null | null | null |
load_cifarlt.py
|
caisarl76/classifier-balancing
|
b381279dc29539afb92fe40f7ca917e352aff9c6
|
[
"BSD-3-Clause"
] | null | null | null |
load_cifarlt.py
|
caisarl76/classifier-balancing
|
b381279dc29539afb92fe40f7ca917e352aff9c6
|
[
"BSD-3-Clause"
] | null | null | null |
from data.dataloader import *
from utils import source_import, get_value
splits = ['train', 'test', 'val']
data_root = '/home/vision/jihun/fb_decouple/dataset/cifar-100'
from data import dataloader
from data.CIFAR100_LT.imbalance_cifar import IMBALANCECIFAR100
data = {x: dataloader.load_data(data_root=data_root,
dataset='cifar100_lt', phase=x,
batch_size=128,
sampler_dic=None,
num_workers=0)
for x in splits}
| 38
| 67
| 0.575439
|
from data.dataloader import *
from utils import source_import, get_value
splits = ['train', 'test', 'val']
data_root = '/home/vision/jihun/fb_decouple/dataset/cifar-100'
from data import dataloader
from data.CIFAR100_LT.imbalance_cifar import IMBALANCECIFAR100
data = {x: dataloader.load_data(data_root=data_root,
dataset='cifar100_lt', phase=x,
batch_size=128,
sampler_dic=None,
num_workers=0)
for x in splits}
| true
| true
|
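A minimal sketch of consuming the loaders built above, assuming each loader yields (images, labels) batches as a standard PyTorch DataLoader would; the unpacking is an assumption about load_data's return shape.

# Pull one batch from each split to sanity-check shapes.
for split, loader in data.items():
    images, labels = next(iter(loader))  # assumed (images, labels) batch format
    print(split, images.shape, labels.shape)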
1c3f932c52cedfb81cad9a204332c60ff26b80d7
| 754
|
py
|
Python
|
scripts/sqlite3_sqlalchemy_core.py
|
scianand/Programming-With-Databases
|
4b4363be91f066f2852d78ae2a9240731c46f3b9
|
[
"Apache-2.0"
] | null | null | null |
scripts/sqlite3_sqlalchemy_core.py
|
scianand/Programming-With-Databases
|
4b4363be91f066f2852d78ae2a9240731c46f3b9
|
[
"Apache-2.0"
] | null | null | null |
scripts/sqlite3_sqlalchemy_core.py
|
scianand/Programming-With-Databases
|
4b4363be91f066f2852d78ae2a9240731c46f3b9
|
[
"Apache-2.0"
] | null | null | null |
"""
SQLite3 database connection using SQL Alchemy Core
"""
import sqlalchemy as db
# connect to the database
engine = db.create_engine('sqlite:///movies1.db')
connection = engine.connect()
# access the metadata of the database
metadata = db.MetaData()
movies = db.Table('Movies', metadata, autoload=True, autoload_with=engine)
# Adding record into the table
query = movies.insert().values(Title='Lust Stories', Director='Karan Johar', Year=2018)
connection.execute(query)
# Selecting all the data
query = db.select([movies])
# Filtering by year
#query = db.select([movies]).where(movies.columns.Year == 1995)
# Execute query
result_proxy = connection.execute(query)
# Fetch the query results
result_set = result_proxy.fetchall()
print(result_set)
| 25.133333
| 87
| 0.754642
|
import sqlalchemy as db
engine = db.create_engine('sqlite:///movies1.db')
connection = engine.connect()
metadata = db.MetaData()
movies = db.Table('Movies', metadata, autoload=True, autoload_with=engine)
query = movies.insert().values(Title='Lust Stories', Director='Karan Johar', Year=2018)
connection.execute(query)
query = db.select([movies])
result_proxy = connection.execute(query)
result_set = result_proxy.fetchall()
print(result_set)
| true
| true
|
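A self-contained sketch extending the commented-out year filter above with ordering; it assumes the same movies1.db schema (Title, Director, Year columns) and the SQLAlchemy 1.x select([...]) style used in the script.

import sqlalchemy as db

engine = db.create_engine('sqlite:///movies1.db')
connection = engine.connect()
metadata = db.MetaData()
movies = db.Table('Movies', metadata, autoload=True, autoload_with=engine)

# Filter by year and order by title, mirroring the commented-out query.
query = db.select([movies]).where(movies.columns.Year >= 2018).order_by(movies.columns.Title)
for row in connection.execute(query).fetchall():
    print(row)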
1c3f93b1daae20653f905fd1cfa594de85ec6a73
| 1,786
|
py
|
Python
|
contenido/tests/test_HU036_models.py
|
slinan/border
|
682bed850b3ed48d4f9e817dc9c2938388dd2181
|
[
"MIT"
] | null | null | null |
contenido/tests/test_HU036_models.py
|
slinan/border
|
682bed850b3ed48d4f9e817dc9c2938388dd2181
|
[
"MIT"
] | null | null | null |
contenido/tests/test_HU036_models.py
|
slinan/border
|
682bed850b3ed48d4f9e817dc9c2938388dd2181
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from contenido.models import Audio, Ratings, Artista
from django.contrib.auth.models import User
MODELS = [Ratings, Audio, Artista, User]
class CalificacionTest(TestCase):
def setUp(self):
        # Clear the contents of the model tables
for model in MODELS:
if len(model.objects.all()):
model.objects.all().delete()
        # Create a regular test user
self.usuario_regular = User.objects.create_user(username='dh.mahecha', email='dh.mahecha@uniandes.edu.co',
password='Ab1234')
        # Create an artist user
self.usuario_artista = User.objects.create_user(username='jdelafonte', email='jdelafonte@gmail.com', password='JFonte1234')
        # Create an artist to associate with an audio record
self.artista = Artista.objects.create(nom_artistico='Javier de la Fonte', nom_pais='Colombia', nom_ciudad='Bogota',
val_imagen='imagen.jpg', user=self.usuario_artista)
        # Create an audio record for testing
self.audio = Audio.objects.create(nom_audio='Que pecado Existe?', val_imagen='imagen4.jpg',
val_recurso='url-recurso.mp3')
self.audio.artistas.add(self.artista)
    # Tests rating deletion via direct data access
def test_rating_delete(self):
instance = Ratings.objects.create(val_rating=5, autor=self.usuario_regular,
audio=self.audio)
self.assertEqual(instance.__str__(), 5)
instance = Ratings.objects.filter(id=instance.id).delete()
self.assertEqual(Ratings.objects.count(), 0)
| 47
| 131
| 0.631579
|
from django.test import TestCase
from contenido.models import Audio, Ratings, Artista
from django.contrib.auth.models import User
MODELS = [Ratings, Audio, Artista, User]
class CalificacionTest(TestCase):
def setUp(self):
for model in MODELS:
if len(model.objects.all()):
model.objects.all().delete()
self.usuario_regular = User.objects.create_user(username='dh.mahecha', email='dh.mahecha@uniandes.edu.co',
password='Ab1234')
self.usuario_artista = User.objects.create_user(username='jdelafonte', email='jdelafonte@gmail.com', password='JFonte1234')
self.artista = Artista.objects.create(nom_artistico='Javier de la Fonte', nom_pais='Colombia', nom_ciudad='Bogota',
val_imagen='imagen.jpg', user=self.usuario_artista)
self.audio = Audio.objects.create(nom_audio='Que pecado Existe?', val_imagen='imagen4.jpg',
val_recurso='url-recurso.mp3')
self.audio.artistas.add(self.artista)
def test_rating_delete(self):
instance = Ratings.objects.create(val_rating=5, autor=self.usuario_regular,
audio=self.audio)
self.assertEqual(instance.__str__(), 5)
instance = Ratings.objects.filter(id=instance.id).delete()
self.assertEqual(Ratings.objects.count(), 0)
| true
| true
|
1c3f9460874cf861368978c0c3c74262daced354
| 234
|
py
|
Python
|
Kivy/cap01-layouts.py
|
fotavio16/PycharmProjects
|
f5be49db941de69159ec543e8a6dde61f9f94d86
|
[
"MIT"
] | null | null | null |
Kivy/cap01-layouts.py
|
fotavio16/PycharmProjects
|
f5be49db941de69159ec543e8a6dde61f9f94d86
|
[
"MIT"
] | null | null | null |
Kivy/cap01-layouts.py
|
fotavio16/PycharmProjects
|
f5be49db941de69159ec543e8a6dde61f9f94d86
|
[
"MIT"
] | null | null | null |
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
class MyGridLayout(GridLayout):
pass
class LayoutsApp(App):
def build(self):
return MyGridLayout()
if __name__ == "__main__":
LayoutsApp().run()
| 18
| 42
| 0.713675
|
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
class MyGridLayout(GridLayout):
pass
class LayoutsApp(App):
def build(self):
return MyGridLayout()
if __name__ == "__main__":
LayoutsApp().run()
| true
| true
|
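The build() above returns an empty MyGridLayout (its widgets presumably live in a layouts.kv file, which Kivy loads by App-class naming convention). A sketch of the same app populated purely in Python, with illustrative button labels:

from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout

class MyGridLayout(GridLayout):
    def __init__(self, **kwargs):
        # Two columns; the labels below are placeholders.
        super().__init__(cols=2, **kwargs)
        for label in ('one', 'two', 'three', 'four'):
            self.add_widget(Button(text=label))

class LayoutsApp(App):
    def build(self):
        return MyGridLayout()

if __name__ == "__main__":
    LayoutsApp().run()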
1c3f94a07566d83bff0daa7b693c9b2ca1ca88a3
| 1,506
|
py
|
Python
|
scoutingserver/users/tests/test_views.py
|
MichianaYouthRobotics/2019ScoutingServer
|
29a58e566851da97d78c49ef9a581ec3c540c14a
|
[
"MIT"
] | null | null | null |
scoutingserver/users/tests/test_views.py
|
MichianaYouthRobotics/2019ScoutingServer
|
29a58e566851da97d78c49ef9a581ec3c540c14a
|
[
"MIT"
] | null | null | null |
scoutingserver/users/tests/test_views.py
|
MichianaYouthRobotics/2019ScoutingServer
|
29a58e566851da97d78c49ef9a581ec3c540c14a
|
[
"MIT"
] | null | null | null |
import pytest
from django.conf import settings
from django.test import RequestFactory
from scoutingserver.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserRedirectView()
request = request_factory.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
| 27.888889
| 77
| 0.675963
|
import pytest
from django.conf import settings
from django.test import RequestFactory
from scoutingserver.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
def test_get_success_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserRedirectView()
request = request_factory.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
| true
| true
|
1c3f94b3dd671d7cda82e3c5019659a1f8b39409
| 1,544
|
py
|
Python
|
tiger/urls.py
|
JingLinkai/tiger
|
ac54d0df2cc5d4f2a90cc59720762b28a1e9e2fa
|
[
"MIT"
] | null | null | null |
tiger/urls.py
|
JingLinkai/tiger
|
ac54d0df2cc5d4f2a90cc59720762b28a1e9e2fa
|
[
"MIT"
] | 1
|
2020-06-06T00:46:19.000Z
|
2020-06-06T00:46:19.000Z
|
tiger/urls.py
|
linkay3601/social-tiger
|
ac54d0df2cc5d4f2a90cc59720762b28a1e9e2fa
|
[
"MIT"
] | null | null | null |
"""tiger URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from user import api as user_api
from social import api as social_api
from vip import api as vip_api
urlpatterns = [
url(r'^api/user/vcode$', user_api.get_verify_code),
url(r'^api/user/login$', user_api.login),
url(r'^api/user/profile/show$', user_api.show_profile),
url(r'^api/user/profile/modify$', user_api.modify_profile),
url(r'^api/user/avatar/upload$', user_api.upload_avatar),
url(r'^api/social/rcmd_users$', social_api.get_rcmd_users),
url(r'^api/social/like$', social_api.like),
url(r'^api/social/superlike$', social_api.superlike),
url(r'^api/social/dislike$', social_api.dislike),
url(r'^api/social/rewind$', social_api.rewind),
url(r'^api/social/liked_me$', social_api.show_liked_me),
url(r'^api/social/friends$', social_api.get_friends),
url(r'^api/vip/permissions$', vip_api.show_vip_permissions),
]
| 38.6
| 79
| 0.707902
|
from django.conf.urls import url
from user import api as user_api
from social import api as social_api
from vip import api as vip_api
urlpatterns = [
url(r'^api/user/vcode$', user_api.get_verify_code),
url(r'^api/user/login$', user_api.login),
url(r'^api/user/profile/show$', user_api.show_profile),
url(r'^api/user/profile/modify$', user_api.modify_profile),
url(r'^api/user/avatar/upload$', user_api.upload_avatar),
url(r'^api/social/rcmd_users$', social_api.get_rcmd_users),
url(r'^api/social/like$', social_api.like),
url(r'^api/social/superlike$', social_api.superlike),
url(r'^api/social/dislike$', social_api.dislike),
url(r'^api/social/rewind$', social_api.rewind),
url(r'^api/social/liked_me$', social_api.show_liked_me),
url(r'^api/social/friends$', social_api.get_friends),
url(r'^api/vip/permissions$', vip_api.show_vip_permissions),
]
| true
| true
|
1c3f94bbab2f7cb9a67414113db5135aefc0a1fb
| 45
|
py
|
Python
|
src/deutschland/handelsregister/__init__.py
|
andreasbossard/deutschland
|
6f561256c707e21f81b54b139b9acb745b901298
|
[
"Apache-2.0"
] | 445
|
2021-07-26T22:00:26.000Z
|
2022-03-31T08:31:08.000Z
|
src/deutschland/handelsregister/__init__.py
|
andreasbossard/deutschland
|
6f561256c707e21f81b54b139b9acb745b901298
|
[
"Apache-2.0"
] | 30
|
2021-07-27T15:42:23.000Z
|
2022-03-26T16:14:11.000Z
|
src/deutschland/handelsregister/__init__.py
|
andreasbossard/deutschland
|
6f561256c707e21f81b54b139b9acb745b901298
|
[
"Apache-2.0"
] | 28
|
2021-07-27T10:48:43.000Z
|
2022-03-26T14:31:30.000Z
|
from .handelsregister import Handelsregister
| 22.5
| 44
| 0.888889
|
from .handelsregister import Handelsregister
| true
| true
|
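The __init__.py above only re-exports Handelsregister; a usage sketch, where the search method and its keywords parameter are assumptions based on the deutschland package's typical examples rather than anything shown here.

from deutschland.handelsregister import Handelsregister

# search() and keywords= are assumed; consult the package docs for the real API.
hr = Handelsregister()
results = hr.search(keywords="Deutsche Bahn")
print(results)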
1c3f94db5fa48ef5bb72826fe800d6a9515e1d59
| 3,288
|
py
|
Python
|
f5/bigip/tm/cm/test/functional/test_trust.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 272
|
2016-02-23T06:05:44.000Z
|
2022-02-20T02:09:32.000Z
|
f5/bigip/tm/cm/test/functional/test_trust.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 1,103
|
2016-02-11T17:48:03.000Z
|
2022-02-15T17:13:37.000Z
|
f5/bigip/tm/cm/test/functional/test_trust.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 167
|
2016-02-11T17:48:21.000Z
|
2022-01-17T20:13:05.000Z
|
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.sdk_exception import InvalidCommand
import pytest
def set_trust(request, mgmt_root, name, device, dev_name, usr, passwd):
dvcs = mgmt_root.tm.cm
trust = dvcs.add_to_trust.exec_cmd('run', name=name, device=device,
deviceName=dev_name, username=usr,
caDevice=True, password=passwd)
return trust
def unset_trust(request, mgmt_root, name, dev_name):
dvcs = mgmt_root.tm.cm
reset = dvcs.remove_from_trust.exec_cmd('run', name=name,
deviceName=dev_name)
return reset
def check_sync(request, mgmt_root):
sync_status = mgmt_root.tm.cm.sync_status
sync_status.refresh()
des = \
(sync_status.entries['https://localhost/mgmt/tm/cm/sync-status/0']
['nestedStats']
['entries']
['status']
['description'])
return des
def check_peer(request, mgmt_root):
dvcs = mgmt_root.tm.cm.devices.get_collection()
device = str(dvcs[0].managementIp)
devname = str(dvcs[0].hostname)
return device, devname
@pytest.mark.skipif(pytest.config.getoption('--peer') == 'none',
reason='Needs peer defined to run')
class TestTrust(object):
def test_run(self, request, mgmt_root, peer):
# Check sync state, assume standalone
assert check_sync(request, mgmt_root) == "Standalone"
assert check_sync(request, peer) == "Standalone"
# Obtain peer information
device1, devicename1 = check_peer(request, peer)
device2, devicename2 = check_peer(request, mgmt_root)
# Setup trust
set_trust(request, mgmt_root, 'Root', device1,
devicename1, 'admin', 'admin')
# Verify sync state assume disconnected
assert check_sync(request, mgmt_root) == "Disconnected"
assert check_sync(request, peer) == "Disconnected"
# Remove trust from both units
unset_trust(request, mgmt_root, 'Root', devicename1)
unset_trust(request, peer, 'Root', devicename2)
# Verify devices sync state is Standalone
assert check_sync(request, mgmt_root) == "Standalone"
assert check_sync(request, peer) == "Standalone"
def test_invalid_cmd_meta(self, request, mgmt_root):
dvcs = mgmt_root.tm.cm
with pytest.raises(InvalidCommand):
dvcs.add_to_trust.exec_cmd('foo', name='fooname',
device='foodev',
deviceName='foo_name',
username='foouser',
caDevice=True, password='foopasswd')
| 36.533333
| 75
| 0.630474
|
from f5.sdk_exception import InvalidCommand
import pytest
def set_trust(request, mgmt_root, name, device, dev_name, usr, passwd):
dvcs = mgmt_root.tm.cm
trust = dvcs.add_to_trust.exec_cmd('run', name=name, device=device,
deviceName=dev_name, username=usr,
caDevice=True, password=passwd)
return trust
def unset_trust(request, mgmt_root, name, dev_name):
dvcs = mgmt_root.tm.cm
reset = dvcs.remove_from_trust.exec_cmd('run', name=name,
deviceName=dev_name)
return reset
def check_sync(request, mgmt_root):
sync_status = mgmt_root.tm.cm.sync_status
sync_status.refresh()
des = \
(sync_status.entries['https://localhost/mgmt/tm/cm/sync-status/0']
['nestedStats']
['entries']
['status']
['description'])
return des
def check_peer(request, mgmt_root):
dvcs = mgmt_root.tm.cm.devices.get_collection()
device = str(dvcs[0].managementIp)
devname = str(dvcs[0].hostname)
return device, devname
@pytest.mark.skipif(pytest.config.getoption('--peer') == 'none',
reason='Needs peer defined to run')
class TestTrust(object):
def test_run(self, request, mgmt_root, peer):
assert check_sync(request, mgmt_root) == "Standalone"
assert check_sync(request, peer) == "Standalone"
device1, devicename1 = check_peer(request, peer)
device2, devicename2 = check_peer(request, mgmt_root)
set_trust(request, mgmt_root, 'Root', device1,
devicename1, 'admin', 'admin')
assert check_sync(request, mgmt_root) == "Disconnected"
assert check_sync(request, peer) == "Disconnected"
unset_trust(request, mgmt_root, 'Root', devicename1)
unset_trust(request, peer, 'Root', devicename2)
assert check_sync(request, mgmt_root) == "Standalone"
assert check_sync(request, peer) == "Standalone"
def test_invalid_cmd_meta(self, request, mgmt_root):
dvcs = mgmt_root.tm.cm
with pytest.raises(InvalidCommand):
dvcs.add_to_trust.exec_cmd('foo', name='fooname',
device='foodev',
deviceName='foo_name',
username='foouser',
caDevice=True, password='foopasswd')
| true
| true
|
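A sketch of constructing the mgmt_root these helpers and fixtures expect, using f5-common-python's ManagementRoot; host and credentials are placeholders.

from f5.bigip import ManagementRoot

mgmt_root = ManagementRoot("192.0.2.10", "admin", "admin")
# On an unpaired unit, the sync status description should read "Standalone".
print(check_sync(None, mgmt_root))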