from scipy import interpolate
import numpy as np
import matplotlib.pyplot as plt
# how many samples to build the undersampled signal from
samples = 6
# desired order of the interpolation polynomial
order = 4
inter_functions = []
# the undersampled x axis
x_axis = np.linspace(0, 2 * np.pi, samples)
# the proper x axis with plenty of samples
x_axis_true = np.linspace(0, 2 * np.pi, 1000)
# and the undersampled function evaluated on it
signal = np.sin(x_axis)
# plotting
fig, ax = plt.subplots()
ax.plot(x_axis, signal, 'ro:')
ax.grid()
# plot the interpolation for every order up to order - 1 defined above
# (orders count from zero, hence 4 curves)
for kind in range(order):
    inter = interpolate.interp1d(x_axis, signal, kind=kind)
    inter_functions.append(inter)
    ax.plot(x_axis_true, inter(x_axis_true), '--', label=kind)
ax.legend()
plt.show()
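# Note: when `kind` is an integer, interp1d performs spline interpolation of
# that order, so kind=0..3 correspond roughly to the string kinds
# 'zero', 'slinear', 'quadratic' and 'cubic'.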
|
#!/usr/bin/env python
#
# Test hook to launch an irker instance (if it doesn't already exist)
# just before shipping the notification. We start it in another terminal
# so you can watch the debug messages. Intended to be used in the root
# directory of the irker repo. Probably of interest only to irker
# developers.
#
# To use this, set up irkerhook.py to fire on each commit: create a
# .git/hooks/post-commit file containing the line "irkerhook.py"; be
# sure to make the post-commit file executable. Then set the
# filtercmd variable in your repo config as follows:
#
# [irker]
# filtercmd = filter-test.py
import os, sys, json, subprocess, time
metadata = json.loads(sys.argv[1])
ps = subprocess.Popen("ps -U %s uh" % os.getenv("LOGNAME"),
shell=True,
stdout=subprocess.PIPE)
data = ps.stdout.read().decode()
irkerd_count = len([x for x in data.split("\n") if x.find("irkerd") != -1])
if irkerd_count:
    sys.stderr.write("Using a running irker instance...\n")
else:
    sys.stderr.write("Launching a new irker instance...\n")
    os.system("gnome-terminal --title 'irkerd' -e 'irkerd -d 2' &")
    time.sleep(1.5)  # Avoid a race condition
print(json.dumps(metadata))
# end
|
from __future__ import absolute_import
from __future__ import division
import argparse
import sys
import pwnlib
pwnlib.args.free_form = False
from pwn import *
from pwnlib.commandline import common
parser = common.parser_commands.add_parser(
'scramble',
help = 'Shellcode encoder'
)
parser.add_argument(
"-f", "--format",
help="Output format (defaults to hex for ttys, otherwise raw)",
choices=['raw', 'hex', 'string', 'elf']
)
parser.add_argument(
"-o","--output",
metavar='file',
help="Output file (defaults to stdout)",
type=argparse.FileType('wb'),
default=getattr(sys.stdout, 'buffer', sys.stdout)
)
parser.add_argument(
'-c', '--context',
metavar = 'context',
action = 'append',
type = common.context_arg,
choices = common.choices,
help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,
)
parser.add_argument(
'-p', '--alphanumeric',
action='store_true',
help = 'Encode the shellcode with an alphanumeric encoder'
)
parser.add_argument(
'-v', '--avoid',
action='append',
help = 'Encode the shellcode to avoid the listed bytes'
)
parser.add_argument(
'-n', '--newline',
dest='avoid',
action='append_const',
const='\n',
help = 'Encode the shellcode to avoid newlines'
)
parser.add_argument(
'-z', '--zero',
dest='avoid',
action='append_const',
const='\x00',
help = 'Encode the shellcode to avoid NULL bytes'
)
parser.add_argument(
'-d',
'--debug',
help='Debug the shellcode with GDB',
action='store_true'
)
def main(args):
tty = args.output.isatty()
if sys.stdin.isatty():
parser.print_usage()
sys.exit(0)
data = sys.stdin.read()
output = data
fmt = args.format or ('hex' if tty else 'raw')
formatters = {'r':bytes, 'h':enhex, 's':repr}
if args.alphanumeric:
output = alphanumeric(output)
if args.avoid:
output = avoid(output, ''.join(args.avoid))
if args.debug:
proc = gdb.debug_shellcode(output, arch=context.arch)
proc.interactive()
sys.exit(0)
if fmt[0] == 'e':
sys.stdout.write(make_elf(output))
else:
output = formatters[fmt[0]](output)
if not hasattr(output, 'decode'):
output = output.encode('ascii')
args.output.write(output)
if tty and fmt != 'raw':
args.output.write(b'\n')
if __name__ == '__main__':
pwnlib.commandline.common.main(__file__)
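# Example invocation (illustrative, assuming the pwntools CLI entry point):
#   pwn shellcraft i386.linux.sh -f raw | pwn scramble -p -f hex
# alphanumeric-encodes the shellcode read from stdin and prints it as hex.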
|
import time
import pickle
import uuid
from taskue.utils import logging
from redis import Redis
from taskue.utils import RedisKeys
from taskue.task import Task, TaskStatus
class WorkflowStatus:
CREATED = "Created"
QUEUED = "Queued"
RUNNING = "Running"
PASSED = "Passed"
FAILED = "Failed"
DONE_STATES = [PASSED, FAILED]
class WorkflowStageStatus:
RUNNING = "Running"
PASSED = "Passed"
FAILED = "Failed"
DONE_STATES = [PASSED, FAILED]
class Workflow:
def __init__(self):
self.uid = uuid.uuid4().hex
self.stages = []
self.current_stage = 1
self.status = WorkflowStatus.CREATED
self.created_at = None
self.queued_at = None
self.started_at = None
self.done_at = None
@property
def current_stage_tasks(self):
"""
Gets current stage tasks
"""
index = self.current_stage - 1
return self.stages[index].keys()
@property
def is_last_stage(self):
"""
Checks if current stage is the last stage of the workflow
"""
return self.current_stage == len(self.stages)
@property
def rkey(self):
"""
Returns redis key
"""
return RedisKeys.WORKFLOW.format(self.uid)
@property
def rqueue(self):
"""
Returns redis queue
"""
return RedisKeys.WORKFLOWS
@property
def is_current_stage_done(self):
"""
Checks if current stage is done or not
"""
return self.current_stage_status in WorkflowStageStatus.DONE_STATES
@property
def current_stage_status(self):
"""
Gets current stage status
"""
return self.get_stage_status(self.current_stage)
def get_stage_status(self, stage):
"""
Gets stage status
Arguments:
stage {int} -- stage number
"""
index = stage - 1
states = self.stages[index].values()
if not set(TaskStatus.UNDONE_STATES).isdisjoint(states):
return WorkflowStageStatus.RUNNING
if set(TaskStatus.FAILED_STATES).isdisjoint(states):
return WorkflowStageStatus.PASSED
return WorkflowStageStatus.FAILED
def update_status(self, redis: Redis):
"""
Updates workflow status
"""
cached = redis.hgetall(RedisKeys.CACHE.format(self.uid))
self.update_tasks_status(cached)
states = []
        # stage numbers are 1-based, so enumerate from 1
        for stage, _ in enumerate(self.stages, start=1):
            states.append(self.get_stage_status(stage))
if WorkflowStageStatus.RUNNING in states:
return
if WorkflowStageStatus.FAILED in states:
self.status = WorkflowStatus.FAILED
else:
self.status = WorkflowStatus.PASSED
self.done_at = time.time()
def add_task(self, task: Task, stage: int = None):
"""
Adds task to the stage
Arguments:
task {Task} -- Task object
Keyword Arguments:
stage {int} -- stage number (default: {None})
"""
        if stage is None:
            stage = len(self.stages)  # default to the last stage
        index = stage - 1
task.stage = stage
task.created_at = self.created_at
task.workflow_uid = self.uid
self.stages[index][task.uid] = task.status
def add_stage(self, tasks: list = []):
"""
Adds stage to the workflow
Keyword Arguments:
tasks {list} -- List of tasks objects (default: {[]})
"""
self.stages.append({})
stage = len(self.stages)
for task in tasks:
self.add_task(task, stage)
def dump(self):
"""
Dumps this object
"""
return pickle.dumps(self)
def update_tasks_status(self, cached):
for stage in self.stages:
for uid in stage.keys():
stage[uid] = cached.get(uid.encode("utf-8")).decode()
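# Minimal usage sketch (illustrative; Task construction details are assumed,
# the real signature lives in taskue.task):
#   wf = Workflow()
#   wf.add_stage([Task(), Task()])  # stage 1: tasks that run in parallel
#   wf.add_stage([Task()])          # stage 2: runs once stage 1 is done
#   blob = wf.dump()                # pickled blob, ready to store in redis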
|
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Optional, Tuple
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.python.distribute.tpu_strategy import TPUStrategy
import _path # noqa
from imagemodel.common.reporter import PredictorReporter
from imagemodel.common.setup import PredictExperimentSetup, predict_experiment_id
from imagemodel.common.utils.common_tpu import create_tpu, delete_tpu, tpu_initialize
from imagemodel.common.utils.gpu_check import check_first_gpu
from imagemodel.experimental.reference_tracking.dataset_providers.cell_tracking_dataset. \
rt_cell_tracking_provider_p import RTCellTrackingProviderP
from imagemodel.experimental.reference_tracking.dataset_providers.rt_provider import RTProviderP
from imagemodel.experimental.reference_tracking.models.testers.rt_provider_predictor import RTProviderPredictor
check_first_gpu()
# noinspection DuplicatedCode
if __name__ == "__main__":
"""
Examples
--------
# With CPU (O)
>>> docker run \
... -it \
... --rm \
... -u $(id -u):$(id -g) \
... -v /etc/localtime:/etc/localtime:ro \
... -v $(pwd):/imagemodel \
... -v /data:/data \
... -v ~/reference_tracking_results:/reference_tracking_results \
... -v /data/tensorflow_datasets:/tensorflow_datasets \
... --workdir="/imagemodel" \
... imagemodel/tkl:1.2
>>> python imagemodel/experimental/reference_tracking/models/testers/provider_predict.py \
... --model_name ref_local_tracking_model_031_mh \
... --model_weight_path saved/\
... training__model_ref_local_tracking_model_031_mh__run_reference_tracking__20210511_063754.epoch_23 \
... --run_id reference_tracking__20210513_050631 \
... --result_base_folder /reference_tracking_results \
... --predict_pipeline rt_cell_sample_2_test_1 \
... --batch_size 4
# With GPU (O)
>>> docker run \
... --gpus all \
... -it \
... --rm \
... -u $(id -u):$(id -g) \
... -v /etc/localtime:/etc/localtime:ro \
... -v $(pwd):/imagemodel \
... -v /data:/data \
... -v ~/reference_tracking_results:/reference_tracking_results \
... -v /data/tensorflow_datasets:/tensorflow_datasets \
... --workdir="/imagemodel" \
... imagemodel/tkl:1.2
>>> python imagemodel/experimental/reference_tracking/models/testers/provider_predict.py \
... --model_name ref_local_tracking_model_031 \
... --model_weight_path /reference_tracking_results/save/weights/\
... training__model_ref_local_tracking_model_031__run_reference_tracking__20210517_163254.epoch_02 \
... --run_id reference_tracking__20210517_170230 \
... --result_base_folder /reference_tracking_results \
... --predict_base_folder /data/tracking_test2 \
... --predict_filename_folder /data/tracking_test2/framed_sample \
... --batch_size 2
# With TPU (X)
>>> docker run \
... -it \
... --rm \
... -u $(id -u):$(id -g) \
... -v /etc/localtime:/etc/localtime:ro \
... -v ~/.config:/.config \
... -v ~/.local:/.local \
... -v $(pwd):/imagemodel \
... --workdir="/imagemodel" \
... imagemodel_tpu/tkl:1.0
>>> python imagemodel/experimental/reference_tracking/models/testers/predict.py \
... --model_name ref_local_tracking_model_031_mh \
... --model_weight_path gs://cell_dataset/save/weights/\
... training__model_ref_local_tracking_model_031_mh__run_reference_tracking__20210511_063754.epoch_23 \
... --run_id reference_tracking__20210513_052931 \
... --result_base_folder gs://cell_dataset \
... --predict_pipeline rt_gs_cell_sample_2_test_1 \
... --batch_size 4 \
... --ctpu_zone us-central1-b \
... --tpu_name leetaekyu-1-trainer
"""
# Argument Parsing
parser: ArgumentParser = ArgumentParser(
description="Arguments predicts in Reference Tracking",
formatter_class=RawTextHelpFormatter)
# model related
parser.add_argument("--model_name", type=str, required=True)
parser.add_argument("--input_color_image", action="store_true")
# trained model related
parser.add_argument("--model_weight_path", required=True, type=str)
# predict related
parser.add_argument("--run_id", type=str)
parser.add_argument("--result_base_folder", type=str, required=True)
# dataset related
parser.add_argument("--predict_filename_folder", type=str)
parser.add_argument("--predict_base_folder", type=str, required=True)
parser.add_argument("--batch_size", type=int)
# tpu related
parser.add_argument("--ctpu_zone", type=str, help="VM, TPU zone. ex) 'us-central1-b'")
parser.add_argument("--tpu_name", type=str, help="TPU name. ex) 'leetaekyu-1-trainer'")
args = parser.parse_args()
# model related
model_name: str = args.model_name
input_color_image: bool = args.input_color_image
# trained model related
model_weight_path: str = args.model_weight_path
# predict related
run_id: Optional[str] = args.run_id
result_base_folder: str = args.result_base_folder
predict_filename_folder: Optional[str] = args.predict_filename_folder
# dataset related
predict_base_folder: str = args.predict_base_folder
batch_size: int = args.batch_size or 4
# tpu related
ctpu_zone: str = args.ctpu_zone or "us-central1-b"
tpu_name_optional: Optional[str] = args.tpu_name
# TPU
strategy_optional: Optional[TPUStrategy] = None
if tpu_name_optional:
create_tpu(tpu_name=tpu_name_optional, ctpu_zone=ctpu_zone)
strategy_optional = tpu_initialize(tpu_address=tpu_name_optional, tpu_zone=ctpu_zone)
# Experiment Setup
experiment_setup = PredictExperimentSetup(
result_base_folder=result_base_folder,
model_name=model_name,
run_id=run_id,
experiment_id_generator=predict_experiment_id)
# Model Setup
input_shape: Tuple[int, int, int] = (256, 256, 3) if input_color_image else (256, 256, 1)
if tpu_name_optional:
with strategy_optional.scope():
model: Model = tf.keras.models.load_model(model_weight_path)
else:
model: Model = tf.keras.models.load_model(model_weight_path)
# Dataset Setup
rt_predict_provider: RTProviderP = RTCellTrackingProviderP(
filename_folder=predict_filename_folder,
base_folder=predict_base_folder,
shuffle=False,
random_seed=42,
bin_size=30,
resize_to=(256, 256))
rt_predict_dataset: tf.data.Dataset = rt_predict_provider.get_output_dataset()
rt_predict_dataset_description: str = rt_predict_provider.data_description
def combine_folder_file(a, b):
return a + "/" + b
def post_processing(predicted_current_bin_label: tf.Tensor, bin_color_map: tf.Tensor, current_filenames: tf.Tensor):
current_arg_max_bin = tf.argmax(predicted_current_bin_label, axis=-1)
current_label = tf.gather(bin_color_map, current_arg_max_bin, axis=1, batch_dims=1)
for index, current_filename in enumerate(current_filenames):
current_folder_filename = combine_folder_file(experiment_setup.save_result_images_folder, current_filename)
img = tf.image.encode_png(tf.cast(current_label[index], tf.uint8))
tf.io.write_file(current_folder_filename, img)
# Trainer Setup
predictor = RTProviderPredictor(
model=model,
predict_dataset=rt_predict_dataset,
predict_dataset_description=rt_predict_dataset_description,
predict_batch_size=batch_size,
strategy_optional=strategy_optional,
post_processing=post_processing)
# Report
reporter = PredictorReporter(setup=experiment_setup, predictor=predictor)
reporter.report()
reporter.plotmodel()
# Predict
predictor.predict()
if tpu_name_optional:
delete_tpu(tpu_name=tpu_name_optional, ctpu_zone=ctpu_zone)
|
"""
Given a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sum to target.
Each number in candidates may only be used once in the combination.
Note: The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8
Output:
[
[1,1,6],
[1,2,5],
[1,7],
[2,6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5
Output:
[
[1,2,2],
[5]
]
Constraints:
1 <= candidates.length <= 100
1 <= candidates[i] <= 50
1 <= target <= 30
"""
from typing import List

class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
ans = []
candidates.sort()
self.helper(candidates, [], target, ans)
return ans
def helper(self, candidates, current, target, ans):
if target == 0:
ans.append(current)
return
for i in range(len(candidates)):
if target - candidates[i] < 0:
break
if i > 0 and candidates[i] == candidates[i-1]:
continue
new_target = target - candidates[i]
new_current = current + [candidates[i]]
new_candidates = candidates[i+1:]
self.helper(new_candidates, new_current, new_target, ans)
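# Quick check against Example 1 (illustrative):
#   Solution().combinationSum2([10, 1, 2, 7, 6, 1, 5], 8)
#   -> [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]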
|
import time
from functools import wraps
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""
Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
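# Usage sketch (illustrative): retry a flaky call up to 4 times, sleeping
# 3s, 6s and 12s between attempts before letting the exception propagate:
#
#   @retry(IOError, tries=4, delay=3, backoff=2)
#   def fetch_data():
#       ...  # may raise IOError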
|
"""Common helper functions for typing and general numpy tools."""
import numpy as np
from .utils import get_aliasing, check_boolean
_alias_numpy = {
np.add: 'sum',
np.sum: 'sum',
np.any: 'any',
np.all: 'all',
np.multiply: 'prod',
np.prod: 'prod',
np.amin: 'min',
np.min: 'min',
np.minimum: 'min',
np.amax: 'max',
np.max: 'max',
np.maximum: 'max',
np.argmax: 'argmax',
np.argmin: 'argmin',
np.mean: 'mean',
np.std: 'std',
np.var: 'var',
np.array: 'array',
np.asarray: 'array',
np.sort: 'sort',
np.nansum: 'nansum',
np.nanprod: 'nanprod',
np.nanmean: 'nanmean',
np.nanvar: 'nanvar',
np.nanmax: 'nanmax',
np.nanmin: 'nanmin',
np.nanstd: 'nanstd',
np.nanargmax: 'nanargmax',
np.nanargmin: 'nanargmin',
np.cumsum: 'cumsum',
np.cumprod: 'cumprod',
}
aliasing = get_aliasing(_alias_numpy)
_next_int_dtype = dict(
bool=np.int8,
uint8=np.int16,
int8=np.int16,
uint16=np.int32,
int16=np.int32,
uint32=np.int64,
int32=np.int64
)
_next_float_dtype = dict(
float16=np.float32,
float32=np.float64,
float64=np.complex64,
complex64=np.complex128
)
def minimum_dtype(x, dtype=np.bool_):
"""returns the "most basic" dtype which represents `x` properly, which
provides at least the same value range as the specified dtype."""
def check_type(x, dtype):
try:
converted = dtype.type(x)
except (ValueError, OverflowError):
return False
# False if some overflow has happened
return converted == x or np.isnan(x)
def type_loop(x, dtype, dtype_dict, default=None):
while True:
try:
dtype = np.dtype(dtype_dict[dtype.name])
if check_type(x, dtype):
return np.dtype(dtype)
except KeyError:
if default is not None:
return np.dtype(default)
raise ValueError("Can not determine dtype of %r" % x)
dtype = np.dtype(dtype)
if check_type(x, dtype):
return dtype
if np.issubdtype(dtype, np.inexact):
return type_loop(x, dtype, _next_float_dtype)
else:
return type_loop(x, dtype, _next_int_dtype, default=np.float32)
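# For example (illustrative): minimum_dtype(300, np.int8) walks the int chain
# up to int16, since 300 overflows int8, while minimum_dtype(np.nan, np.int8)
# exhausts the int chain and falls back to the float32 default.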
def minimum_dtype_scalar(x, dtype, a):
if dtype is None:
dtype = np.dtype(type(a)) if isinstance(a, (int, float))\
else a.dtype
return minimum_dtype(x, dtype)
_forced_types = {
    'array': object,
'all': np.bool_,
'any': np.bool_,
'nanall': np.bool_,
'nanany': np.bool_,
'len': np.int64,
'nanlen': np.int64,
'allnan': np.bool_,
'anynan': np.bool_,
'argmax': np.int64,
'argmin': np.int64,
}
_forced_float_types = {'mean', 'var', 'std', 'nanmean', 'nanvar', 'nanstd'}
_forced_same_type = {'min', 'max', 'first', 'last', 'nanmin', 'nanmax',
'nanfirst', 'nanlast'}
def check_dtype(dtype, func_str, a, n):
if np.isscalar(a) or not a.shape:
if func_str not in ("sum", "prod", "len"):
raise ValueError("scalar inputs are supported only for 'sum', "
"'prod' and 'len'")
a_dtype = np.dtype(type(a))
else:
a_dtype = a.dtype
if dtype is not None:
# dtype set by the user
# Careful here: np.bool != np.bool_ !
if np.issubdtype(dtype, np.bool_) and \
not('all' in func_str or 'any' in func_str):
raise TypeError("function %s requires a more complex datatype "
"than bool" % func_str)
if not np.issubdtype(dtype, np.integer) and func_str in ('len', 'nanlen'):
raise TypeError("function %s requires an integer datatype" % func_str)
# TODO: Maybe have some more checks here
return np.dtype(dtype)
else:
try:
return np.dtype(_forced_types[func_str])
except KeyError:
if func_str in _forced_float_types:
if np.issubdtype(a_dtype, np.floating):
return a_dtype
else:
return np.dtype(np.float64)
else:
if func_str == 'sum':
# Try to guess the minimally required int size
if np.issubdtype(a_dtype, np.int64):
# It's not getting bigger anymore
# TODO: strictly speaking it might need float
return np.dtype(np.int64)
elif np.issubdtype(a_dtype, np.integer):
maxval = np.iinfo(a_dtype).max * n
return minimum_dtype(maxval, a_dtype)
elif np.issubdtype(a_dtype, np.bool_):
return minimum_dtype(n, a_dtype)
else:
# floating, inexact, whatever
return a_dtype
elif func_str in _forced_same_type:
return a_dtype
else:
                    if np.issubdtype(a_dtype, np.integer):
return np.dtype(np.int64)
else:
return a_dtype
def check_fill_value(fill_value, dtype, func=None):
if func in ('all', 'any', 'allnan', 'anynan'):
check_boolean(fill_value)
else:
try:
return dtype.type(fill_value)
except ValueError:
raise ValueError("fill_value must be convertible into %s"
% dtype.type.__name__)
def check_group_idx(group_idx, a=None, check_min=True):
if a is not None and group_idx.size != a.size:
raise ValueError("The size of group_idx must be the same as "
"a.size")
if not issubclass(group_idx.dtype.type, np.integer):
raise TypeError("group_idx must be of integer type")
if check_min and np.min(group_idx) < 0:
raise ValueError("group_idx contains negative indices")
def input_validation(group_idx, a, size=None, order='C', axis=None,
ravel_group_idx=True, check_bounds=True):
""" Do some fairly extensive checking of group_idx and a, trying to
give the user as much help as possible with what is wrong. Also,
convert ndim-indexing to 1d indexing.
"""
if not isinstance(a, (int, float, complex)):
a = np.asanyarray(a)
group_idx = np.asanyarray(group_idx)
if not np.issubdtype(group_idx.dtype, np.integer):
raise TypeError("group_idx must be of integer type")
# This check works for multidimensional indexing as well
if check_bounds and np.any(group_idx < 0):
raise ValueError("negative indices not supported")
ndim_idx = np.ndim(group_idx)
ndim_a = np.ndim(a)
# Deal with the axis arg: if present, then turn 1d indexing into
# multi-dimensional indexing along the specified axis.
if axis is None:
if ndim_a > 1:
raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
" flatten. Alternatively specify axis.")
elif axis >= ndim_a or axis < -ndim_a:
raise ValueError("axis arg too large for np.ndim(a)")
else:
axis = axis if axis >= 0 else ndim_a + axis # negative indexing
if ndim_idx > 1:
# TODO: we could support a sequence of axis values for multiple
# dimensions of group_idx.
raise NotImplementedError("only 1d indexing currently"
"supported with axis arg.")
elif a.shape[axis] != len(group_idx):
raise ValueError("a.shape[axis] doesn't match length of group_idx.")
elif size is not None and not np.isscalar(size):
raise NotImplementedError("when using axis arg, size must be"
"None or scalar.")
else:
# Create the broadcast-ready multidimensional indexing.
# Note the user could do this themselves, so this is
# very much just a convenience.
size_in = int(np.max(group_idx)) + 1 if size is None else size
group_idx_in = group_idx
group_idx = []
size = []
for ii, s in enumerate(a.shape):
ii_idx = group_idx_in if ii == axis else np.arange(s)
ii_shape = [1] * ndim_a
ii_shape[ii] = s
group_idx.append(ii_idx.reshape(ii_shape))
size.append(size_in if ii == axis else s)
            # Use the indexing, and return. It's a bit simpler than
            # trying to keep all the logic below happy.
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
ndim_idx = ndim_a
return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size
if ndim_idx == 1:
if size is None:
size = int(np.max(group_idx)) + 1
else:
if not np.isscalar(size):
raise ValueError("output size must be scalar or None")
if check_bounds and np.any(group_idx > size - 1):
raise ValueError("one or more indices are too large for "
"size %d" % size)
flat_size = size
else:
if size is None:
size = np.max(group_idx, axis=1).astype(int) + 1
elif np.isscalar(size):
raise ValueError("output size must be of length %d"
% len(group_idx))
elif len(size) != len(group_idx):
raise ValueError("%d sizes given, but %d output dimensions "
"specified in index" % (len(size),
len(group_idx)))
if ravel_group_idx:
group_idx = np.ravel_multi_index(group_idx, size, order=order,
mode='raise')
flat_size = np.prod(size)
if not (np.ndim(a) == 0 or len(a) == group_idx.size):
raise ValueError("group_idx and a must be of the same length, or a"
" can be scalar")
return group_idx, a, flat_size, ndim_idx, size
### General tools ###
def unpack(group_idx, ret):
""" Take an aggregate packed array and uncompress it to the size of group_idx.
This is equivalent to ret[group_idx].
"""
return ret[group_idx]
def allnan(x):
return np.all(np.isnan(x))
def anynan(x):
return np.any(np.isnan(x))
def nanfirst(x):
return x[~np.isnan(x)][0]
def nanlast(x):
return x[~np.isnan(x)][-1]
def multi_arange(n):
"""By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
    That is, it is equivalent to something like this:
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function.
"""
if n.ndim != 1:
raise ValueError("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1] + 1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1]
def label_contiguous_1d(X):
"""
    WARNING: the API for this function is liable to change!!!
By example:
X = [F T T F F T F F F T T T]
result = [0 1 1 0 0 2 0 0 0 3 3 3]
Or:
X = [0 3 3 0 0 5 5 5 1 1 0 2]
result = [0 1 1 0 0 2 2 2 3 3 0 4]
The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X``
is a boolean array, each contiguous block of ``True`` is given an integer
label, if ``X`` is not boolean, then each contiguous block of identical values
    is given an integer label. Integer labels are 1, 2, 3, ... (i.e. they start
    at 1 and increase by 1 for each block, with no skipped numbers).
"""
if X.ndim != 1:
raise ValueError("this is for 1d masks only.")
is_start = np.empty(len(X), dtype=bool)
is_start[0] = X[0] # True if X[0] is True or non-zero
if X.dtype.kind == 'b':
is_start[1:] = ~X[:-1] & X[1:]
M = X
else:
M = X.astype(bool)
is_start[1:] = X[:-1] != X[1:]
is_start[~M] = False
L = np.cumsum(is_start)
L[~M] = 0
return L
def relabel_groups_unique(group_idx):
"""
See also ``relabel_groups_masked``.
    group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
    ret:       [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]
    Description of above: the unique groups in the input were ``1,2,3,5``, i.e.
    ``4`` was missing, so group 5 was relabelled to ``4``.
Relabeling maintains order, just "compressing" the higher numbers
to fill gaps.
"""
keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool)
keep_group[0] = True
keep_group[group_idx] = True
return relabel_groups_masked(group_idx, keep_group)
def relabel_groups_masked(group_idx, keep_group):
"""
group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
0 1 2 3 4 5
keep_group: [0 1 0 1 1 1]
ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]
    Description of above in words: remove group 2, and relabel groups 3, 4 and
    5 as 2, 3 and 4 respectively, in order to fill the gap. Note that group 4
    was never used in the input group_idx, but the user-supplied mask said to
    keep group 4, so group 5 is only moved up by one place to fill the gap
    created by removing group 2.
    That is, the mask describes which groups to remove; the remaining groups
    are relabelled to remove the gaps created by the falsy elements in
    ``keep_group``. Note that ``keep_group[0]`` has no particular meaning
    because it refers to the zero group, which cannot be "removed".
    ``keep_group`` should be bool and ``group_idx`` int.
    Values in ``group_idx`` may appear in any order and may repeat.
    """
keep_group = keep_group.astype(bool, copy=not keep_group[0])
if not keep_group[0]: # ensuring keep_group[0] is True makes life easier
keep_group[0] = True
relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
return relabel[group_idx]
|
import subprocess
import sys
import re
import os
repo_name_regex = r"^[_\-a-zA-Z0-9]+$"
repo_name = "{{ cookiecutter.repo_name }}"
initiate_repo = "{{ cookiecutter.initiate_repo }}"
create_conda_env_config = "{{ cookiecutter.create_conda_env_config }}"
conda_env_config_file = f"{repo_name}-env.yml"
if not re.match(repo_name_regex, repo_name):
print(f"ERROR: {repo_name} is not a valid repository name.")
sys.exit(1)
if create_conda_env_config == "no" and os.path.isfile(conda_env_config_file):
os.remove(conda_env_config_file)
try:
if initiate_repo == "yes":
subprocess.run(["git", "init"], check=True)
subprocess.run(["git", "add", "."], check=True)
subprocess.run(["git", "commit", "-m", "initial commit"], check=True)
except subprocess.CalledProcessError as e:
print(e.output)
sys.exit(1)
|
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
# This example uses the points method to connect letters together.
# It actually draws lines between points of each letter's contour
# that are within a certain distance of each other.
def draw(canvas):
canvas.clear()
background(0.3,0,0.2)
    # This utility function calculates the distance between two points.
    # It's just the standard Pythagorean theorem.
def calc_length(x1, y1, x2, y2):
from math import sqrt, pow
return sqrt(pow(x2-x1, 2) + pow(y2-y1, 2))
# First, create a textpath that we will use further on.
fontsize(150)
path = textpath("SPIDER",20, 150)
# Select a color for the lines.
nofill()
stroke(1)
strokewidth(0.3)
# The mutation adds a little extra randomness to each calculated point.
# Increase it to make the lines deviate more from the template path.
mutation = 2.0
# The maximum distance between two points. Increase this to get a more
# "spidery" effect.
maxdist = 40.0
    # Number of lines to draw for each contour.
lines_per_contour = 300
    # A path has a contours property that returns each separate contour.
    # Note that "holes" in a letter (such as a P or D) are contours as well.
for contour in path.contours:
        # Get a list of 100 points on each contour, evenly divided along
        # the path. This is different from the elements of the path,
        # because these points are evenly distributed along its length.
path_points = list(contour.points(100))
# We want a certain amount of lines.
for i in range(lines_per_contour):
# Choose a point on the path
pt1 = choice(path_points)
# To find the second point, we use a "brute-force" approach.
# We randomly select a point on the path, and see if its distance
# from the first point is smaller than the maximum allowed distance.
# If it is, the point is selected; otherwise, we try another point.
# Note that this might loop infinitely for very short (or negative) distances.
# Use Command-Period to break out of the loop.
# Initialize the current length to "infinity", which means it won't get selected.
length = float("inf")
while length > maxdist:
pt2 = choice(path_points)
length = calc_length(pt1.x, pt1.y, pt2.x, pt2.y)
# Once we've found a second point, draw it. Use the mutation parameter to add a bit
# of randomness to the position of the line.
line(pt1.x + random(-mutation, mutation), pt1.y + random(-mutation, mutation), \
pt2.x + random(-mutation, mutation), pt2.y + random(-mutation, mutation))
canvas.size = 500, 500
canvas.run(draw)
|
import numpy as np
class RandomAction:
def __init__(self, all_actions: int):
self.all_actions = all_actions
def __call__(self, population):
return np.random.randint(self.all_actions)
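# Usage sketch (illustrative): the policy ignores the population it is given
# and simply samples a uniform random action index.
#
#   policy = RandomAction(all_actions=4)
#   action = policy(population=None)  # integer in [0, 4)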
|
''' Module for reading SMART attribute values from hard drives. Requires smartctl.'''
import subprocess
def read_smart(dev):
s = subprocess.Popen(['smartctl', '-A', dev], stdout=subprocess.PIPE).stdout.read()
s = s.decode('utf-8')
lines = s.split('\n')
header = lines[6]
global positions
positions = []
positions.append(header.find('ID#'))
positions.append(header.find('ATTRIBUTE_NAME'))
positions.append(header.find('FLAG'))
positions.append(header.find('VALUE'))
positions.append(header.find('WORST'))
positions.append(header.find('THRESH'))
positions.append(header.find('TYPE'))
positions.append(header.find('UPDATED'))
positions.append(header.find('WHEN_FAILED'))
positions.append(header.find('RAW_VALUE'))
out = {}
for l in lines[7:-1]:
if l.strip() != '':
attr = _split_smart_line(l)
out[attr['id']] = attr
return out
def _split_smart_line(line):
out = {}
out['id'] = int(line[positions[0]:positions[1]].strip())
out['name'] = attribute_name.get(out['id'], '???')
out['flag'] = line[positions[2]:positions[3]].strip()
out['value'] = line[positions[3]:positions[4]].strip()
out['worst'] = line[positions[4]:positions[5]].strip()
out['threshold'] = line[positions[5]:positions[6]].strip()
out['type'] = line[positions[6]:positions[7]].strip()
out['updated'] = line[positions[7]:positions[8]].strip()
out['when_failed'] = line[positions[8]:positions[9]].strip()
raw = line[positions[9]:].strip()
if raw.find(' ') > -1:
raw = raw[:raw.find(' ')]
out['raw'] = raw
return out
attribute_name = {
1:'Read Error Rate',
2:'Throughput Performance',
3:'Spin-Up Time',
4:'Start/Stop Count',
5:'Reallocated Sectors Count',
6:'Read Channel Margin',
7:'Seek Error Rate',
8:'Seek Time Performance',
9:'Power-On Hours',
10:'Spin Retry Count',
11:'Recalibration Retries',
12:'Power Cycle Count',
13:'Soft Read Error Rate',
22:'Current Helium Level',
170:'Available Reserved Space',
171:'SSD Program Fail Count',
172:'SSD Erase Fail Count',
173:'SSD Wear Leveling Count',
174:'Unexpected power loss count',
175:'Power Loss Protection Failure',
176:'Erase Fail Count',
177:'Wear Range Delta',
179:'Used Reserved Block Count Total',
180:'Unused Reserved Block Count Total',
181:'Program Fail Count Total or Non-4K Aligned Access Count',
182:'Erase Fail Count',
183:'SATA Downshift Error Count or Runtime Bad Block',
184:'End-to-End error',
185:'Head Stability',
186:'Induced Op-Vibration Detection',
187:'Reported Uncorrectable Errors',
188:'Command Timeout',
189:'High Fly Writes',
190:'Airflow Temperature',
191:'G-sense Error Rate',
192:'Unsafe Shutdown Count',
193:'Load Cycle Count',
194:'Temperature Celsius',
195:'Hardware ECC Recovered',
196:'Reallocation Event Count',
197:'Current Pending Sector Count',
198:'Offline Uncorrectable Sector Count',
199:'UltraDMA CRC Error Count',
200:'Multi-Zone Error Rate',
201:'Soft Read Error Rate',
202:'Data Address Mark errors',
203:'Run Out Cancel',
204:'Soft ECC Correction',
205:'Thermal Asperity Rate',
206:'Flying Height',
207:'Spin High Current',
208:'Spin Buzz',
209:'Offline Seek Performance',
210:'Vibration During Write',
211:'Vibration During Write',
212:'Shock During Write',
220:'Disk Shift',
221:'G-Sense Error Rate',
222:'Loaded Hours',
223:'Load/Unload Retry Count',
224:'Load Friction',
225:'Load/Unload Cycle Count',
226:'Load \'In\'-time',
227:'Torque Amplification Count',
228:'Power-Off Retract Cycle',
230:'GMR Head Amplitude or Drive Life Protection Status (SSDs)',
231:'Life Left (SSDs) or Temperature',
232:'Endurance Remaining',
233:'Media Wearout Indicator (SSDs) or Power-On Hours',
234:'Average erase count AND Maximum Erase Count',
235:'Good Block Count AND System(Free) Block Count',
240:'Head Flying Hours or Transfer Error Rate',
241:'Total LBAs Written',
242:'Total LBAs Read',
243:'Total LBAs Written Expanded',
244:'Total LBAs Read Expanded',
249:'NAND Writes (1GiB)',
250:'Read Error Retry Rate',
251:'Minimum Spares Remaining',
252:'Newly Added Bad Flash Block',
254:'Free Fall Protection',
}
def __smart_mock():
return '''smartctl 6.3 2014-07-26 r3976 [FreeBSD 9.3-RELEASE-p16 amd64] (local build)
Copyright (C) 2002-14, Bruce Allen, Christian Franke, www.smartmontools.org
=== START OF READ SMART DATA SECTION ===
SMART Attributes Data Structure revision number: 10
Vendor Specific SMART Attributes with Thresholds:
ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
1 Raw_Read_Error_Rate 0x000f 119 100 006 Pre-fail Always - 233506248
3 Spin_Up_Time 0x0003 095 095 000 Pre-fail Always - 0
4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 34
5 Reallocated_Sector_Ct 0x0033 100 100 010 Pre-fail Always - 0
7 Seek_Error_Rate 0x000f 070 060 030 Pre-fail Always - 10893010
9 Power_On_Hours 0x0032 100 100 000 Old_age Always - 422
10 Spin_Retry_Count 0x0013 100 100 097 Pre-fail Always - 0
12 Power_Cycle_Count 0x0032 100 100 020 Old_age Always - 34
183 Runtime_Bad_Block 0x0032 100 100 000 Old_age Always - 0
184 End-to-End_Error 0x0032 100 100 099 Old_age Always - 0
187 Reported_Uncorrect 0x0032 100 100 000 Old_age Always - 0
188 Command_Timeout 0x0032 100 100 000 Old_age Always - 0 0 0
189 High_Fly_Writes 0x003a 099 099 000 Old_age Always - 1
190 Airflow_Temperature_Cel 0x0022 065 058 045 Old_age Always - 35 (Min/Max 23/35)
191 G-Sense_Error_Rate 0x0032 100 100 000 Old_age Always - 0
192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always - 2
193 Load_Cycle_Count 0x0032 100 100 000 Old_age Always - 81
194 Temperature_Celsius 0x0022 035 042 000 Old_age Always - 35 (0 23 0 0 0)
197 Current_Pending_Sector 0x0012 100 100 000 Old_age Always - 0
198 Offline_Uncorrectable 0x0010 100 100 000 Old_age Offline - 0
199 UDMA_CRC_Error_Count 0x003e 200 200 000 Old_age Always - 0
240 Head_Flying_Hours 0x0000 100 253 000 Old_age Offline - 422h+13m+26.772s
241 Total_LBAs_Written 0x0000 100 253 000 Old_age Offline - 11376810357
242 Total_LBAs_Read 0x0000 100 253 000 Old_age Offline - 211017862927
'''
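# Usage sketch (illustrative; requires smartctl and a readable device):
#   attrs = read_smart('/dev/sda')
#   attrs[194]['raw']  # raw drive temperature as a string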
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class DescribeUserCustomerLabelsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'DescribeUserCustomerLabels')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Uid(self):
return self.get_query_params().get('Uid')
def set_Uid(self,Uid):
self.add_query_param('Uid',Uid)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class CheckVirtualNetworkSubnetUsageOperations(object):
"""CheckVirtualNetworkSubnetUsageOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mysql_flexibleservers.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def execute(
self,
location_name, # type: str
parameters, # type: "_models.VirtualNetworkSubnetUsageParameter"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkSubnetUsageResult"
"""Get virtual network subnet usage for a given vNet resource id.
:param location_name: The name of the location.
:type location_name: str
:param parameters: The required parameters for creating or updating a server.
:type parameters: ~azure.mgmt.rdbms.mysql_flexibleservers.models.VirtualNetworkSubnetUsageParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkSubnetUsageResult, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mysql_flexibleservers.models.VirtualNetworkSubnetUsageResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkSubnetUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.execute.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkSubnetUsageParameter')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkSubnetUsageResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
execute.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DBforMySQL/locations/{locationName}/checkVirtualNetworkSubnetUsage'} # type: ignore
|
from utils import get_data, answers, print_answers
data = get_data(2020, 5).split('\n')
find_highest = []
for ticket in data:
count = 10
find_row = list(range(0, 128))
find_col = list(range(0, 8))
for t in ticket:
if count > 0:
r_ind = int(len(find_row)/2)
c_ind = int(len(find_col)/2)
if t == 'F':
find_row = find_row[:r_ind]
if t == 'B':
find_row = find_row[r_ind:]
if t == 'L':
find_col = find_col[:c_ind]
if t == 'R':
find_col = find_col[c_ind:]
count -= 1
find_highest.append(find_row[0]*8+find_col[0])
def find_missing(lst):
start = lst[0]
end = lst[-1]
return sorted(set(range(start, end + 1)).difference(lst))
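# e.g. find_missing([3, 4, 6, 7]) -> [5]; my seat is the single gap in the
# otherwise contiguous list of occupied seat IDs.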
my_seat = find_missing(sorted(find_highest))
answers['first'] = max(find_highest)
answers['second'] = my_seat[0]
print_answers(answers)
|
from Neuron import Neuron
from NeuronLayer import NeuronLayer
from NeuronNetwork import NeuronNetwork
import random
# The values below are based on the worked solutions from lesson 6.
print("andGate")
o = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1)) # [-0.5, 0.5], 1.5 OR [1, 1], -1.5
outputLayer = NeuronLayer([o])
andGate = NeuronNetwork([outputLayer])
andGate.train([[0, 0], [1, 0], [0, 1], [1, 1]], [0, 0, 0, 1], ["time", 5])
print("xorGate")
f = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1))
g = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1))
hiddenLayer = NeuronLayer([f, g])
o = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1))
outputLayer = NeuronLayer([o])
xorGate = NeuronNetwork([hiddenLayer, outputLayer])
xorGate.train([[0, 0], [1, 0], [0, 1], [1, 1]], [0, 1, 1, 0], ["time", 5])
print("halfAdder")
h = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1))
i = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1))
hiddenLayer = NeuronLayer([h, i])
j = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1))
k = Neuron([random.uniform(-1, 1), random.uniform(-1, 1)], random.uniform(-1, 1))
outputLayer = NeuronLayer([j, k])
halfAdder = NeuronNetwork([hiddenLayer, outputLayer])
halfAdder.train([[0, 0], [1, 0], [0, 1], [1, 1]], [[0, 0], [1, 0], [1, 0], [1, 1]], ["epochs", 1000])
print(halfAdder.feed_forward([[1, 1], [1, 1]]))
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import getLimitRange
from lib.core.common import isAdminFromPrivileges
from lib.core.common import isInferenceAvailable
from lib.core.common import isNoneValue
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapNoneDataException
from lib.request import inject
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
def __init__(self):
GenericEnumeration.__init__(self)
def getRoles(self, query2=False):
infoMsg = "fetching database users roles"
rootQuery = queries[Backend.getIdentifiedDbms()].roles
if conf.user == "CU":
infoMsg += " for current user"
conf.user = self.getCurrentUser()
logger.info(infoMsg)
# Set containing the list of DBMS administrators
areAdmins = set()
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if query2:
query = rootQuery.inband.query2
condition = rootQuery.inband.condition2
else:
query = rootQuery.inband.query
condition = rootQuery.inband.condition
if conf.user:
users = conf.user.split(",")
query += " WHERE "
query += " OR ".join("%s = '%s'" % (condition, user) for user in sorted(users))
values = inject.getValue(query, blind=False, time=False)
if not values and not query2:
infoMsg = "trying with table USER_ROLE_PRIVS"
logger.info(infoMsg)
return self.getRoles(query2=True)
if not isNoneValue(values):
for value in values:
user = None
roles = set()
for count in xrange(0, len(value)):
# The first column is always the username
if count == 0:
user = value[count]
# The other columns are the roles
else:
role = value[count]
# In Oracle we get the list of roles as string
roles.add(role)
if user in kb.data.cachedUsersRoles:
kb.data.cachedUsersRoles[user] = list(roles.union(kb.data.cachedUsersRoles[user]))
else:
kb.data.cachedUsersRoles[user] = list(roles)
if not kb.data.cachedUsersRoles and isInferenceAvailable() and not conf.direct:
if conf.user:
users = conf.user.split(",")
else:
if not len(kb.data.cachedUsers):
users = self.getUsers()
else:
users = kb.data.cachedUsers
retrievedUsers = set()
for user in users:
unescapedUser = None
if user in retrievedUsers:
continue
infoMsg = "fetching number of roles "
infoMsg += "for user '%s'" % user
logger.info(infoMsg)
if unescapedUser:
queryUser = unescapedUser
else:
queryUser = user
if query2:
query = rootQuery.blind.count2 % queryUser
else:
query = rootQuery.blind.count % queryUser
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
if count != 0 and not query2:
infoMsg = "trying with table USER_SYS_PRIVS"
logger.info(infoMsg)
return self.getPrivileges(query2=True)
warnMsg = "unable to retrieve the number of "
warnMsg += "roles for user '%s'" % user
logger.warn(warnMsg)
continue
infoMsg = "fetching roles for user '%s'" % user
logger.info(infoMsg)
roles = set()
indexRange = getLimitRange(count, plusOne=True)
for index in indexRange:
if query2:
query = rootQuery.blind.query2 % (queryUser, index)
else:
query = rootQuery.blind.query % (queryUser, index)
role = inject.getValue(query, union=False, error=False)
# In Oracle we get the list of roles as string
roles.add(role)
if roles:
kb.data.cachedUsersRoles[user] = list(roles)
else:
warnMsg = "unable to retrieve the roles "
warnMsg += "for user '%s'" % user
logger.warn(warnMsg)
retrievedUsers.add(user)
if not kb.data.cachedUsersRoles:
errMsg = "unable to retrieve the roles "
errMsg += "for the database users"
raise SqlmapNoneDataException(errMsg)
for user, privileges in kb.data.cachedUsersRoles.items():
if isAdminFromPrivileges(privileges):
areAdmins.add(user)
return kb.data.cachedUsersRoles, areAdmins
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow training code for Plane Strike board game."""
import os
from typing import Sequence
from absl import app
import tensorflow as tf
import common
ITERATIONS = 80000
LEARNING_RATE = 0.002
MODELDIR = './'
LOGDIR = './tf_log'
def train_agent(iterations, modeldir, logdir):
"""Train and convert the model."""
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(
input_shape=(common.BOARD_SIZE, common.BOARD_SIZE)),
tf.keras.layers.Dense(2 * common.BOARD_SIZE**2, activation='relu'),
tf.keras.layers.Dense(common.BOARD_SIZE**2, activation='relu'),
tf.keras.layers.Dense(common.BOARD_SIZE**2, activation='softmax')
])
sgd = tf.keras.optimizers.SGD(learning_rate=LEARNING_RATE)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd)
summary_writer = tf.summary.create_file_writer(logdir)
def predict_fn(board):
return model.predict(board)
# Main training loop
progress_bar = tf.keras.utils.Progbar(iterations)
for i in range(iterations):
board_log, action_log, result_log = common.play_game(predict_fn)
with summary_writer.as_default():
tf.summary.scalar('game_length', len(action_log), step=i)
rewards = common.compute_rewards(result_log)
model.fit(
x=board_log,
y=action_log,
batch_size=1,
verbose=0,
epochs=1,
sample_weight=rewards)
summary_writer.flush()
progress_bar.add(1)
summary_writer.close()
# Convert to tflite model
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the model
with open(os.path.join(modeldir, 'planestrike.tflite'), 'wb') as f:
f.write(tflite_model)
print('TFLite model generated!')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
train_agent(ITERATIONS, MODELDIR, LOGDIR)
if __name__ == '__main__':
app.run(main)
|
# qubit number=5
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += CNOT(3,0) # number=32
prog += Z(3) # number=33
prog += CNOT(3,0) # number=34
prog += RX(0.11938052083641225,1) # number=36
prog += H(0) # number=1
prog += RX(1.4765485471872026,2) # number=35
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += X(0) # number=9
prog += X(4) # number=30
prog += CNOT(0,1) # number=40
prog += X(1) # number=41
prog += CNOT(0,1) # number=42
prog += X(2) # number=11
prog += RX(0.45238934211692994,3) # number=38
prog += Y(1) # number=39
prog += RX(-2.5258404934861938,1) # number=25
prog += H(3) # number=29
prog += CNOT(0,3) # number=22
prog += X(3) # number=23
prog += CNOT(0,3) # number=24
prog += X(0) # number=13
prog += RX(-0.0722566310325653,4) # number=37
prog += X(1) # number=14
prog += CNOT(0,2) # number=26
prog += X(2) # number=27
prog += CNOT(0,2) # number=28
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1243.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
|
from dask_ml.preprocessing import DummyEncoder
from pyspark.ml import Pipeline, feature
from pyspark.ml.feature import Normalizer, VectorAssembler
from optimus.helpers.check import is_spark_dataframe
from optimus.helpers.columns import parse_columns, name_col, get_output_cols, prepare_columns
from optimus.helpers.constants import Actions
from optimus.helpers.raiseit import RaiseIt
from optimus.infer import is_, is_str
def n_gram(df, input_col, n=2):
"""
Converts the input array of strings inside of a Spark DF into an array of n-grams.
:param df: Pyspark dataframe to analyze
    :param input_col: Column to analyze.
:param n: number of elements per n-gram >=1.
:return: Spark DataFrame with n-grams calculated.
"""
is_spark_dataframe(df)
tokenizer = feature.Tokenizer().setInputCol(input_col) | feature.StopWordsRemover()
count = feature.CountVectorizer()
gram = feature.NGram(n=n) | feature.CountVectorizer()
tf = tokenizer | (count, gram) | feature.VectorAssembler()
tfidf = tf | feature.IDF().setOutputCol('features')
tfidf_model = tfidf.fit(df)
df_model = tfidf_model.transform(df)
return df_model, tfidf_model
def one_hot_encoder(df, input_cols, output_col=None, **kargs):
"""
Maps a column of label indices to a column of binary vectors, with at most a single one-value.
:param df: Dataframe to be transformed.
:param input_cols: Columns to be encoded.
:param output_col: Column where the output is going to be saved.
:return: Dataframe with encoded columns.
"""
input_cols = parse_columns(df, input_cols)
if output_col is None:
output_col = name_col(input_cols, "one_hot_encoder")
de = DummyEncoder()
df[output_col] = de.fit_transform(df[input_cols])
return df
# TODO: Must we use the pipeline version?
def vector_assembler(df, input_cols, output_col=None):
"""
Combines a given list of columns into a single vector column.
:param df: Dataframe to be transformed.
:param input_cols: Columns to be assembled.
:param output_col: Column where the output is going to be saved.
:return: Dataframe with assembled column.
"""
input_cols = parse_columns(df, input_cols)
if output_col is None:
output_col = name_col(input_cols, "vector_assembler")
assembler = [VectorAssembler(inputCols=input_cols, outputCol=output_col)]
pipeline = Pipeline(stages=assembler)
df = pipeline.fit(df).transform(df)
return df
def normalizer(df, input_cols, output_col=None, p=2.0):
"""
Transforms a dataset of Vector rows, normalizing each Vector to have unit norm. It takes parameter p, which
specifies the p-norm used for normalization. (p=2) by default.
:param df: Dataframe to be transformed
:param input_cols: Columns to be normalized.
:param output_col: Column where the output is going to be saved.
:param p: p-norm used for normalization.
:return: Dataframe with normalized columns.
"""
    # Check that the columns argument is a string or list data type:
if not is_(input_cols, (str, list)):
RaiseIt.type_error(input_cols, ["str", "list"])
if is_str(input_cols):
input_cols = [input_cols]
if is_(input_cols, (float, int)):
RaiseIt.type_error(input_cols, ["float", "int"])
# Try to create a vector
if len(input_cols) > 1:
df = df.cols.cast(input_cols, "vector")
if output_col is None:
output_col = name_col(input_cols, "normalizer")
# TODO https://developer.ibm.com/code/2018/04/10/improve-performance-ml-pipelines-wide-dataframes-apache-spark-2-3/
normal = [Normalizer(inputCol=col_name, outputCol=output_col, p=p) for col_name in
list(set(input_cols))]
pipeline = Pipeline(stages=normal)
df = pipeline.fit(df).transform(df)
return df
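# A minimal usage sketch (column names are hypothetical): assemble two numeric
# columns into a vector column, then L2-normalize each row vector.
#   df = vector_assembler(df, input_cols=["x", "y"], output_col="features")
#   df = normalizer(df, input_cols="features", p=2.0)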
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#############################################################################
# Copyright (c): 2021, Huawei Tech. Co., Ltd.
# FileName : source.py
# Version :
# Date : 2021-4-7
# Description :
#############################################################################
class Source:
"""
    This is the parent class, used for acquiring multiple metrics at the
    same time.
"""
def __init__(self):
self._channel_manager = None
def start(self):
pass
def stop(self):
pass
@property
def channel_manager(self):
return self._channel_manager
@channel_manager.setter
def channel_manager(self, channel_manager):
self._channel_manager = channel_manager
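# A minimal sketch (hypothetical subclass, not part of the original file)
# showing the intended extension point: subclasses override start()/stop(),
# and the framework injects a channel manager via the property above.
class _ExampleSource(Source):
    def start(self):
        # begin acquiring metrics and hand samples to self.channel_manager
        pass
    def stop(self):
        # stop acquisition and release any resources
        pass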
|
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import linear_assignment
def iou(bbox, candidates):
"""Computer intersection over union.
Parameters
----------
bbox : ndarray
A bounding box in format `(top left x, top left y, width, height)`.
candidates : ndarray
A matrix of candidate bounding boxes (one per row) in the same format
as `bbox`.
Returns
-------
ndarray
The intersection over union in [0, 1] between the `bbox` and each
candidate. A higher score means a larger fraction of the `bbox` is
occluded by the candidate.
"""
bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
candidates_tl = candidates[:, :2]
candidates_br = candidates[:, :2] + candidates[:, 2:]
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
wh = np.maximum(0., br - tl)
area_intersection = wh.prod(axis=1)
area_bbox = bbox[2:].prod()
area_candidates = candidates[:, 2:].prod(axis=1)
return area_intersection / (area_bbox + area_candidates - area_intersection)
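# A quick worked example (not part of the original module): two 10x10 boxes
# offset by (5, 5) overlap in a 5x5 region, so IoU = 25 / (100 + 100 - 25):
#   iou(np.array([0., 0., 10., 10.]), np.array([[5., 5., 10., 10.]]))
#   # -> array([0.14285714])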
def iou_cost(tracks, detections, track_indices=None, detection_indices=None):
"""An intersection over union distance metric.
Parameters
----------
tracks : List[deep_sort.track.Track]
A list of tracks.
detections : List[deep_sort.detection.Detection]
A list of detections.
track_indices : Optional[List[int]]
A list of indices to tracks that should be matched. Defaults to
all `tracks`.
detection_indices : Optional[List[int]]
A list of indices to detections that should be matched. Defaults
to all `detections`.
Returns
-------
ndarray
Returns a cost matrix of shape
len(track_indices), len(detection_indices) where entry (i, j) is
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
for row, track_idx in enumerate(track_indices):
if tracks[track_idx].time_since_update > 1:
cost_matrix[row, :] = linear_assignment.INFTY_COST
continue
bbox = tracks[track_idx].to_tlwh()
candidates = np.asarray([detections[i].tlwh for i in detection_indices])
cost_matrix[row, :] = 1. - iou(bbox, candidates)
return cost_matrix
|
# Created July 2015
# TEASER Development Team
"""
This script loads the VDI 6007 Room 8 project from a *.json file and computes
parameters. The parameters are then compared with the ones from Rouvel.
"""
from teaser.project import Project
import teaser.logic.utilities as utilities
def parameter_room8():
prj = Project(False)
prj.name = "VDI_Verification_Room8"
prj.load_project(utilities.get_full_path(
"examples/examplefiles/VDI6007_Room8.json"))
prj.buildings[0].calc_building_parameter(
number_of_elements=2,
merge_windows=True,
used_library='AixLib')
return prj
if __name__ == "__main__":
prj = parameter_room8()
"""
parameters inner wall
"""
print("Parameters for inner wall")
print("r1_iw:", prj.buildings[0].thermal_zones[0].model_attr.r1_iw,
"K/W ---", "Rouvel: 0.000668640 K/W")
print("c1_iw: ", prj.buildings[0].thermal_zones[0].model_attr.c1_iw / 1000,
"kJ/K ---", "Rouvel: 12391.2 kJ/K")
print("area_iw: ", prj.buildings[0].thermal_zones[0].model_attr.area_iw,
"m2 ---", "Rouvel: 60.50 m2")
print("alpha_weight_conv_iw: ",
prj.buildings[0].thermal_zones[0].model_attr.alpha_conv_inner_iw,
"W/(m2*K) ---", "Rouvel: 2.121487317 W/(m2*K)")
"""
parameters outer wall
"""
print("\nParameters for outer wall")
print("r_rest_ow", prj.buildings[0].thermal_zones[0].model_attr.r_rest_ow,
"K/W ---", "Rouvel: 0.020705927 K/W")
print("r1_ow:", prj.buildings[0].thermal_zones[0].model_attr.r1_ow,
"K/W ---", "Rouvel: 0.001736253 K/W")
print("c1_ow: ", prj.buildings[0].thermal_zones[0].model_attr.c1_ow / 1000,
"kJ/K ---", "Rouvel: 5259.9 kJ/K")
print("area_ow + area_win: ", prj.buildings[0].thermal_zones[
0].model_attr.area_ow + prj.buildings[0].thermal_zones[
0].model_attr.area_win,
"m2 ---", "Rouvel: 25.5 m2")
print("alpha_conv_inner_ow: ",
prj.buildings[0].thermal_zones[0].model_attr.alpha_conv_inner_ow,
"W/(m2*K) ---", "Rouvel: 2.7 W/(m2*K)")
print("alpha_comb_outer_ow: ",
prj.buildings[0].thermal_zones[0].model_attr.alpha_comb_outer_ow,
"W/(m2*K) ---", "Rouvel: 25.0 W/(m2*K)")
prj.buildings[0].thermal_zones[0].model_attr.weightfactor_ow.sort()
print("weightfactor_ow: ",
prj.buildings[0].thermal_zones[0].model_attr.weightfactor_ow,
"Rouvel: 0.057968311, 0.132498994")
prj.buildings[0].thermal_zones[0].model_attr.weightfactor_win.sort()
print("weightfactor_win: ",
prj.buildings[0].thermal_zones[0].model_attr.weightfactor_win,
"Rouvel: 0.404766351, 0.404766351")
|
import vim
from . import breakpoint
from . import event
from . import opts
from . import session
from . import util
from .ui import vimui
class DebuggerInterface:
"""Provides all methods used to control the debugger."""
def __init__(self):
self.breakpoints = breakpoint.Store()
self.ui = vimui.Ui()
self.session_handler = session.SessionHandler(self.ui,
self.breakpoints)
self.event_dispatcher = event.Dispatcher(self.session_handler)
def __del__(self):
self.session_handler.close()
self.session_handler = None
@staticmethod
def reload_options():
util.Environment.reload()
def reload_keymappings(self):
self.session_handler.dispatch_event("reload_keymappings")
def status(self):
return self.session_handler.status()
def status_for_statusline(self):
return self.session_handler.status_for_statusline()
def start_if_ready(self):
self.session_handler.start_if_ready()
def listen(self):
self.session_handler.listen()
def run(self):
"""Tell the debugger to run, until the next breakpoint or end of script.
"""
self.session_handler.run()
def run_to_cursor(self):
"""Run to the current VIM cursor position.
"""
self.session_handler.dispatch_event("run_to_cursor")
def step_over(self):
"""Step over to the next statement.
"""
self.session_handler.dispatch_event("step_over")
def step_into(self):
"""Step into a statement on the current line.
"""
self.session_handler.dispatch_event("step_into")
def step_out(self):
"""Step out of the current statement.
"""
self.session_handler.dispatch_event("step_out")
def handle_return_keypress(self):
"""React to a <enter> keypress event.
"""
return self.event_dispatcher.by_position(self.session_handler)
def handle_double_click(self):
"""React to a mouse double click event.
"""
return self.event_dispatcher.by_position(self.session_handler)
def handle_visual_eval(self):
"""React to eval during visual selection.
"""
return self.event_dispatcher.visual_eval(self.session_handler)
def handle_eval(self, bang, args):
"""Evaluate a code snippet specified by args.
"""
return self.session_handler.dispatch_event("set_eval_expression",
len(bang) > 0, args)
def handle_trace(self, args=None):
"""Trace a code snippet specified by args.
"""
return self.session_handler.dispatch_event("trace", args)
def eval_under_cursor(self):
"""Evaluate the property under the cursor.
"""
return self.event_dispatcher.eval_under_cursor(self.session_handler)
def mark_window_as_closed(self, window):
self.session_handler.ui().mark_window_as_closed(window)
def toggle_window(self, name):
self.session_handler.ui().toggle_window(name)
def toggle_breakpoint_window(self):
self.session_handler.ui().toggle_window("DebuggerBreakpoints")
def get_last_error(self):
return self.session_handler.ui().get_last_error()
def set_breakpoint(self, args=None):
"""Set a breakpoint, specified by args.
"""
self.session_handler.dispatch_event("set_breakpoint", args)
def remove_breakpoint(self, args=None):
"""Remove one or more breakpoints, specified by args.
"""
self.session_handler.dispatch_event("remove_breakpoint", args)
def get_context(self):
"""Get all the variables in the default context
"""
self.session_handler.dispatch_event("get_context")
def detach(self):
"""Detach the debugger, so the script runs to the end.
"""
self.session_handler.dispatch_event("detach")
def close(self):
"""Close the connection, or the UI if already closed.
"""
self.session_handler.stop()
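# A minimal usage sketch (hypothetical; the real plugin instantiates this from
# its Vimscript entry points): one module-level interface object receives all
# debugger commands.
#   debugger = DebuggerInterface()
#   debugger.listen()
#   debugger.run()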
|
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from urllib.parse import parse_qs, urlsplit
except ImportError:
from urlparse import parse_qs, urlsplit
import uuid
from django.contrib.auth.models import AnonymousUser
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test import (
RequestFactory,
override_settings,
)
from django.test import TestCase
from jwkest.jwt import JWT
from oidc_provider import settings
from oidc_provider.tests.app.utils import (
create_fake_user,
create_fake_client,
FAKE_CODE_CHALLENGE,
is_code_valid,
)
from oidc_provider.views import AuthorizeView
class AuthorizeEndpointMixin(object):
    def _auth_request(self, method, data=None, is_user_authenticated=False):
        data = data or {}
        url = reverse('oidc_provider:authorize')
if method.lower() == 'get':
query_str = urlencode(data).replace('+', '%20')
if query_str:
url += '?' + query_str
request = self.factory.get(url)
elif method.lower() == 'post':
request = self.factory.post(url, data=data)
else:
raise Exception('Method unsupported for an Authorization Request.')
        # Simulate that the user is logged in.
request.user = self.user if is_user_authenticated else AnonymousUser()
response = AuthorizeView.as_view()(request)
return response
class AuthorizationCodeFlowTestCase(TestCase, AuthorizeEndpointMixin):
"""
Test cases for Authorize Endpoint using Code Flow.
"""
def setUp(self):
call_command('creatersakey')
self.factory = RequestFactory()
self.user = create_fake_user()
self.client = create_fake_client(response_type='code')
self.client_public = create_fake_client(response_type='code', is_public=True)
self.state = uuid.uuid4().hex
self.nonce = uuid.uuid4().hex
def test_missing_parameters(self):
"""
If the request fails due to a missing, invalid, or mismatching
redirection URI, or if the client identifier is missing or invalid,
the authorization server SHOULD inform the resource owner of the error.
See: https://tools.ietf.org/html/rfc6749#section-4.1.2.1
"""
response = self._auth_request('get')
self.assertEqual(response.status_code, 200)
self.assertEqual(bool(response.content), True)
def test_invalid_response_type(self):
"""
The OP informs the RP by using the Error Response parameters defined
in Section 4.1.2.1 of OAuth 2.0.
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthError
"""
# Create an authorize request with an unsupported response_type.
data = {
'client_id': self.client.client_id,
'response_type': 'something_wrong',
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
}
response = self._auth_request('get', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.has_header('Location'), True)
# Should be an 'error' component in query.
self.assertIn('error=', response['Location'])
def test_user_not_logged(self):
"""
The Authorization Server attempts to Authenticate the End-User by
redirecting to the login view.
See: http://openid.net/specs/openid-connect-core-1_0.html#Authenticates
"""
data = {
'client_id': self.client.client_id,
'response_type': 'code',
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
}
response = self._auth_request('get', data)
# Check if user was redirected to the login view.
self.assertIn(settings.get('LOGIN_URL'), response['Location'])
def test_user_consent_inputs(self):
"""
Once the End-User is authenticated, the Authorization Server MUST
obtain an authorization decision before releasing information to
the Client.
See: http://openid.net/specs/openid-connect-core-1_0.html#Consent
"""
data = {
'client_id': self.client.client_id,
'response_type': 'code',
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
# PKCE parameters.
'code_challenge': FAKE_CODE_CHALLENGE,
'code_challenge_method': 'S256',
}
response = self._auth_request('get', data, is_user_authenticated=True)
# Check if hidden inputs exists in the form,
# also if their values are valid.
input_html = '<input name="{0}" type="hidden" value="{1}" />'
to_check = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri,
'response_type': 'code',
'code_challenge': FAKE_CODE_CHALLENGE,
'code_challenge_method': 'S256',
}
for key, value in iter(to_check.items()):
is_input_ok = input_html.format(key, value) in response.content.decode('utf-8')
self.assertEqual(is_input_ok, True,
msg='Hidden input for "' + key + '" fails.')
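    # For reference (not part of the original tests): per RFC 7636, an S256
    # code_challenge such as FAKE_CODE_CHALLENGE is derived from a
    # code_verifier as BASE64URL(SHA256(verifier)) without padding, e.g.:
    #   import base64, hashlib
    #   base64.urlsafe_b64encode(
    #       hashlib.sha256(verifier.encode('ascii')).digest()
    #   ).rstrip(b'=').decode('ascii')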
def test_user_consent_response(self):
"""
        First,
        if the user denies consent, we must ensure that
        the error response parameters are added to the query component
        of the Redirection URI.
        Second,
        if the user allows the RP, the server MUST return
        the parameters defined in Section 4.1.2 of OAuth 2.0 [RFC6749]
        by adding them as query parameters to the redirect_uri.
"""
data = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri,
'response_type': 'code',
'scope': 'openid email',
'state': self.state,
# PKCE parameters.
'code_challenge': FAKE_CODE_CHALLENGE,
'code_challenge_method': 'S256',
}
response = self._auth_request('post', data, is_user_authenticated=True)
        # Because the user didn't allow the app, an error parameter SHOULD
        # exist in the query.
self.assertIn('error=', response['Location'], msg='error param is missing in query.')
self.assertIn('access_denied', response['Location'], msg='"access_denied" code is missing in query.')
# Simulate user authorization.
data['allow'] = 'Accept' # Will be the value of the button.
response = self._auth_request('post', data, is_user_authenticated=True)
is_code_ok = is_code_valid(url=response['Location'],
user=self.user,
client=self.client)
self.assertEqual(is_code_ok, True,
msg='Code returned is invalid.')
# Check if the state is returned.
        state = (response['Location'].split('state='))[1].split('&')[0]
        self.assertEqual(state, self.state, msg='State changed or is missing.')
def test_user_consent_skipped(self):
"""
        If the user previously gave consent to some client (for a specific
        list of scopes), the server may skip prompting for the same
        authorization again.
"""
data = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri,
'response_type': 'code',
'scope': 'openid email',
'state': self.state,
'allow': 'Accept',
}
request = self.factory.post(reverse('oidc_provider:authorize'),
data=data)
        # Simulate that the user is logged in.
request.user = self.user
with self.settings(OIDC_SKIP_CONSENT_ALWAYS=True):
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertIn('code', response['Location'], msg='Code is missing in the returned url.')
response = self._auth_request('post', data, is_user_authenticated=True)
is_code_ok = is_code_valid(url=response['Location'],
user=self.user,
client=self.client)
self.assertEqual(is_code_ok, True, msg='Code returned is invalid.')
del data['allow']
response = self._auth_request('get', data, is_user_authenticated=True)
is_code_ok = is_code_valid(url=response['Location'],
user=self.user,
client=self.client)
self.assertEqual(is_code_ok, True, msg='Code returned is invalid or missing.')
def test_response_uri_is_properly_constructed(self):
data = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri + "?redirect_state=xyz",
'response_type': 'code',
'scope': 'openid email',
'state': self.state,
'allow': 'Accept',
}
response = self._auth_request('post', data, is_user_authenticated=True)
# TODO
def test_public_client_auto_approval(self):
"""
        It is recommended not to auto-approve requests from non-confidential
        (public) clients.
"""
data = {
'client_id': self.client_public.client_id,
'response_type': 'code',
'redirect_uri': self.client_public.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
}
with self.settings(OIDC_SKIP_CONSENT_ALWAYS=True):
response = self._auth_request('get', data, is_user_authenticated=True)
self.assertIn('Request for Permission', response.content.decode('utf-8'))
def test_prompt_parameter(self):
"""
Specifies whether the Authorization Server prompts the End-User for reauthentication and consent.
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
"""
data = {
'client_id': self.client.client_id,
'response_type': self.client.response_type,
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
}
data['prompt'] = 'none'
response = self._auth_request('get', data)
# An error is returned if an End-User is not already authenticated.
self.assertIn('login_required', response['Location'])
response = self._auth_request('get', data, is_user_authenticated=True)
# An error is returned if the Client does not have pre-configured consent for the requested Claims.
self.assertIn('interaction_required', response['Location'])
class AuthorizationImplicitFlowTestCase(TestCase, AuthorizeEndpointMixin):
"""
Test cases for Authorization Endpoint using Implicit Flow.
"""
def setUp(self):
call_command('creatersakey')
self.factory = RequestFactory()
self.user = create_fake_user()
self.client = create_fake_client(response_type='id_token token')
self.client_public = create_fake_client(response_type='id_token token', is_public=True)
self.client_no_access = create_fake_client(response_type='id_token')
self.client_public_no_access = create_fake_client(response_type='id_token', is_public=True)
self.state = uuid.uuid4().hex
self.nonce = uuid.uuid4().hex
def test_missing_nonce(self):
"""
The `nonce` parameter is REQUIRED if you use the Implicit Flow.
"""
data = {
'client_id': self.client.client_id,
'response_type': self.client.response_type,
'redirect_uri': self.client.default_redirect_uri,
'scope': 'openid email',
'state': self.state,
}
response = self._auth_request('get', data, is_user_authenticated=True)
self.assertIn('#error=invalid_request', response['Location'])
def test_idtoken_token_response(self):
"""
Implicit client requesting `id_token token` receives both id token
and access token as the result of the authorization request.
"""
data = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri,
'response_type': self.client.response_type,
'scope': 'openid email',
'state': self.state,
'nonce': self.nonce,
'allow': 'Accept',
}
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertIn('access_token', response['Location'])
self.assertIn('id_token', response['Location'])
# same for public client
        data['client_id'] = self.client_public.client_id
        data['redirect_uri'] = self.client_public.default_redirect_uri
        data['response_type'] = self.client_public.response_type
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertIn('access_token', response['Location'])
self.assertIn('id_token', response['Location'])
def test_idtoken_response(self):
"""
Implicit client requesting `id_token` receives
only an id token as the result of the authorization request.
"""
data = {
'client_id': self.client_no_access.client_id,
'redirect_uri': self.client_no_access.default_redirect_uri,
'response_type': self.client_no_access.response_type,
'scope': 'openid email',
'state': self.state,
'nonce': self.nonce,
'allow': 'Accept',
}
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertNotIn('access_token', response['Location'])
self.assertIn('id_token', response['Location'])
# same for public client
        data['client_id'] = self.client_public_no_access.client_id
        data['redirect_uri'] = self.client_public_no_access.default_redirect_uri
        data['response_type'] = self.client_public_no_access.response_type
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertNotIn('access_token', response['Location'])
self.assertIn('id_token', response['Location'])
def test_idtoken_token_at_hash(self):
"""
Implicit client requesting `id_token token` receives
`at_hash` in `id_token`.
"""
data = {
'client_id': self.client.client_id,
'redirect_uri': self.client.default_redirect_uri,
'response_type': self.client.response_type,
'scope': 'openid email',
'state': self.state,
'nonce': self.nonce,
'allow': 'Accept',
}
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertIn('id_token', response['Location'])
# obtain `id_token` portion of Location
components = urlsplit(response['Location'])
fragment = parse_qs(components[4])
id_token = JWT().unpack(fragment["id_token"][0].encode('utf-8')).payload()
self.assertIn('at_hash', id_token)
def test_idtoken_at_hash(self):
"""
Implicit client requesting `id_token` should not receive
`at_hash` in `id_token`.
"""
data = {
'client_id': self.client_no_access.client_id,
'redirect_uri': self.client_no_access.default_redirect_uri,
'response_type': self.client_no_access.response_type,
'scope': 'openid email',
'state': self.state,
'nonce': self.nonce,
'allow': 'Accept',
}
response = self._auth_request('post', data, is_user_authenticated=True)
self.assertIn('id_token', response['Location'])
# obtain `id_token` portion of Location
components = urlsplit(response['Location'])
fragment = parse_qs(components[4])
id_token = JWT().unpack(fragment["id_token"][0].encode('utf-8')).payload()
self.assertNotIn('at_hash', id_token)
class AuthorizationHybridFlowTestCase(TestCase, AuthorizeEndpointMixin):
"""
Test cases for Authorization Endpoint using Hybrid Flow.
"""
def setUp(self):
call_command('creatersakey')
self.factory = RequestFactory()
self.user = create_fake_user()
self.client_code_idtoken_token = create_fake_client(response_type='code id_token token', is_public=True)
self.state = uuid.uuid4().hex
self.nonce = uuid.uuid4().hex
# Base data for the auth request.
self.data = {
'client_id': self.client_code_idtoken_token.client_id,
'redirect_uri': self.client_code_idtoken_token.default_redirect_uri,
'response_type': self.client_code_idtoken_token.response_type,
'scope': 'openid email',
'state': self.state,
'nonce': self.nonce,
'allow': 'Accept',
}
def test_code_idtoken_token_response(self):
"""
Implicit client requesting `id_token token` receives both id token
and access token as the result of the authorization request.
"""
response = self._auth_request('post', self.data, is_user_authenticated=True)
self.assertIn('#', response['Location'])
self.assertIn('access_token', response['Location'])
self.assertIn('id_token', response['Location'])
self.assertIn('state', response['Location'])
self.assertIn('code', response['Location'])
# Validate code.
is_code_ok = is_code_valid(url=response['Location'],
user=self.user,
client=self.client_code_idtoken_token)
self.assertEqual(is_code_ok, True, msg='Code returned is invalid.')
@override_settings(OIDC_TOKEN_EXPIRE=36000)
def test_access_token_expiration(self):
"""
Add ten hours of expiration to access_token. Check for the expires_in query in fragment.
"""
response = self._auth_request('post', self.data, is_user_authenticated=True)
self.assertIn('expires_in=36000', response['Location'])
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
"""
The DE0 Nano does not have an explicit USB port. Instead, you'll need to connect an external ULPI PHY breakout,
such as https://www.waveshare.com/wiki/USB3300_USB_HS_Board.
See the pin definitions below for connection information (ULPIResource).
The DE0 Nano is an -unsupported- platform! To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.de0_nano:DE0NanoPlatform"
"""
import os
import logging
import subprocess
from nmigen import *
from nmigen.build import *
from nmigen.vendor.intel import IntelPlatform
from nmigen_boards.resources import *
from luna.gateware.platform.core import LUNAPlatform
__all__ = ["DE0NanoPlatform"]
class DE0NanoClockAndResetController(Elaboratable):
""" Controller for de0_nano's clocking and global resets. """
def __init__(self, *, clock_frequencies=None, clock_signal_name=None):
pass
def elaborate(self, platform):
m = Module()
# Create our domains; but don't do anything else for them, for now.
m.domains.sync = ClockDomain()
m.domains.usb = ClockDomain()
m.domains.jt51 = ClockDomain()
m.domains.adat = ClockDomain()
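        # Note (added for clarity): ALTPLL's INCLK0_INPUT_FREQUENCY parameter
        # is the input clock *period* in picoseconds, so 16666 ps corresponds
        # to the 60 MHz ULPI clock. The divide/multiply ratios below then give
        # ~60 MHz (sync), 60 * 13/218 ≈ 3.58 MHz (jt51, the YM2151 rate) and
        # 60 * 17/83 ≈ 12.29 MHz (adat, ≈ 256 x 48 kHz).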
m.submodules.mainpll = Instance("ALTPLL",
p_BANDWIDTH_TYPE = "AUTO",
p_CLK0_DIVIDE_BY = 1,
p_CLK0_DUTY_CYCLE = 50,
p_CLK0_MULTIPLY_BY = 1,
p_CLK0_PHASE_SHIFT = 0,
p_INCLK0_INPUT_FREQUENCY = 16666,
p_OPERATION_MODE = "NORMAL",
# Drive our clock from the USB clock
# coming from the USB clock pin of the USB3300
i_inclk = ClockSignal("usb"),
o_clk = ClockSignal("sync"),
)
m.submodules.jt51pll = Instance("ALTPLL",
p_BANDWIDTH_TYPE = "AUTO",
p_CLK0_DIVIDE_BY = 218,
p_CLK0_DUTY_CYCLE = 50,
p_CLK0_MULTIPLY_BY = 13,
p_CLK0_PHASE_SHIFT = 0,
p_INCLK0_INPUT_FREQUENCY = 16666,
p_OPERATION_MODE = "NORMAL",
# Drive our clock from the USB clock
# coming from the USB clock pin of the USB3300
i_inclk = ClockSignal("usb"),
o_clk = ClockSignal("jt51"),
)
m.submodules.adatpll = Instance("ALTPLL",
p_BANDWIDTH_TYPE = "AUTO",
p_CLK0_DIVIDE_BY = 83,
p_CLK0_DUTY_CYCLE = 50,
p_CLK0_MULTIPLY_BY = 17,
p_CLK0_PHASE_SHIFT = 0,
p_INCLK0_INPUT_FREQUENCY = 16666,
p_OPERATION_MODE = "NORMAL",
# Drive our clock from the USB clock
# coming from the USB clock pin of the USB3300
i_inclk = ClockSignal("usb"),
o_clk = ClockSignal("adat"),
)
# Use a blinky to see if the clock signal works
# from nmigen_boards.test.blinky import Blinky
# m.submodules += Blinky()
return m
class DE0NanoPlatform(IntelPlatform, LUNAPlatform):
""" This is a de0_nano board with an USB3300 PHY attached to JP_2 """
name = "de0_nano"
device = "EP4CE22"
package = "F17"
speed = "C6"
default_clk = "clk_50MHz"
clock_domain_generator = DE0NanoClockAndResetController
default_usb_connection = "ulpi"
ignore_phy_vbus = True
def __init__(self, *args, **kwargs):
logging.warning("This platform is not officially supported, and thus not tested. Your results may vary.")
logging.warning("Note also that this platform does not use the DE0 nano's main USB port!")
logging.warning("You'll need to connect a ULPI PHY breakout. See the platform file for more info.")
super().__init__(*args, **kwargs)
#
# I/O resources.
#
resources = [
# Primary clock generator clocks.
Resource("clk_50MHz", 0, Pins("R8", dir="i"), Clock(50e6), Attrs(io_standard="3.3-V LVTTL")),
# USB2 / ULPI section of the USB3300.
ULPIResource("ulpi", 0,
data="JP_2:27 JP_2:25 JP_2:23 JP_2:21 JP_2:19 JP_2:17 JP_2:15 JP_2:13",
clk="JP_2:1", # this needs to be a clock pin of the FPGA or the core won't work
dir="JP_2:18", nxt="JP_2:16", stp="JP_2:14", rst="JP_2:22",
attrs=Attrs(io_standard="3.3-V LVCMOS")
),
UARTResource(0,
# GND on JP1 Pin 12.
rx="JP_1:8", tx="JP_1:10",
attrs=Attrs(io_standard="3.3-V LVTTL")),
*LEDResources(
pins="A15 A13 B13 A11 D1 F3 B1 L3",
attrs=Attrs(io_standard="3.3-V LVTTL")),
*ButtonResources(
pins="J15 E1", invert=True,
attrs=Attrs(io_standard="3.3-V LVTTL")),
*SwitchResources(
pins="M1 T8 B9 M15",
attrs=Attrs(io_standard="3.3-V LVTTL")),
SDRAMResource(0,
clk="R4", cke="L7", cs_n="P6", we_n="C2", ras_n="L2", cas_n="L1",
ba="M7 M6", a="P2 N5 N6 M8 P8 T7 N8 T6 R1 P1 N2 N1 L4",
dq="G2 G1 L8 K5 K2 J2 J1 R7 T4 T2 T3 R3 R5 P3 N3 K1", dqm="R6 T5",
attrs=Attrs(io_standard="3.3-V LVTTL")),
# Accelerometer
Resource("acc", 0,
Subsignal("cs_n", Pins("G5", dir="o")),
Subsignal("int", Pins("M2", dir="i")),
Attrs(io_standard="3.3-V LVTTL")),
# I2C is part of the Accelerometer
I2CResource(0,
scl="F2", sda="F1",
attrs=Attrs(io_standard="3.3-V LVTTL")),
# ADC
Resource("adc", 0,
Subsignal("cs_n", Pins("A10")),
Subsignal("saddr", Pins("B10")),
Subsignal("sclk", Pins("B14")),
Subsignal("sdat", Pins("A9")),
Attrs(io_standard="3.3-V LVTTL")),
        # EPCS serial configuration device
Resource("epcs", 0,
Subsignal("data0", Pins("H2")),
Subsignal("dclk", Pins("H1")),
Subsignal("ncs0", Pins("D2")),
Subsignal("asd0", Pins("C1")),
Attrs(io_standard="3.3-V LVTTL")),
Resource("adat", 0,
Subsignal("tx", Pins("JP_3:5", dir="o")),
Subsignal("rx", Pins("JP_3:6", dir="i")),
Attrs(io_standard="3.3-V LVTTL")),
]
connectors = [
# PIN 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
Connector("JP", 1, "A8 D3 B8 C3 A2 A3 B3 B4 A4 B5 - - A5 D5 B6 A6 B7 D6 A7 C6 C8 E6 E7 D8 E8 F8 F9 E9 - - C9 D9 E11 E10 C11 B11 A12 D11 D12 B12"),
Connector("JP", 2, "T9 F13 R9 T15 T14 T13 R13 T12 R12 T11 - - T10 R11 P11 R10 N12 P9 N9 N11 L16 K16 R16 L15 P15 P16 R14 N16 - - N15 P14 L14 N14 M10 L13 J16 K15 J13 J14"),
Connector("JP", 3, "- E15 E16 M16 A14 B16 C14 C16 C15 D16 D15 D14 F15 F16 F14 G16 G15 - - - - - - - - -")
]
@property
def file_templates(self):
templates = super().file_templates
templates["{{name}}.qsf"] += r"""
set_global_assignment -name OPTIMIZATION_MODE "Aggressive Performance"
set_global_assignment -name FITTER_EFFORT "Standard Fit"
set_global_assignment -name PHYSICAL_SYNTHESIS_EFFORT "Extra"
set_instance_assignment -name DECREASE_INPUT_DELAY_TO_INPUT_REGISTER OFF -to *ulpi*
set_instance_assignment -name INCREASE_DELAY_TO_OUTPUT_PIN OFF -to *ulpi*
set_global_assignment -name NUM_PARALLEL_PROCESSORS ALL
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_sincf.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_interpol.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir_ram.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir8.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir4.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_fir.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/filter/jt51_dac2.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_timers.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_sh.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_reg.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_pm.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_phrom.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_phinc_rom.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_pg.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_op.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_noise_lfsr.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_noise.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_mod.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_mmr.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_lin2exp.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_lfo.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_kon.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_exprom.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_exp2lin.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_eg.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_csr_op.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_csr_ch.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51_acc.v
set_global_assignment -name VERILOG_FILE ../gateware/jt51/hdl/jt51.v
"""
templates["{{name}}.sdc"] += r"""
create_clock -name "clk_60MHz" -period 16.667 [get_ports "ulpi_0__clk__io"]
"""
return templates
def toolchain_program(self, products, name):
""" Programs the attached de0_nano board via a Quartus programming cable. """
quartus_pgm = os.environ.get("QUARTUS_PGM", "quartus_pgm")
with products.extract("{}.sof".format(name)) as bitstream_filename:
subprocess.check_call([quartus_pgm, "--haltcc", "--mode", "JTAG",
"--operation", "P;" + bitstream_filename])
|
#!/usr/bin/env micropython
from rubikscolorresolver import resolve_colors
import sys
resolve_colors(sys.argv)
|
from nonebot import on_command, CommandSession
import requests
from nonebot.permission import *
__plugin_name__ = 'Library seat grabbing'
__plugin_usage__ = r"""
Library seat grabbing
1. Status query
   Purpose: show the current headcount of each library study room
   Command: 状态查询
The features below require first messaging the bot privately with: 图书馆
1. My current status
   Purpose: show the latest status of your library reservation
   Command: 预约状态
2. Grab a seat
   Purpose: grab a seat automatically (books one as soon as a free seat is detected)
   Command: 抢座
3. Cancel reservation
   Purpose: cancel the currently reserved seat
   Command: 取消预约
"""
base_url = 'http://cloud.mrgump.org:9999/'
login_url = base_url + 'login?qid={}&username={}&password={}'
user_status_url = base_url + 'show?qid={}'
user_exists_url = base_url + 'user_exists?qid={}'
query_url = base_url + 'get_info'
cancel_url = base_url + 'cancel?qid={}'
grab_url = base_url + 'grab?qid={}'
@on_command('my_status', aliases=['预约状态'], permission=PRIVATE)
async def my_status(session: CommandSession):
    qid = session.event["user_id"]
    await session.send(requests.get(user_status_url.format(qid)).text)
@on_command('get_info', aliases=['状态查询'], only_to_me=False)
async def get_info(session: CommandSession):
    res = requests.get(query_url).text
    await session.send(res)
@on_command('login', aliases=['图书馆'], permission=PRIVATE)
async def login(session: CommandSession):
    return
@login.args_parser
async def _(session: CommandSession):
    qid = session.event["user_id"]
    if session.is_first_run:
        # The backend replies with the literal string '检测到用户!' ("user
        # detected") when the account already exists; it must stay as-is.
        if requests.get(user_exists_url.format(qid)).text == '检测到用户!':
            await session.send('Your account was found!')
            await session.send(requests.get(user_status_url.format(qid)).text)
            return
        else:
            session.pause('Please log in by sending: username password\ne.g. 2199999999 123456')
    else:
        args = session.current_arg_text.strip().split(' ')
        if len(args) == 2:
            user_name = args[0]
            password = args[1]
            res = requests.get(login_url.format(qid, user_name, password)).text
            # '登陆失败!密码错误!' is the backend's "login failed, wrong
            # password" response and must stay as-is for the comparison.
            if res != '登陆失败!密码错误!':
                await session.send('Login successful!')
                await session.send(res)
            else:
                await session.send('Wrong username or password, login failed!')
        else:
            await session.finish('Invalid format, ending the session!')
@on_command('cancel', aliases=['取消预约'], permission=PRIVATE)
async def cancel(session: CommandSession):
    qid = session.event["user_id"]
    requests.get(cancel_url.format(qid))
    await session.send('Reservation cancelled')
    await session.send(requests.get(user_status_url.format(qid)).text)
@on_command('grab', aliases=['抢座'], permission=PRIVATE)
async def grab(session: CommandSession):
    await session.send('Grabbing a seat, this may take a while~')
    qid = session.event["user_id"]
    res = requests.get(grab_url.format(qid))
    if res.status_code == 200:
        await session.send('Seat grabbed successfully')
        await session.send(res.text)
    else:
        await session.send('Seat grab failed, please try again!')
|
# -*- coding: utf-8 -*-
# This information is located in its own file so that it can be loaded
# without importing the main package when its dependencies are not installed.
# See: https://packaging.python.org/guides/single-sourcing-package-version
__author__ = """Nils Hempelmann"""
__email__ = 'info@nilshempelmann.de'
__version__ = '1.4.1'
|
from unittest import TestCase
from bricklayer.utils.commands import open_ldd_command
import platform
import mock
class CommandsTest(TestCase):
    def test_it_creates_the_right_windows_command(self):
        with mock.patch('platform.system', return_value='Windows'):
            self.assertEqual(open_ldd_command("file.txt"), ["start", "/wait", "file.txt"])
    def test_it_creates_the_right_mac_command(self):
        with mock.patch('platform.system', return_value='Darwin'):
            self.assertEqual(open_ldd_command("file.txt"), ["open", "-W", "file.txt"])
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .convert_call_func import convert_call # noqa: F401
from .convert_operators import cast_bool_if_necessary # noqa: F401
from .convert_operators import convert_assert # noqa: F401
from .convert_operators import convert_ifelse # noqa: F401
from .convert_operators import convert_len # noqa: F401
from .convert_operators import convert_logical_and # noqa: F401
from .convert_operators import convert_logical_not # noqa: F401
from .convert_operators import convert_logical_or # noqa: F401
from .convert_operators import convert_pop # noqa: F401
from .convert_operators import convert_print # noqa: F401
from .convert_operators import convert_shape_compare # noqa: F401
from .convert_operators import convert_var_dtype # noqa: F401
from .convert_operators import convert_var_shape # noqa: F401
from .convert_operators import convert_var_shape_simple # noqa: F401
from .convert_operators import eval_if_exist_else_none # noqa: F401
from .convert_operators import choose_shape_attr_or_api # noqa: F401
from .convert_operators import convert_while_loop # noqa: F401
from .variable_trans_func import create_bool_as_type # noqa: F401
from .variable_trans_func import create_fill_constant_node # noqa: F401
from .variable_trans_func import create_static_variable_gast_node # noqa: F401
from .variable_trans_func import data_layer_not_check # noqa: F401
from .variable_trans_func import to_static_variable # noqa: F401
from .variable_trans_func import to_static_variable_gast_node # noqa: F401
__all__ = []
|
import os
import subprocess
import sys
kolibri_dir = os.path.abspath(os.path.join('src', 'kolibri'))
win_dir = os.path.abspath(os.path.join('dist', 'win', 'Kolibri'))
kolibri_dest_dir = os.path.join(win_dir, 'kolibri')
from .version import get_env_with_version_set
def do_build(args):
if 'android' in args and '--docker' in args:
subprocess.call(['docker', 'build', '-t', 'android_kolibri', '.'])
subprocess.call(['docker/android/rundocker.sh'])
return
elif '--docker' in args:
print("Docker builds not supported for this platform.")
print("Attempting non-docker build...")
try:
print("Building app...")
from . import stdlib
# see function docstring for more info on why we do this.
stdlib.generate_stdlib_imports()
env = get_env_with_version_set(args)
# This is needed to avoid errors when scanning python
# code for dependencies.
if sys.platform.startswith('darwin'):
env['PYTHONPATH'] = os.path.join(kolibri_dir, 'dist')
cmd = ['pew', 'build']
if args and len(args) > 0:
cmd.extend(args)
subprocess.call(cmd, env=env)
if sys.platform.startswith('win'):
stdlib.generate_python_bytecode(kolibri_dest_dir)
    except Exception:
        raise
|
"""chatapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('home.urls')),
]
|
import random
n1 = input('Name of the first student: ')
n2 = input('Name of the second student: ')
n3 = input('Name of the third student: ')
n4 = input('Name of the fourth student: ')
lista = [n1, n2, n3, n4]
escolhido = random.choice(lista)
print('The chosen student was {}.'.format(escolhido))
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import salt.exceptions
import saltext.vmware.utils.cluster as utils_cluster
from pyVmomi import vim
def test_get_clusters(service_instance, integration_test_config):
datacenter_name = list(integration_test_config["datacenters"].keys())[0]
cluster_name = list(integration_test_config["datacenters"][datacenter_name].keys())[0]
# verify we get results with valid combinations of parameters
all_clusters = utils_cluster.get_clusters(service_instance)
for cluster in all_clusters:
assert isinstance(cluster, vim.ClusterComputeResource)
datacenter_clusters = utils_cluster.get_clusters(
service_instance, datacenter_name=datacenter_name
)
assert len(datacenter_clusters) <= len(all_clusters)
named_cluster = utils_cluster.get_clusters(
service_instance, datacenter_name=datacenter_name, cluster_name=cluster_name
)
assert len(named_cluster) <= len(datacenter_clusters)
# verify we get an exception with invalid parameters
with pytest.raises(salt.exceptions.ArgumentValueError):
utils_cluster.get_clusters(service_instance, cluster_name=cluster_name)
    # verify we get 0 results for parameter values that don't exist
datacenter_clusters = utils_cluster.get_clusters(
service_instance, datacenter_name="DNE" + datacenter_name
)
assert len(datacenter_clusters) == 0
named_cluster = utils_cluster.get_clusters(
service_instance, datacenter_name=datacenter_name, cluster_name="DNE" + cluster_name
)
assert len(named_cluster) == 0
|
from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
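# Decompress using the external bunzip2 binary when available, falling back to
# the bz2 module itself; this cross-checks compressed output against an
# independent implementation.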
def ext_decompress(data):
global has_cmdline_bunzip2
if has_cmdline_bunzip2 is None:
has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
if has_cmdline_bunzip2:
return subprocess.check_output(['bunzip2'], input=data)
else:
return bz2.decompress(data)
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
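    # EMPTY_DATA is a complete bzip2 stream whose decompressed payload is
    # empty (what bz2.compress(b'') produces).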
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
# is at least 100,000 bytes, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
BIG_TEXT = bytearray(128*1024)
for fname in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):
with open(fname, 'rb') as fh:
test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
if test_size > 128*1024:
break
BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
def setUp(self):
fd, self.filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
# compresslevel is keyword-only
self.assertRaises(TypeError, BZ2File, os.devnull, "r", 3)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
            text = b''
            while True:
                chunk = bz2f.read(10)
                if not chunk:
                    break
                text += chunk
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
            text = b''
            while True:
                chunk = bz2f.read(10)
                if not chunk:
                    break
                text += chunk
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
            n = 0
            while True:
                chunk = self.TEXT[n*10:(n+1)*10]
                if not chunk:
                    break
                bz2f.write(chunk)
                n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with support.start_threads(threads):
pass
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
filename = pathlib.Path(self.filename)
with BZ2File(filename, "wb") as f:
f.write(self.DATA)
with BZ2File(filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
"""Decompressed data buffering should be limited"""
bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\0')
max_decomp = 1 + DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
            chunk = self.TEXT[n*10:(n+1)*10]
            if not chunk:
                break
            data += bz2c.compress(chunk)
n += 1
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
            chunk = self.DATA[n*10:(n+1)*10]
            if not chunk:
                break
            text += bz2d.decompress(chunk)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
# Test reusing input buffer after moving existing
# contents to beginning
bzd = BZ2Decompressor()
out = []
# Create input buffer and fill it
self.assertEqual(bzd.decompress(self.DATA[:100],
max_length=0), b'')
# Retrieve some results, freeing capacity at beginning
# of input buffer
out.append(bzd.decompress(b'', 2))
# Add more data that fits into input buffer after
# moving existing data to beginning
out.append(bzd.decompress(self.DATA[100:105], 15))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[105:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
# Test reusing input buffer by appending data at the
# end right away
bzd = BZ2Decompressor()
out = []
# Create input buffer and empty it
self.assertEqual(bzd.decompress(self.DATA[:200],
max_length=0), b'')
out.append(bzd.decompress(b''))
# Fill buffer with new data
out.append(bzd.decompress(self.DATA[200:280], 2))
# Append some more data, not enough to require resize
out.append(bzd.decompress(self.DATA[280:300], 2))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
# Test reusing input buffer after extending it
bzd = BZ2Decompressor()
out = []
# Create almost full input buffer
out.append(bzd.decompress(self.DATA[:200], 5))
# Add even more data to it, requiring resize
out.append(bzd.decompress(self.DATA[200:300], 5))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
bzd = BZ2Decompressor()
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
# Previously, a second call could crash due to internal inconsistency
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
@support.refcount_test
def test_refleaks_in___init__(self):
gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
bzd = BZ2Decompressor()
refs_before = gettotalrefcount()
for i in range(100):
bzd.__init__()
self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
|
import sys
import io
import httplib2
import os
from mimetypes import MimeTypes
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError, flow_from_clientsecrets
try:
from googleapiclient.errors import HttpError
from apiclient import discovery
import oauth2client
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from oauth2client import client
from oauth2client import tools
except ImportError:
    print('google-api-python-client is not installed. Try:')
print('sudo pip install --upgrade google-api-python-client')
sys.exit(1)
"""
GoogleDrive: Class for interacting with a google drive account programmatically
Example usage:
google_drive = GoogleDrive()
google_drive.upload("./example.mp4")
file_ids = google_drive.get_file_ids("example.mp4")
for file_id in file_ids:
google_drive.share(file_id, "email@gmail.com")
print(google_drive.drive_files)
google_drive.pull_and_set_drive_files()
print(google_drive.drive_files)
google_drive.create_folder("Test folder")
    file_ids = google_drive.get_file_ids("Test folder")
    print(google_drive.drive_files)
    google_drive.delete(file_ids[0])
print(google_drive.drive_files)
"""
"""
TODO: constructor - Do not auth in the constructor. Not generic enough.
Make the program that is using it set the application name and secrets
file path and call auth explicitly.
"""
class GoogleDrive:
"""
GoogleDrive(): constructor
returns:
GoogleDrive class object
params:
scopes: String - google developer scope. Example: 'https://www.googleapis.com/auth/drive'
client_secret_file_path: String - path to google creds json from google developer account
application_name: String - google developer application name
drive_files: List - used by multiple functions in the class to have a local list of google drive files
"""
    def __init__(
            self,
            scopes='https://www.googleapis.com/auth/drive',
            client_secret_file_path='./client_secrets.json',
            application_name='',
            drive_files=None
    ):
        self.scopes = scopes
        self.client_secret_file_path = client_secret_file_path
        self.application_name = application_name
        # Guard against the shared-mutable-default pitfall: a list default
        # would be reused across every GoogleDrive instance.
        self.drive_files = drive_files if drive_files is not None else []
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
self.service = discovery.build('drive', 'v3', http=http)
"""
GoogleDrive(): get_credentials - checks if credentials already exist, if not save them to credential
params:
returns:
credentials for oauth2client
"""
def get_credentials(self):
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(self.client_secret_file_path, self.scopes)
flow.user_agent = self.application_name
credentials = tools.run_flow(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
"""
GoogleDrive(): upload - uploads a file to google drive
params:
file_path: String - full file path to the file that will be uploaded
folder_id: String - Google Drive ID for the folder that you want to upload the file. Defaults to None
returns:
file id: String - the id of the uploaded file from Google Drive
"""
def upload(self, file_path, folder_id=None):
mime = MimeTypes()
file_metadata = { 'name': os.path.basename(file_path) }
if folder_id:
file_metadata['parents'] = [folder_id]
media = MediaFileUpload(
file_path,
mimetype=mime.guess_type(os.path.basename(file_path))[0],
resumable=True
)
        try:
            file = self.service.files().create(
                body=file_metadata,
                media_body=media,
                fields='id'
            ).execute()
        except HttpError:
            print('File could not be uploaded to Google Drive. It may be corrupted.')
            return None  # otherwise `file` would be unbound below
        print(file.get('id'))
        return file.get('id')
"""
GoogleDrive(): pull_and_set_drive_files - grabs files from authed google drive account and saves them on a class variable: drive_files
params:
returns:
"""
def pull_and_set_drive_files(self):
results = self.service.files().list(fields="nextPageToken, files(id, name, mimeType)").execute()
self.drive_files = results.get('files', [])
"""
GoogleDrive(): get_file_ids - grabs and returns the id(s) for a specific file_name from authed google drive account
params:
file_name: String - name of the file on Google Drive
returns:
List: - with file id(s) because there can be multiple instances of a file with the same name
"""
def get_file_ids(self, file_name):
self.pull_and_set_drive_files()
file_ids = []
for item in self.drive_files:
if file_name == item['name']:
file_ids.append(item['id'])
return file_ids
"""
GoogleDrive(): delete - remove file with specific id from Google Drive
params:
file_id: String - ID of the file that will be deleted from Google Drive
returns:
"""
def delete(self, file_id):
self.service.files().delete(fileId=file_id).execute()
self.pull_and_set_drive_files() # update list after deletion
"""
    GoogleDrive(): get_folder_contents_by_id - return all file id(s) from a given folder
params:
folder_id: String - folder id in google drive that will be checked for its contents
returns:
List: - returns a list of the file id(s) in a given folder
"""
def get_folder_contents_by_id(self, folder_id):
folder_contents = []
page_token = None
        while True:
            try:
                # Drive v3 has no children() collection; list files whose
                # parents include this folder instead.
                param = {
                    'q': "'{}' in parents".format(folder_id),
                    'fields': 'nextPageToken, files(id)',
                }
                if page_token:
                    param['pageToken'] = page_token
                children = self.service.files().list(**param).execute()
                for child in children.get('files', []):
                    folder_contents.append(child['id'])
                page_token = children.get('nextPageToken')
                if not page_token:
                    break
            except HttpError as error:
                print('An error occurred: {}'.format(error))
                break
return folder_contents
"""
GoogleDrive(): download - downloads a file to the current or specified directory
params:
file_id: String - ID of the file that will be downloaded from Google Drive
path: String - path to directory where the download will go
returns:
"""
    def download(self, file_id, path=None):
        # Evaluate the default at call time; os.getcwd() in the signature
        # would be frozen at import time.
        if path is None:
            path = os.getcwd()
        request = self.service.files().get_media(fileId=file_id)
        name = self.service.files().get(fileId=file_id).execute()['name']
        fh = io.BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
            print(int(status.progress() * 100))
        with open(os.path.join(path, name), 'wb') as f:
            f.write(fh.getvalue())
"""
GoogleDrive(): create_folder - creates either a single folder or replicates entire local folder structure in Google Drive
params:
folder: String - name of the folder that will be created
recursive: Bool - will walk down the path starting at the root and upload the contents.
returns:
String: - Id of the folder that is now created in Google Drive
"""
def create_folder(self, folder, recursive=False):
if recursive:
print('recursive ON')
ids = {}
for root, sub, files in os.walk(folder):
par = os.path.dirname(root)
file_metadata = {
'name': os.path.basename(root),
'mimeType': 'application/vnd.google-apps.folder'
}
if par in ids.keys():
file_metadata['parents'] = [ids[par]]
file = self.service.files().create(body=file_metadata, fields='id').execute()
id = file.get('id')
ids[root] = id
for f in files:
self.upload(root + '/' + f, id)
else:
print('recursive OFF')
file_metadata = {
'name': os.path.basename(folder),
'mimeType': 'application/vnd.google-apps.folder'
}
file = self.service.files().create(body=file_metadata,
fields='id').execute()
print(file.get('id'))
return(file.get('id'))
"""
GoogleDrive(): share - shares a file or folder with a specified email
params:
file_id: String - ID of the file that will be shared from Google Drive
email: String - email address that file will be shared with
returns:
"""
def share(self, file_id, email):
def callback(request_id, response, exception):
if exception:
# Handle error
print(exception)
else:
print("Got response: " + str(response.get('id')) + ". For request: " + str(request_id))
batch = self.service.new_batch_http_request(callback=callback)
user_permission = {
'type': 'user',
'role': 'reader',
'emailAddress': email
}
batch.add(self.service.permissions().create(
fileId=file_id,
body=user_permission,
fields='id',
))
batch.execute()
|
import logging
from datetime import datetime
from typing import Optional, Generator, Tuple
import shutil
from dateutil.parser import isoparse
from pathlib import Path
import pandas as pd
from collections import defaultdict
import calplot
from sqlite_utils import Database
from summary import update_daily_summaries
from summary import update_seasonal_summaries
from summary import get_nmis
from summary import get_usage_df
from jinja2 import Environment, FileSystemLoader
import plotly.express as px
import plotly.graph_objects as go
db = Database("nemdata.db")
def format_month(dt: datetime) -> str:
return dt.strftime("%b %Y")
def get_date_range(nmi: str):
sql = """select MIN(first_interval) start, MAX(last_interval) end
from nmi_summary where nmi = :nmi
"""
row = list(db.query(sql, {"nmi": nmi}))[0]
start = isoparse(row["start"])
end = isoparse(row["end"])
return start, end
def get_years(nmi: str):
start, end = get_date_range(nmi)
x = start.year
while x <= end.year:
yield x
x += 1
def get_day_data(
nmi: str,
) -> Generator[Tuple[str, float, float, float, float, float, float], None, None]:
sql = "select day, imp, exp, imp_morning, imp_day, imp_evening, imp_night from daily_reads where nmi = :nmi"
for row in db.query(sql, {"nmi": nmi}):
dt = datetime.strptime(row["day"], "%Y-%m-%d")
row = (
dt,
row["imp"],
row["exp"],
row["imp_morning"],
row["imp_day"],
row["imp_evening"],
row["imp_night"],
)
yield row
def get_import_overview_chart(nmi: str) -> Path:
"""Save calendar plot"""
days = []
data = []
for dt, imp, _, _, _, _, _ in get_day_data(nmi):
days.append(dt)
data.append(imp)
data = pd.Series(data, index=days)
plot = calplot.calplot(
data,
suptitle=f"Daily kWh for {nmi}",
how=None,
vmin=0,
vmax=35,
cmap="YlOrRd",
daylabels="MTWTFSS",
colorbar=True,
)
fig = plot[0]
file_path = Path(f"build/{nmi}_import.png")
fig.savefig(file_path, bbox_inches="tight")
logging.info("Created %s", file_path)
return file_path
def get_daily_plot(nmi: str) -> str:
    """Return the daily usage bar chart as an embeddable HTML fragment"""
day_data = list(get_day_data(nmi))
data = {
"morning": [x[3] for x in day_data],
"day": [x[4] for x in day_data],
"evening": [x[5] for x in day_data],
"night": [x[6] for x in day_data],
"export": [-x[2] for x in day_data],
}
index = [x[0] for x in day_data]
df = pd.DataFrame(index=index, data=data)
color_dict = {'export': 'green', 'morning': 'tan', 'day': 'skyblue', 'evening': 'orangered', 'night': 'slategrey'}
fig = px.bar(df, x=df.index, y=list(data.keys()), color_discrete_map = color_dict)
fig.update_xaxes(
rangeslider_visible=False,
rangeselector=dict(
buttons=list(
[
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
),
)
    # fig.to_html() takes no file path; it returns the HTML fragment that
    # build_report() embeds directly into the template.
    return fig.to_html(full_html=False, include_plotlyjs="cdn")
def get_usage_plot(nmi: str) -> str:
    """Return the usage line chart as an embeddable HTML fragment"""
    df = get_usage_df(nmi)
    fig = px.line(df, x=df.index, y=["consumption", "export"])
    # write_html() returns None; to_html() returns the string promised by
    # the annotation.
    return fig.to_html(full_html=False, include_plotlyjs="cdn")
def get_export_overview_chart(nmi: str) -> Optional[Path]:
"""Save calendar plot"""
days = []
data = []
for dt, _, exp, _, _, _, _ in get_day_data(nmi):
if exp:
days.append(dt)
data.append(exp)
if len(data) == 0:
return None
data = pd.Series(data, index=days)
plot = calplot.calplot(
data,
suptitle=f"Daily Export kWh for {nmi}",
how=None,
vmin=0,
vmax=35,
cmap="Greens",
daylabels="MTWTFSS",
colorbar=True,
)
fig = plot[0]
file_path = Path(f"build/{nmi}_export.png")
fig.savefig(file_path, bbox_inches="tight")
logging.info("Created %s", file_path)
return file_path
def copy_static_data():
"""Copy static file"""
files = ["bootstrap.min.css"]
for file in files:
shutil.copy(f"templates/{file}", f"build/{file}")
def get_seasonal_data(nmi: str):
year_data = {}
for year in get_years(nmi):
data = get_year_season_data(nmi, year)
year_data[year] = data
return year_data
def get_year_season_data(nmi: str, year: int):
imp_values = {}
exp_values = {}
sql = """select season, imp, exp
from season_reads
where nmi = :nmi and year = :year
"""
for r in db.query(sql, {"nmi": nmi, "year": year}):
season = r["season"]
imp = r["imp"]
exp = r["exp"]
imp_values[season] = imp
exp_values[season] = exp
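    # season_reads stores per-day averages, so each seasonal total below is
    # reconstructed as (per-day average) * (approximate days in the season).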
a_days = 90
a_avg = imp_values.get("A - Summer", None)
a_sum = a_avg * a_days if a_avg else None
b_days = 92
b_avg = imp_values.get("B - Autumn", None)
b_sum = b_avg * b_days if b_avg else None
c_days = 92
c_avg = imp_values.get("C - Winter", None)
c_sum = c_avg * c_days if c_avg else None
d_days = 91
d_avg = imp_values.get("D - Spring", None)
d_sum = d_avg * d_days if d_avg else None
yr_sum = 0
yr_days = 0
if a_sum is not None:
yr_sum += a_sum
yr_days += a_days
if b_sum is not None:
yr_sum += b_sum
yr_days += b_days
if c_sum is not None:
yr_sum += c_sum
yr_days += c_days
if d_sum is not None:
yr_sum += d_sum
yr_days += d_days
    yr_avg = round(yr_sum / yr_days, 3) if yr_days else None
    # Aggregate export across the seasons the same way as import.
    exp_sum = 0
    exp_days = 0
    for season, days in (("A - Summer", a_days), ("B - Autumn", b_days),
                         ("C - Winter", c_days), ("D - Spring", d_days)):
        avg = exp_values.get(season)
        if avg is not None:
            exp_sum += avg * days
            exp_days += days
    exp_avg = round(exp_sum / exp_days, 3) if exp_days else None
    summary = {
        "Summer": (a_avg, a_sum),
        "Autumn": (b_avg, b_sum),
        "Winter": (c_avg, c_sum),
        "Spring": (d_avg, d_sum),
        "Export": (exp_avg, exp_sum),
        "Year": (yr_avg, yr_sum),
    }
return summary
def build_report(nmi: str):
template = env.get_template("nmi-report.html")
start, end = get_date_range(nmi)
fp_imp = get_import_overview_chart(nmi)
fp_exp = get_export_overview_chart(nmi)
daily_chart = get_daily_plot(nmi)
has_export = True if fp_exp else None
report_data = {
"start": start,
"end": end,
"has_export": has_export,
"daily_chart": daily_chart,
"imp_overview_chart": fp_imp.name,
"exp_overview_chart": fp_exp.name if has_export else None,
"season_data": get_seasonal_data(nmi),
}
    logging.debug("Report data for %s: %s", nmi, report_data)
output_html = template.render(nmi=nmi, **report_data)
file_path = f"build/{nmi}.html"
with open(file_path, "w", encoding="utf-8") as fh:
fh.write(output_html)
logging.info("Created %s", file_path)
logging.basicConfig(level="INFO")
Path("build").mkdir(exist_ok=True)
update_daily_summaries()
update_seasonal_summaries()
env = Environment(loader=FileSystemLoader("templates"))
env.filters["yearmonth"] = format_month
# copy_static_data()
for nmi in get_nmis():
build_report(nmi)
|
#!/usr/bin/env python
from LinkedList import LinkedList
class Queue(object):
def __init__(self):
self.items = LinkedList()
def __str__(self):
return str(self.items)
def enqueue(self, item):
self.items.insertAtEnd(item)
    def dequeue(self):
        if self.is_empty():
            return None
        # head is the LinkedList's front node; removeFromBegining (spelling
        # follows the LinkedList module) detaches it before we return it.
        dequeued = self.items.head
        self.items.removeFromBegining()
        return dequeued
def is_empty(self):
return self.items.size() == 0
def size(self):
return self.items.size()
if __name__ == '__main__':
a = Queue()
a.enqueue(1)
a.enqueue(2)
a.enqueue(3)
print(a)
print(a.dequeue())
print(a.dequeue())
print(a.dequeue())
print(a.dequeue())
print(a)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteContactRequest(RpcRequest):
    def __init__(self):
        RpcRequest.__init__(self, 'Cms', '2019-01-01', 'DeleteContact', 'cms')
    def get_ContactName(self):
        return self.get_query_params().get('ContactName')
    def set_ContactName(self, ContactName):
        self.add_query_param('ContactName', ContactName)
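# Usage sketch (AcsClient construction is assumed, not shown in this module):
#   request = DeleteContactRequest()
#   request.set_ContactName('ops-team')
#   response = acs_client.do_action_with_exception(request)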
|
from abc import ABCMeta
from argparse import ArgumentParser
from warnings import warn
import numpy as np
import pydub
import pytorch_lightning as pl
import soundfile
import torch
import torch.nn as nn
import torch.nn.functional as f
import wandb
from pytorch_lightning.loggers import WandbLogger
import models.cunet_model as cunet
from models import fourier
def get_estimation(idx, target_name, estimation_dict):
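    # estimation_dict layout (filled in by the validation/test hooks below):
    #   {target_name: {mixture_idx: {window_offset: np.ndarray chunk}}}
    # Sorting by window offset restores temporal order before concatenating.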
estimated = estimation_dict[target_name][idx]
if len(estimated) == 0:
warn('TODO: zero estimation, caused by ddp')
return None
estimated = np.concatenate([estimated[key] for key in sorted(estimated.keys())], axis=0)
return estimated
class Conditional_Source_Separation(pl.LightningModule, metaclass=ABCMeta):
def __init__(self, n_fft, hop_length, num_frame, optimizer, lr, dev_mode, **kwargs):
super(Conditional_Source_Separation, self).__init__()
self.n_fft = n_fft
self.hop_length = hop_length
self.num_frame = num_frame
self.lr = lr
self.optimizer = optimizer
self.target_names = ['vocals', 'drums', 'bass', 'other']
self.valid_estimation_dict = {}
self.dev_mode = dev_mode
def configure_optimizers(self):
if self.optimizer == "adam":
optimizer = torch.optim.Adam
elif self.optimizer == "adagrad":
optimizer = torch.optim.Adagrad
elif self.optimizer == "sgd":
optimizer = torch.optim.SGD
elif self.optimizer == "rmsprop":
optimizer = torch.optim.RMSprop
else:
optimizer = torch.optim.Adam
return optimizer(self.parameters(), lr=float(self.lr))
def training_step(self, batch, batch_idx):
mixture_signal, target_signal, condition = batch
target = self.to_spec(target_signal)
target_hat = self.forward(mixture_signal, condition)
loss = f.mse_loss(target, target_hat)
        self.log('loss/train_loss', loss, prog_bar=False, logger=True, on_step=False, on_epoch=True, reduce_fx=torch.mean)
return loss
# Validation Process
def on_validation_epoch_start(self):
for target_name in self.target_names:
self.valid_estimation_dict[target_name] = {mixture_idx: {}
for mixture_idx
in range(14)}
def validation_step(self, batch, batch_idx):
mixtures, mixture_ids, window_offsets, input_conditions, target_names, targets = batch
estimated_targets = self.separate(mixtures, input_conditions)[:, self.hop_length:-self.hop_length]
targets = targets[:, self.hop_length:-self.hop_length]
for mixture, mixture_idx, window_offset, input_condition, target_name, estimated_target \
in zip(mixtures, mixture_ids, window_offsets, input_conditions, target_names, estimated_targets):
self.valid_estimation_dict[target_name][mixture_idx.item()][
window_offset.item()] = estimated_target.detach().cpu().numpy()
# SDR - like Loss
s_targets = ((targets * estimated_targets).sum(axis=-2, keepdims=True) / (
(targets ** 2).sum(axis=-2, keepdims=True) + 1e-8)) * targets
distortion = estimated_targets - s_targets
loss = (((s_targets ** 2).sum(-2) + 1e-8).log() - ((distortion ** 2).sum(-2) + 1e-8).log()).mean()
# large value of SDR means good performance, so that we take the negative of sdr for the validation loss
loss = -1 * loss
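        # For reference, up to the constant log-base factor this is the
        # scale-invariant SDR: s_target = (<x_hat, x> / ||x||^2) * x and
        # SI-SDR = 10 * log10(||s_target||^2 / ||x_hat - s_target||^2).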
        self.log('loss/val_loss', loss, prog_bar=False, logger=True, on_step=False, on_epoch=True, reduce_fx=torch.mean)
return loss
def on_validation_epoch_end(self):
val_ids = [0] if self.dev_mode else [0, 1, 2]
for idx in val_ids:
estimation = {}
for target_name in self.target_names:
estimation[target_name] = get_estimation(idx, target_name, self.valid_estimation_dict)
                if estimation[target_name] is None:
                    continue
                estimation[target_name] = estimation[target_name].astype(np.float32)
if self.current_epoch > 10 and isinstance(self.logger, WandbLogger):
self.logger.experiment.log({'result_sample_{}_{}'.format(self.current_epoch, target_name): [
wandb.Audio(estimation[target_name][44100 * 60:44100 * 65],
caption='{}_{}'.format(idx, target_name),
sample_rate=44100)]})
    def on_test_epoch_start(self):
        import os
        import shutil
        output_folder = 'museval_output'
        if os.path.exists(output_folder):
            # os.rmdir() raises OSError on a non-empty directory.
            shutil.rmtree(output_folder)
        os.mkdir(output_folder)
self.valid_estimation_dict = None
self.test_estimation_dict = {}
self.musdb_test = self.test_dataloader().dataset
num_tracks = self.musdb_test.num_tracks
for target_name in self.target_names:
self.test_estimation_dict[target_name] = {mixture_idx: {}
for mixture_idx
in range(num_tracks)}
def test_step(self, batch, batch_idx):
mixtures, mixture_ids, window_offsets, input_conditions, target_names = batch
estimated_targets = self.separate(mixtures, input_conditions)[:, self.hop_length:-self.hop_length]
for mixture, mixture_idx, window_offset, input_condition, target_name, estimated_target \
in zip(mixtures, mixture_ids, window_offsets, input_conditions, target_names, estimated_targets):
self.test_estimation_dict[target_name][mixture_idx.item()][
window_offset.item()] = estimated_target.detach().cpu().numpy()
return torch.zeros(0)
def on_test_epoch_end(self):
import museval
results = museval.EvalStore(frames_agg='median', tracks_agg='median')
idx_list = [3, 2, 1, 0] if self.dev_mode else range(self.musdb_test.num_tracks)
for idx in idx_list:
estimation = {}
for target_name in self.target_names:
estimation[target_name] = get_estimation(idx, target_name, self.test_estimation_dict)
if estimation[target_name] is not None:
estimation[target_name] = estimation[target_name].astype(np.float32)
# Real SDR
if len(estimation) == len(self.target_names):
track_length = self.musdb_test.musdb_test[idx].samples
estimated_targets = [estimation[target_name][:track_length] for target_name in self.target_names]
if track_length > estimated_targets[0].shape[0]:
raise NotImplementedError
else:
estimated_targets_dict = {target_name: estimation[target_name][:track_length] for target_name in
self.target_names}
track_score = museval.eval_mus_track(
self.musdb_test.musdb_test[idx],
estimated_targets_dict
)
score_dict = track_score.df.loc[:, ['target', 'metric', 'score']].groupby(
['target', 'metric'])['score']\
.median().to_dict()
if isinstance(self.logger, WandbLogger):
self.logger.experiment.log(
{'test_result/{}_{}'.format(k1, k2): score_dict[(k1, k2)] for k1, k2 in score_dict.keys()})
else:
print(track_score)
results.add_track(track_score)
if isinstance(self.logger, WandbLogger):
result_dict = results.df.groupby(
['track', 'target', 'metric']
)['score'].median().reset_index().groupby(
['target', 'metric']
)['score'].median().to_dict()
self.logger.experiment.log(
{'test_result/agg/{}_{}'.format(k1, k2): result_dict[(k1, k2)] for k1, k2 in result_dict.keys()}
)
else:
print(results)
def export_mp3(self, idx, target_name):
estimated = self.test_estimation_dict[target_name][idx]
estimated = np.concatenate([estimated[key] for key in sorted(estimated.keys())], axis=0)
soundfile.write('tmp_output.wav', estimated, samplerate=44100)
audio = pydub.AudioSegment.from_wav('tmp_output.wav')
audio.export('{}_estimated/output_{}.mp3'.format(idx, target_name))
    def separate(self, input_signal, input_condition) -> torch.Tensor:
        raise NotImplementedError  # provided by concrete subclasses
def init_weights(self):
for param in self.parameters():
if param.dim() > 1:
nn.init.kaiming_normal_(param)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--optimizer', type=str, default='adam')
return parser
class CUNET_Framework(Conditional_Source_Separation):
@staticmethod
def get_arg_keys():
return ['n_fft', 'hop_length', 'num_frame', 'spec_type', 'spec_est_mode', 'optimizer', 'lr', 'dev_mode'] \
+ cunet.CUNET.get_arg_keys()
def __init__(self, n_fft, hop_length, num_frame, spec_type, spec_est_mode, **kwargs):
super(CUNET_Framework, self).__init__(n_fft, hop_length, num_frame, **kwargs)
self.save_hyperparameters(*CUNET_Framework.get_arg_keys())
self.n_fft = n_fft
self.hop_length = hop_length
self.num_frame = num_frame
self.magnitude_based = spec_type == "magnitude"
self.masking_based = spec_est_mode == "masking"
self.stft = fourier.multi_channeled_STFT(n_fft=n_fft, hop_length=hop_length)
cunet_args = cunet.CUNET.get_arg_keys()
self.spec2spec = cunet.CUNET(**{key: kwargs[key] for key in cunet_args})
self.init_weights()
def forward(self, input_signal, input_condition):
input_spec = self.to_spec(input_signal)
output_spec = self.spec2spec(input_spec, input_condition)
if self.masking_based:
output_spec = input_spec * output_spec
return output_spec
def init_weights(self):
for param in self.parameters():
if param.dim() > 1:
nn.init.kaiming_normal_(param)
def to_spec(self, input_signal) -> torch.Tensor:
if self.magnitude_based:
return self.stft.to_mag(input_signal).transpose(-1, -3)[..., 1:]
else:
raise NotImplementedError
def separate(self, input_signal, input_condition) -> torch.Tensor:
if self.magnitude_based:
mag, phase = self.stft.to_mag_phase(input_signal)
input_spec = mag.transpose(-1, -3)
output_spec = self.spec2spec(input_spec[..., 1:], input_condition)
if self.masking_based:
output_spec = input_spec[..., 1:] * output_spec
else:
raise NotImplementedError
else:
raise NotImplementedError
output_spec = torch.cat([input_spec[..., :1], output_spec], dim=-1)
output_spec = output_spec.transpose(-1, -3)
if self.magnitude_based:
restored = self.stft.restore_mag_phase(output_spec, phase)
else:
raise NotImplementedError
return restored
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--n_fft', type=int, default=1024)
parser.add_argument('--hop_length', type=int, default=256)
parser.add_argument('--num_frame', type=int, default=128)
parser.add_argument('--spec_type', type=str, default='magnitude')
parser.add_argument('--spec_est_mode', type=str, default='masking')
parser.add_argument('--n_layers', type=int, default=6)
parser.add_argument('--input_channels', type=int, default=2)
parser.add_argument('--filters_layer_1', type=int, default=16)
parser.add_argument('--kernel_size', default=(5, 5))
parser.add_argument('--stride', default=(2, 2))
parser.add_argument('--film_type', type=str, default='simple')
parser.add_argument('--control_type', type=str, default='dense')
parser.add_argument('--encoder_activation', type=str, default='leaky_relu')
parser.add_argument('--decoder_activation', type=str, default='relu')
parser.add_argument('--last_activation', type=str, default='sigmoid')
parser.add_argument('--control_input_dim', type=int, default=4)
parser.add_argument('--control_n_layer', type=int, default=4)
return Conditional_Source_Separation.add_model_specific_args(parser)
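# Usage sketch (hypothetical wiring; the real entry point lives elsewhere):
#   parser = CUNET_Framework.add_model_specific_args(ArgumentParser())
#   args = vars(parser.parse_args([]))        # defaults defined above
#   model = CUNET_Framework(**args, dev_mode=False)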
|
import unittest
from jit import jit, j_types as j
import ctypes
@jit
def inf1(a: j.f64):
return a + 2
@jit
def inf2(a: j.i32):
return a + 2
class Test(unittest.TestCase):
def test_inference(self):
self.assertEqual(inf1(2), 4.0)
self.assertEqual(inf2(2), 4)
self.assertEqual(inf1._wrapped._jit.restype, ctypes.c_double)
self.assertEqual(inf2._wrapped._jit.restype, ctypes.c_int32)
|
import FWCore.ParameterSet.Config as cms
# This config was generated automatically using generate2026Geometry.py
# If you notice a mistake, please update the generating script, not just this config
from Geometry.CMSCommonData.cmsExtendedGeometry2026D85XML_cfi import *
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *
from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *
from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *
from Geometry.MuonNumbering.muonGeometryConstants_cff import *
from Geometry.MuonNumbering.muonOffsetESProducer_cff import *
from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import *
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test HiFiC."""
import contextlib
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
from . import configs
from . import helpers
from . import model
from . import train
class FakeHiFiC(model.HiFiC):
"""Fake class for testing."""
def _get_dataset(self, batch_size, crop_size,
tfds_arguments: helpers.TFDSArguments):
zeros = np.zeros((batch_size, crop_size, crop_size, 3))
return (tf.data.Dataset.from_tensor_slices(
(zeros,)).repeat(128).batch(batch_size))
class HiFiCTest(tf.test.TestCase):
"""Test public repo."""
def setUp(self):
super(HiFiCTest, self).setUp()
self._lpips_weight_path = 'test.weights'
def test_input_pipeline(self):
crop_size = 128
hific = FakeHiFiC(
configs.get_config('hific'), mode=helpers.ModelMode.TRAINING)
ds = hific.build_input(
batch_size=2,
crop_size=crop_size,
tfds_arguments=helpers.TFDSArguments(
dataset_name='', features_key='', downloads_dir=''))
iterator = tf.data.make_initializable_iterator(ds)
ds_next = iterator.get_next()
with tf.Session() as sess:
sess.run(iterator.initializer)
image = sess.run(ds_next)
self.assertEqual(image['input_image'].shape[1:3], (crop_size, crop_size))
def test_config(self):
config = helpers.Config(foo=1, bar=helpers.Config(baz=2))
self.assertEqual(config.foo, 1)
self.assertEqual(config['foo'], 1)
self.assertEqual(config.bar.baz, 2)
@contextlib.contextmanager
def _update_constants_for_testing(steps):
  steps_default_value = train.SAVE_CHECKPOINT_STEPS
  train.SAVE_CHECKPOINT_STEPS = steps
  try:
    yield
  finally:
    # Restore the default even if the body raises.
    train.SAVE_CHECKPOINT_STEPS = steps_default_value
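# Usage sketch (assumed): run training inside the context so checkpoints are
# saved every `steps` steps; the default is restored on exit:
#   with _update_constants_for_testing(steps=2):
#     ...  # invoke training here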
if __name__ == '__main__':
tf.test.main()
|
"""Decide which plugins to use for authentication & installation"""
from __future__ import print_function
import os
import logging
import six
import zope.component
from certbot import errors
from certbot import interfaces
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
z_util = zope.component.getUtility
def pick_configurator(
config, default, plugins,
question="How would you like to authenticate and install "
"certificates?"):
"""Pick configurator plugin."""
return pick_plugin(
config, default, plugins, question,
(interfaces.IAuthenticator, interfaces.IInstaller))
def pick_installer(config, default, plugins,
question="How would you like to install certificates?"):
"""Pick installer plugin."""
return pick_plugin(
config, default, plugins, question, (interfaces.IInstaller,))
def pick_authenticator(
config, default, plugins, question="How would you "
"like to authenticate with the ACME CA?"):
"""Pick authentication plugin."""
return pick_plugin(
config, default, plugins, question, (interfaces.IAuthenticator,))
def pick_plugin(config, default, plugins, question, ifaces):
"""Pick plugin.
    :param certbot.interfaces.IConfig config: Configuration
:param str default: Plugin name supplied by user or ``None``.
:param certbot.plugins.disco.PluginsRegistry plugins:
All plugins registered as entry points.
:param str question: Question to be presented to the user in case
multiple candidates are found.
:param list ifaces: Interfaces that plugins must provide.
:returns: Initialized plugin.
:rtype: IPlugin
"""
if default is not None:
# throw more UX-friendly error if default not in plugins
filtered = plugins.filter(lambda p_ep: p_ep.name == default)
else:
if config.noninteractive_mode:
# it's really bad to auto-select the single available plugin in
# non-interactive mode, because an update could later add a second
# available plugin
raise errors.MissingCommandlineFlag(
"Missing command line flags. For non-interactive execution, "
"you will need to specify a plugin on the command line. Run "
"with '--help plugins' to see a list of options, and see "
"https://eff.org/letsencrypt-plugins for more detail on what "
"the plugins do and how to use them.")
filtered = plugins.visible().ifaces(ifaces)
filtered.init(config)
verified = filtered.verify(ifaces)
verified.prepare()
prepared = verified.available()
if len(prepared) > 1:
logger.debug("Multiple candidate plugins: %s", prepared)
plugin_ep = choose_plugin(list(six.itervalues(prepared)), question)
if plugin_ep is None:
return None
else:
return plugin_ep.init()
elif len(prepared) == 1:
plugin_ep = list(prepared.values())[0]
logger.debug("Single candidate plugin: %s", plugin_ep)
if plugin_ep.misconfigured:
return None
return plugin_ep.init()
else:
logger.debug("No candidate plugin")
return None
def choose_plugin(prepared, question):
"""Allow the user to choose their plugin.
:param list prepared: List of `~.PluginEntryPoint`.
:param str question: Question to be presented to the user.
:returns: Plugin entry point chosen by the user.
:rtype: `~.PluginEntryPoint`
"""
opts = [plugin_ep.description_with_name +
(" [Misconfigured]" if plugin_ep.misconfigured else "")
for plugin_ep in prepared]
names = set(plugin_ep.name for plugin_ep in prepared)
while True:
disp = z_util(interfaces.IDisplay)
if "CERTBOT_AUTO" in os.environ and names == set(("apache", "nginx")):
# The possibility of being offered exactly apache and nginx here
# is new interactivity brought by https://github.com/certbot/certbot/issues/4079,
# so set apache as a default for those kinds of non-interactive use
# (the user will get a warning to set --non-interactive or --force-interactive)
apache_idx = [n for n, p in enumerate(prepared) if p.name == "apache"][0]
code, index = disp.menu(question, opts, default=apache_idx)
else:
code, index = disp.menu(question, opts, force_interactive=True)
if code == display_util.OK:
plugin_ep = prepared[index]
if plugin_ep.misconfigured:
z_util(interfaces.IDisplay).notification(
"The selected plugin encountered an error while parsing "
"your server configuration and cannot be used. The error "
"was:\n\n{0}".format(plugin_ep.prepare()), pause=False)
else:
return plugin_ep
else:
return None
noninstaller_plugins = ["webroot", "manual", "standalone", "dns-cloudflare", "dns-cloudxns",
"dns-digitalocean", "dns-dnsimple", "dns-dnsmadeeasy", "dns-google",
"dns-luadns", "dns-nsone", "dns-rfc2136", "dns-route53"]
def record_chosen_plugins(config, plugins, auth, inst):
"Update the config entries to reflect the plugins we actually selected."
config.authenticator = plugins.find_init(auth).name if auth else "None"
config.installer = plugins.find_init(inst).name if inst else "None"
logger.info("Plugins selected: Authenticator %s, Installer %s",
config.authenticator, config.installer)
def choose_configurator_plugins(config, plugins, verb):
# pylint: disable=too-many-branches
"""
Figure out which configurator we're going to use, modifies
config.authenticator and config.installer strings to reflect that choice if
necessary.
    :raises errors.PluginSelectionError: if there was a problem
:returns: (an `IAuthenticator` or None, an `IInstaller` or None)
:rtype: tuple
"""
req_auth, req_inst = cli_plugin_requests(config)
installer_question = None
if verb == "enhance":
installer_question = ("Which installer would you like to use to "
"configure the selected enhancements?")
# Which plugins do we need?
if verb == "run":
need_inst = need_auth = True
from certbot.cli import cli_command
if req_auth in noninstaller_plugins and not req_inst:
msg = ('With the {0} plugin, you probably want to use the "certonly" command, eg:{1}'
'{1} {2} certonly --{0}{1}{1}'
'(Alternatively, add a --installer flag. See https://eff.org/letsencrypt-plugins'
'{1} and "--help plugins" for more information.)'.format(
req_auth, os.linesep, cli_command))
raise errors.MissingCommandlineFlag(msg)
else:
need_inst = need_auth = False
if verb == "certonly":
need_auth = True
if verb == "install" or verb == "enhance":
need_inst = True
if config.authenticator:
logger.warning("Specifying an authenticator doesn't make sense when "
"running Certbot with verb \"%s\"", verb)
# Try to meet the user's request and/or ask them to pick plugins
authenticator = installer = None
if verb == "run" and req_auth == req_inst:
# Unless the user has explicitly asked for different auth/install,
# only consider offering a single choice
authenticator = installer = pick_configurator(config, req_inst, plugins)
else:
if need_inst or req_inst:
installer = pick_installer(config, req_inst, plugins, installer_question)
if need_auth:
authenticator = pick_authenticator(config, req_auth, plugins)
logger.debug("Selected authenticator %s and installer %s", authenticator, installer)
# Report on any failures
if need_inst and not installer:
diagnose_configurator_problem("installer", req_inst, plugins)
if need_auth and not authenticator:
diagnose_configurator_problem("authenticator", req_auth, plugins)
record_chosen_plugins(config, plugins, authenticator, installer)
return installer, authenticator
def set_configurator(previously, now):
"""
Setting configurators multiple ways is okay, as long as they all agree
:param str previously: previously identified request for the installer/authenticator
    :param str now: the request currently being processed
"""
if not now:
# we're not actually setting anything
return previously
if previously:
if previously != now:
msg = "Too many flags setting configurators/installers/authenticators {0} -> {1}"
raise errors.PluginSelectionError(msg.format(repr(previously), repr(now)))
return now
def cli_plugin_requests(config): # pylint: disable=too-many-branches
"""
Figure out which plugins the user requested with CLI and config options
:returns: (requested authenticator string or None, requested installer string or None)
:rtype: tuple
"""
req_inst = req_auth = config.configurator
req_inst = set_configurator(req_inst, config.installer)
req_auth = set_configurator(req_auth, config.authenticator)
if config.nginx:
req_inst = set_configurator(req_inst, "nginx")
req_auth = set_configurator(req_auth, "nginx")
if config.apache:
req_inst = set_configurator(req_inst, "apache")
req_auth = set_configurator(req_auth, "apache")
if config.standalone:
req_auth = set_configurator(req_auth, "standalone")
if config.webroot:
req_auth = set_configurator(req_auth, "webroot")
if config.manual:
req_auth = set_configurator(req_auth, "manual")
if config.dns_cloudflare:
req_auth = set_configurator(req_auth, "dns-cloudflare")
if config.dns_cloudxns:
req_auth = set_configurator(req_auth, "dns-cloudxns")
if config.dns_digitalocean:
req_auth = set_configurator(req_auth, "dns-digitalocean")
if config.dns_dnsimple:
req_auth = set_configurator(req_auth, "dns-dnsimple")
if config.dns_dnsmadeeasy:
req_auth = set_configurator(req_auth, "dns-dnsmadeeasy")
if config.dns_google:
req_auth = set_configurator(req_auth, "dns-google")
if config.dns_luadns:
req_auth = set_configurator(req_auth, "dns-luadns")
if config.dns_nsone:
req_auth = set_configurator(req_auth, "dns-nsone")
if config.dns_rfc2136:
req_auth = set_configurator(req_auth, "dns-rfc2136")
if config.dns_route53:
req_auth = set_configurator(req_auth, "dns-route53")
logger.debug("Requested authenticator %s and installer %s", req_auth, req_inst)
return req_auth, req_inst
def diagnose_configurator_problem(cfg_type, requested, plugins):
"""
Raise the most helpful error message about a plugin being unavailable
:param str cfg_type: either "installer" or "authenticator"
:param str requested: the plugin that was requested
:param .PluginsRegistry plugins: available plugins
:raises error.PluginSelectionError: if there was a problem
"""
if requested:
if requested not in plugins:
msg = "The requested {0} plugin does not appear to be installed".format(requested)
else:
msg = ("The {0} plugin is not working; there may be problems with "
"your existing configuration.\nThe error was: {1!r}"
.format(requested, plugins[requested].problem))
elif cfg_type == "installer":
from certbot.cli import cli_command
msg = ('Certbot doesn\'t know how to automatically configure the web '
'server on this system. However, it can still get a certificate for '
'you. Please run "{0} certonly" to do so. You\'ll need to '
'manually configure your web server to use the resulting '
'certificate.').format(cli_command)
else:
msg = "{0} could not be determined or is not installed".format(cfg_type)
raise errors.PluginSelectionError(msg)
|
from tests.flytekit.unit.common_tests.mixins import sample_registerable as _sample_registerable
def test_instance_tracker():
assert _sample_registerable.example.instantiated_in == "tests.flytekit.unit.common_tests.mixins.sample_registerable"
def test_auto_name_assignment():
_sample_registerable.example.auto_assign_name()
assert (
_sample_registerable.example.platform_valid_name
== "tests.flytekit.unit.common_tests.mixins.sample_registerable.example"
)
|
from app import apfell, db_objects
from sanic.response import json
from app.database_models.model import Task, ATTACKCommand, ATTACKTask, Callback, Command
from sanic_jwt.decorators import scoped, inject_user
import app.database_models.model as db_model
from sanic.exceptions import abort
@apfell.route(apfell.config['API_BASE'] + "/mitreattack/", methods=['GET'])
@inject_user()
@scoped(['auth:user', 'auth:apitoken_user'], False) # user or user-level api token are ok
async def get_all_mitre_attack_ids(request, user):
if user['auth'] not in ['access_token', 'apitoken']:
abort(status_code=403, message="Cannot access via Cookies. Use CLI or access via JS in browser")
query = await db_model.attack_query()
attack_entries = await db_objects.execute(query)
matrix = {}
for entry in attack_entries:
tactics = entry.tactic.split(" ")
for t in tactics:
if t not in matrix:
matrix[t] = []
matrix[t].append({**entry.to_json(), "tactic": t, "mappings": {}})
return json({'status': 'success', 'attack': matrix})
@apfell.route(apfell.config['API_BASE'] + "/mitreattack/listing", methods=['GET'])
@inject_user()
@scoped(['auth:user', 'auth:apitoken_user'], False) # user or user-level api token are ok
async def get_all_mitre_attack_ids_listing(request, user):
if user['auth'] not in ['access_token', 'apitoken']:
abort(status_code=403, message="Cannot access via Cookies. Use CLI or access via JS in browser")
try:
query = await db_model.attack_query()
attack_entries = await db_objects.execute(query)
return json({'status': 'success', 'attack': [a.to_json() for a in attack_entries]})
except Exception as e:
return json({'status': 'error', 'error': str(e)})
@apfell.route(apfell.config['API_BASE'] + "/mitreattack/bycommand", methods=['GET'])
@inject_user()
@scoped(['auth:user', 'auth:apitoken_user'], False) # user or user-level api token are ok
async def get_all_mitre_attack_ids_by_command(request, user):
if user['auth'] not in ['access_token', 'apitoken']:
abort(status_code=403, message="Cannot access via Cookies. Use CLI or access via JS in browser")
query = await db_model.attack_query()
attack_entries = await db_objects.execute(query)
matrix = {}
for entry in attack_entries:
tactics = entry.tactic.split(" ")
for t in tactics:
if t not in matrix:
matrix[t] = []
entry_json = entry.to_json()
entry_json['mappings'] = {} # this is where we'll store payload_type and command mappings
entry_json['tactic'] = t
query = await db_model.attackcommand_query()
mappings = await db_objects.execute(query.where(ATTACKCommand.attack == entry))
for m in mappings:
if m.command.payload_type.ptype not in entry_json['mappings']:
entry_json['mappings'][m.command.payload_type.ptype] = []
entry_json['mappings'][m.command.payload_type.ptype].append(m.to_json())
matrix[t].append(entry_json)
return json({'status': 'success', 'attack': matrix})
@apfell.route(apfell.config['API_BASE'] + "/mitreattack/bytask", methods=['GET'])
@inject_user()
@scoped(['auth:user', 'auth:apitoken_user'], False) # user or user-level api token are ok
async def get_all_mitre_attack_ids_by_task(request, user):
if user['auth'] not in ['access_token', 'apitoken']:
abort(status_code=403, message="Cannot access via Cookies. Use CLI or access via JS in browser")
query = await db_model.attack_query()
attack_entries = await db_objects.execute(query)
matrix = {}
for entry in attack_entries:
tactics = entry.tactic.split(" ")
for t in tactics:
if t not in matrix:
matrix[t] = []
entry_json = entry.to_json()
entry_json['mappings'] = {} # this is where we'll store payload_type and command mappings
entry_json['tactic'] = t
query = await db_model.attacktask_query()
mappings = await db_objects.execute(query.where(ATTACKTask.attack == entry))
for m in mappings:
if m.task.command.payload_type.ptype not in entry_json['mappings']:
entry_json['mappings'][m.task.command.payload_type.ptype] = []
entry_json['mappings'][m.task.command.payload_type.ptype].append(m.to_json())
matrix[t].append(entry_json)
return json({'status': 'success', 'attack': matrix})
@apfell.route(apfell.config['API_BASE'] + "/mitreattack/regex", methods=['POST'])
@inject_user()
@scoped(['auth:user', 'auth:apitoken_user'], False) # user or user-level api token are ok
async def regex_against_tasks(request, user):
if user['auth'] not in ['access_token', 'apitoken']:
abort(status_code=403, message="Cannot access via Cookies. Use CLI or access via JS in browser")
data = request.json
try:
query = await db_model.operation_query()
operation = await db_objects.get(query, name=user['current_operation'])
except Exception as e:
return json({'status': 'error', 'error': "Failed to find current operation"})
if 'regex' not in data:
return json({'status': 'error', 'error': 'regex is a required field'})
if 'apply' not in data:
return json({'status': 'error', 'error': 'apply is a required field'})
if 'attack' not in data:
return json({'status': 'error', 'error': 'an attack T# is required'})
try:
query = await db_model.attack_query()
attack = await db_objects.get(query, t_num=data['attack'])
except Exception as e:
return json({'status': 'error', 'error': 'Failed to find that T#. Make sure you specify "attack": "T1124" for example'})
try:
query = await db_model.task_query()
matching_tasks = await db_objects.prefetch(query.switch(Callback).where(Callback.operation == operation).switch(Task).where(
(Task.params.regexp(data['regex'])) | (Task.original_params.regexp(data['regex']))).order_by(Task.id), Command.select())
if data['apply']:
# actually apply the specified att&ck id to the matched tasks
for t in matching_tasks:
# don't create duplicates
try:
query = await db_model.attacktask_query()
attacktask = await db_objects.get(query, attack=attack, task=t)
except Exception as e:
# we didn't find the specific attack-task mapping, so create a new one
attacktask = await db_objects.create(ATTACKTask, attack=attack, task=t)
return json({'status': 'success'})
else:
# simply return which tasks would have matched
# for each matching task, also return which other ATT&CK IDs are associated
tasks = []
for t in matching_tasks:
sub_attacks = []
query = await db_model.attacktask_query()
matching_attacks = await db_objects.execute(query.where(ATTACKTask.task == t))
for ma in matching_attacks:
sub_attacks.append({'t_num': ma.attack.t_num, 'name': ma.attack.name})
tasks.append({**t.to_json(), "attack": sub_attacks})
return json({'status': 'success', 'matches': tasks})
except Exception as e:
print(e)
return json({"status": "error", "error": str(e)})
@apfell.route(apfell.config['API_BASE'] + "/mitreattack/task/<tid:int>/attack/<tnum:string>", methods=['DELETE'])
@inject_user()
@scoped(['auth:user', 'auth:apitoken_user'], False) # user or user-level api token are ok
async def remove_task_attack_mapping(request, user, tid, tnum):
if user['auth'] not in ['access_token', 'apitoken']:
abort(status_code=403, message="Cannot access via Cookies. Use CLI or access via JS in browser")
try:
query = await db_model.task_query()
task = await db_objects.get(query, id=tid)
query = await db_model.attack_query()
attack = await db_objects.get(query, t_num=tnum)
query = await db_model.attacktask_query()
mapping = await db_objects.get(query, task=task, attack=attack)
await db_objects.delete(mapping)
return json({'status': 'success', "task_id": tid, "attack": tnum})
except Exception as e:
print(e)
return json({'status': 'error', 'error': str(e)})
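# --- Illustrative sketch (added for exposition; the field names come from the
# handlers above, the values are hypothetical examples) ---
# A request body for POST /mitreattack/regex; `apply` toggles between a
# dry-run listing of matches and actually creating ATTACKTask mappings:
EXAMPLE_REGEX_REQUEST = {
    "regex": "whoami",  # matched against Task.params / Task.original_params
    "apply": False,     # False: report matching tasks only; True: tag them
    "attack": "T1124",  # the ATT&CK technique number to map onto matches
}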
|
import numpy as np
import torch
import warnings
from .neurodiffeq import safe_diff as diff
from ._version_utils import deprecated_alias
class BaseCondition:
r"""Base class for all conditions.
    A condition is a tool to `re-parameterize` the output(s) of a neural network,
such that the re-parameterized output(s) will automatically satisfy initial conditions (ICs)
and boundary conditions (BCs) of the PDEs/ODEs that are being solved.
.. note::
- The nouns *(re-)parameterization* and *condition* are used interchangeably in the documentation and library.
- The verbs *(re-)parameterize* and *enforce* are different in that:
- *(re)parameterize* is said of network outputs;
- *enforce* is said of networks themselves.
"""
def __init__(self):
self.ith_unit = None
def parameterize(self, output_tensor, *input_tensors):
r"""Re-parameterizes output(s) of a network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param input_tensors: Inputs to the neural network; i.e., sampled coordinates; i.e., independent variables.
:type input_tensors: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
.. note::
This method is **abstract** for BaseCondition
"""
raise ValueError(f"Abstract {self.__class__.__name__} cannot be parameterized") # pragma: no cover
def enforce(self, net, *coordinates):
r"""Enforces this condition on a network.
:param net: The network whose output is to be re-parameterized.
:type net: `torch.nn.Module`
:param coordinates: Inputs of the neural network.
:type coordinates: `torch.Tensor`
:return: The re-parameterized output, where the condition is automatically satisfied.
:rtype: `torch.Tensor`
"""
# concatenate the coordinates and pass to network
network_output = net(torch.cat(coordinates, dim=1))
# if `ith_unit` is set, the condition will only be enforced on the i-th output unit
if self.ith_unit is not None:
network_output = network_output[:, self.ith_unit].view(-1, 1)
# parameterize the raw output and return
return self.parameterize(network_output, *coordinates)
def set_impose_on(self, ith_unit):
r"""**[DEPRECATED]** When training several functions with a single, multi-output network, this method is called
(by a `Solver` class or a `solve` function) to keep track of which output is being parameterized.
:param ith_unit: The index of network output to be parameterized.
:type ith_unit: int
.. note::
This method is deprecated and retained for backward compatibility only. Users interested in enforcing
conditions on multi-output networks should consider using a ``neurodiffeq.conditions.EnsembleCondition``.
"""
warnings.warn(
f"`{self.__class__.__name__}.set_impose_on` is deprecated and will be removed in the future",
DeprecationWarning,
)
self.ith_unit = ith_unit
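# --- Illustrative sketch (added for exposition; not part of the original module) ---
# `enforce` concatenates the coordinate columns along dim=1 before the forward
# pass, so a network taking two coordinates needs two input features
# (`NoCondition` is defined further below in this module):
def _demo_enforce_shapes():
    net = torch.nn.Linear(2, 1)  # toy network for (x, y) pairs
    cond = NoCondition()         # identity re-parameterization
    x, y = torch.rand(8, 1), torch.rand(8, 1)
    assert cond.enforce(net, x, y).shape == (8, 1)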
class IrregularBoundaryCondition(BaseCondition):
# Is there a more elegant solution?
def in_domain(self, *coordinates):
"""Given the coordinates (numpy.ndarray), the methods returns an boolean array indicating
whether the points lie within the domain.
:param coordinates: Input tensors, each with shape (n_samples, 1).
:type coordinates: `numpy.ndarray`
:return: Whether each point lies within the domain.
:rtype: `numpy.ndarray`
.. note::
- This method is meant to be used by monitors for irregular domain visualization.
"""
# returns straight `True`-s by default; i.e., all points are considered within domain
        return np.ones_like(coordinates[0], dtype=bool)
class EnsembleCondition(BaseCondition):
r"""An ensemble condition that enforces sub-conditions on individual output units of the networks.
:param sub_conditions: Condition(s) to be ensemble'd.
:type sub_conditions: BaseCondition
:param force: Whether or not to force ensembl'ing even when `.enforce` is overridden in one of the sub-conditions.
:type force: bool
"""
def __init__(self, *sub_conditions, force=False):
super(EnsembleCondition, self).__init__()
for i, c in enumerate(sub_conditions):
if c.__class__.enforce != BaseCondition.enforce:
msg = f"{c.__class__.__name__} (index={i})'s overrides BaseCondition's `.enforce` method. " \
f"Ensembl'ing is likely not going to work."
if force:
warnings.warn(msg)
else:
raise ValueError(msg + "\nTry with `force=True` if you know what you are doing.")
self.conditions = sub_conditions
def parameterize(self, output_tensor, *input_tensors):
r"""Re-parameterizes each column in output_tensor individually, using its corresponding sub-condition.
This is useful when solving differential equations with a single, multi-output network.
:param output_tensor:
Output of the neural network.
Number of units (.shape[1]) must equal number of sub-conditions.
:type output_tensor: `torch.Tensor`
:param input_tensors:
Inputs to the neural network;
i.e., sampled coordinates;
i.e., independent variables.
:type input_tensors: `torch.Tensor`
:return:
The column-wise re-parameterized network output,
concatenated across columns so that it's still one tensor.
:rtype: `torch.Tensor`
"""
if output_tensor.shape[1] != len(self.conditions):
raise ValueError(f"number of output units ({output_tensor.shape[1]}) "
f"differs from number of conditions ({len(self.conditions)})")
return torch.cat([
con.parameterize(output_tensor[:, i].view(-1, 1), *input_tensors) for i, con in enumerate(self.conditions)
], dim=1)
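# --- Illustrative sketch (added for exposition; not part of the original module) ---
# One sub-condition per output unit; each column is re-parameterized
# independently (`IVP` is defined further below in this module):
def _demo_ensemble():
    cond = EnsembleCondition(IVP(t_0=0.0, u_0=0.0), IVP(t_0=0.0, u_0=1.0))
    raw = torch.randn(4, 2)  # stand-in for a two-output network's raw output
    out = cond.parameterize(raw, torch.zeros(4, 1))
    # at t = t_0 each column collapses to its own u_0
    assert torch.allclose(out, torch.tensor([[0.0, 1.0]]).expand(4, 2))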
class NoCondition(BaseCondition):
r"""A polymorphic condition where no re-parameterization will be performed.
.. note::
This condition is called *polymorphic* because it can be enforced on networks of arbitrary input/output sizes.
"""
def parameterize(self, output_tensor, *input_tensors):
f"""Performs no re-parameterization, or identity parameterization, in this case.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param input_tensors: Inputs to the neural network; i.e., sampled coordinates; i.e., independent variables.
:type input_tensors: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
return output_tensor
class IVP(BaseCondition):
r"""An initial value problem of one of the following forms:
- Dirichlet condition: :math:`u(t_0)=u_0`.
- Neumann condition: :math:`\displaystyle\frac{\partial u}{\partial t}\bigg|_{t = t_0} = u_0'`.
:param t_0: The initial time.
:type t_0: float
:param u_0: The initial value of :math:`u`. :math:`u(t_0)=u_0`.
:type u_0: float
:param u_0_prime:
The initial derivative of :math:`u` w.r.t. :math:`t`.
:math:`\displaystyle\frac{\partial u}{\partial t}\bigg|_{t = t_0} = u_0'`.
Defaults to None.
:type u_0_prime: float, optional
"""
@deprecated_alias(x_0='u_0', x_0_prime='u_0_prime')
def __init__(self, t_0, u_0=None, u_0_prime=None):
super().__init__()
self.t_0, self.u_0, self.u_0_prime = t_0, u_0, u_0_prime
def parameterize(self, output_tensor, t):
r"""Re-parameterizes outputs such that the Dirichlet/Neumann condition is satisfied.
- For Dirichlet condition, the re-parameterization is
:math:`\displaystyle u(t) = u_0 + \left(1 - e^{-(t-t_0)}\right) \mathrm{ANN}(t)`
where :math:`\mathrm{ANN}` is the neural network.
- For Neumann condition, the re-parameterization is
:math:`\displaystyle u(t) = u_0 + (t-t_0) u'_0 + \left(1 - e^{-(t-t_0)}\right)^2 \mathrm{ANN}(t)`
where :math:`\mathrm{ANN}` is the neural network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param t: Input to the neural network; i.e., sampled time-points; i.e., independent variables.
:type t: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
if self.u_0_prime is None:
return self.u_0 + (1 - torch.exp(-t + self.t_0)) * output_tensor
else:
return self.u_0 + (t - self.t_0) * self.u_0_prime + ((1 - torch.exp(-t + self.t_0)) ** 2) * output_tensor
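# --- Illustrative sketch (added for exposition; not part of the original module) ---
# At t = t_0 the factor (1 - exp(0)) vanishes, so the Dirichlet
# re-parameterization returns exactly u_0 no matter what the network outputs:
def _demo_ivp_dirichlet():
    cond = IVP(t_0=0.0, u_0=1.0)
    raw = torch.randn(5, 1)  # stand-in for a network output
    assert torch.allclose(cond.parameterize(raw, torch.zeros(5, 1)), torch.ones(5, 1))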
class DirichletBVP(BaseCondition):
r"""A double-ended Dirichlet boundary condition:
:math:`u(t_0)=u_0` and :math:`u(t_1)=u_1`.
:param t_0: The initial time.
:type t_0: float
:param u_0: The initial value of :math:`u`. :math:`u(t_0)=u_0`.
:type u_0: float
:param t_1: The final time.
:type t_1: float
    :param u_1: The final value of :math:`u`. :math:`u(t_1)=u_1`.
:type u_1: float
"""
@deprecated_alias(x_0='u_0', x_1='u_1')
def __init__(self, t_0, u_0, t_1, u_1):
super().__init__()
self.t_0, self.u_0, self.t_1, self.u_1 = t_0, u_0, t_1, u_1
def parameterize(self, output_tensor, t):
r"""Re-parameterizes outputs such that the Dirichlet condition is satisfied on both ends of the domain.
The re-parameterization is
:math:`\displaystyle u(t)=(1-\tilde{t})u_0+\tilde{t}u_1+\left(1-e^{(1-\tilde{t})\tilde{t}}\right)\mathrm{ANN}(t)`,
where :math:`\displaystyle \tilde{t} = \frac{t-t_0}{t_1-t_0}` and :math:`\mathrm{ANN}` is the neural network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param t: Input to the neural network; i.e., sampled time-points or another independent variable.
:type t: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
t_tilde = (t - self.t_0) / (self.t_1 - self.t_0)
return self.u_0 * (1 - t_tilde) \
+ self.u_1 * t_tilde \
+ (1 - torch.exp((1 - t_tilde) * t_tilde)) * output_tensor
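# --- Illustrative sketch (added for exposition; not part of the original module) ---
# The double-ended condition pins the output at both t_0 and t_1, since the
# multiplier (1 - exp((1 - t_tilde) * t_tilde)) vanishes when t_tilde is 0 or 1:
def _demo_dirichlet_bvp():
    cond = DirichletBVP(t_0=0.0, u_0=2.0, t_1=1.0, u_1=-1.0)
    raw = torch.randn(3, 1)
    assert torch.allclose(cond.parameterize(raw, torch.zeros(3, 1)), torch.full((3, 1), 2.0))
    assert torch.allclose(cond.parameterize(raw, torch.ones(3, 1)), torch.full((3, 1), -1.0))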
class DirichletBVP2D(BaseCondition):
r"""An Dirichlet boundary condition on the boundary of :math:`[x_0, x_1] \times [y_0, y_1]`, where
- :math:`u(x_0, y) = f_0(y)`;
- :math:`u(x_1, y) = f_1(y)`;
- :math:`u(x, y_0) = g_0(x)`;
- :math:`u(x, y_1) = g_1(x)`.
:param x_min: The lower bound of x, the :math:`x_0`.
:type x_min: float
:param x_min_val: The boundary value on :math:`x = x_0`, i.e. :math:`f_0(y)`.
:type x_min_val: callable
:param x_max: The upper bound of x, the :math:`x_1`.
:type x_max: float
:param x_max_val: The boundary value on :math:`x = x_1`, i.e. :math:`f_1(y)`.
:type x_max_val: callable
:param y_min: The lower bound of y, the :math:`y_0`.
:type y_min: float
:param y_min_val: The boundary value on :math:`y = y_0`, i.e. :math:`g_0(x)`.
:type y_min_val: callable
:param y_max: The upper bound of y, the :math:`y_1`.
:type y_max: float
:param y_max_val: The boundary value on :math:`y = y_1`, i.e. :math:`g_1(x)`.
:type y_max_val: callable
"""
def __init__(self, x_min, x_min_val, x_max, x_max_val, y_min, y_min_val, y_max, y_max_val):
r"""Initializer method
"""
super().__init__()
self.x0, self.f0 = x_min, x_min_val
self.x1, self.f1 = x_max, x_max_val
self.y0, self.g0 = y_min, y_min_val
self.y1, self.g1 = y_max, y_max_val
def parameterize(self, output_tensor, x, y):
r"""Re-parameterizes outputs such that the Dirichlet condition is satisfied on all four sides of the domain.
The re-parameterization is
:math:`\displaystyle u(x,y)=A(x,y)
+\tilde{x}\big(1-\tilde{x}\big)\tilde{y}\big(1-\tilde{y}\big)\mathrm{ANN}(x,y)`, where
:math:`\displaystyle \begin{align*}
A(x,y)=&\big(1-\tilde{x}\big)f_0(y)+\tilde{x}f_1(y) \\
&+\big(1-\tilde{y}\big)\Big(g_0(x)-\big(1-\tilde{x}\big)g_0(x_0)+\tilde{x}g_0(x_1)\Big) \\
&+\tilde{y}\Big(g_1(x)-\big(1-\tilde{x}\big)g_1(x_0)+\tilde{x}g_1(x_1)\Big)
\end{align*}`
:math:`\displaystyle\tilde{x}=\frac{x-x_0}{x_1-x_0}`,
:math:`\displaystyle\tilde{y}=\frac{y-y_0}{y_1-y_0}`,
and :math:`\mathrm{ANN}` is the neural network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param x: :math:`x`-coordinates of inputs to the neural network; i.e., the sampled :math:`x`-coordinates.
:type x: `torch.Tensor`
:param y: :math:`y`-coordinates of inputs to the neural network; i.e., the sampled :math:`y`-coordinates.
:type y: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
x_tilde = (x - self.x0) / (self.x1 - self.x0)
y_tilde = (y - self.y0) / (self.y1 - self.y0)
x0 = torch.ones_like(x_tilde[0, 0]).expand(*x_tilde.shape) * self.x0
x1 = torch.ones_like(x_tilde[0, 0]).expand(*x_tilde.shape) * self.x1
Axy = (1 - x_tilde) * self.f0(y) + x_tilde * self.f1(y) \
+ (1 - y_tilde) * (self.g0(x) - ((1 - x_tilde) * self.g0(x0) + x_tilde * self.g0(x1))) \
+ y_tilde * (self.g1(x) - ((1 - x_tilde) * self.g1(x0) + x_tilde * self.g1(x1)))
return Axy + x_tilde * (1 - x_tilde) * y_tilde * (1 - y_tilde) * output_tensor
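# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Boundary functions consistent with u(x, y) = x + y on the unit square; on
# each edge the multiplier x_tilde(1-x_tilde)y_tilde(1-y_tilde) vanishes, so
# A(x, y) alone must reproduce the boundary values:
def _demo_dirichlet_bvp_2d():
    cond = DirichletBVP2D(
        x_min=0.0, x_min_val=lambda y: y,      # f_0(y) = 0 + y
        x_max=1.0, x_max_val=lambda y: 1 + y,  # f_1(y) = 1 + y
        y_min=0.0, y_min_val=lambda x: x,      # g_0(x) = x + 0
        y_max=1.0, y_max_val=lambda x: x + 1,  # g_1(x) = x + 1
    )
    raw = torch.randn(7, 1)
    y = torch.rand(7, 1)
    # on the edge x = x_0 the output reduces to f_0(y) = y
    assert torch.allclose(cond.parameterize(raw, torch.zeros(7, 1), y), y)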
class IBVP1D(BaseCondition):
r"""An initial & boundary condition on a 1-D range where :math:`x\in[x_0, x_1]` and time starts at :math:`t_0`.
The conditions should have the following parts:
- :math:`u(x,t_0)=u_0(x)`,
- :math:`u(x_0,t)=g(t)` or :math:`u'_x(x_0,t)=p(t)`,
- :math:`u(x_1,t)=h(t)` or :math:`u'_x(x_1,t)=q(t)`,
where :math:`\displaystyle u'_x=\frac{\partial u}{\partial x}`.
:param x_min: The lower bound of x, the :math:`x_0`.
:type x_min: float
:param x_max: The upper bound of x, the :math:`x_1`.
:type x_max: float
:param t_min: The initial time, the :math:`t_0`.
:type t_min: float
:param t_min_val: The initial condition, the :math:`u_0(x)`.
:type t_min_val: callable
:param x_min_val: The Dirichlet boundary condition when :math:`x = x_0`, the :math:`u(x_0, t)`, defaults to None.
:type x_min_val: callable, optional
:param x_min_prime: The Neumann boundary condition when :math:`x = x_0`, the :math:`u'_x(x_0, t)`, defaults to None.
:type x_min_prime: callable, optional
:param x_max_val: The Dirichlet boundary condition when :math:`x = x_1`, the :math:`u(x_1, t)`, defaults to None.
:type x_max_val: callable, optional
:param x_max_prime: The Neumann boundary condition when :math:`x = x_1`, the :math:`u'_x(x_1, t)`, defaults to None.
:type x_max_prime: callable, optional
:raises NotImplementedError: When unimplemented boundary conditions are configured.
.. note::
        This condition cannot be passed to ``neurodiffeq.conditions.EnsembleCondition`` unless both boundaries use
Dirichlet conditions (by specifying only ``x_min_val`` and ``x_max_val``) and ``force`` is set to True in
EnsembleCondition's constructor.
"""
def __init__(
self, x_min, x_max, t_min, t_min_val,
x_min_val=None, x_min_prime=None,
x_max_val=None, x_max_prime=None,
):
super().__init__()
n_conditions = sum(c is not None for c in [x_min_val, x_min_prime, x_max_val, x_max_prime])
if n_conditions != 2 or (x_min_val and x_min_prime) or (x_max_val and x_max_prime):
raise NotImplementedError('Sorry, this boundary condition is not implemented.')
self.x_min, self.x_min_val, self.x_min_prime = x_min, x_min_val, x_min_prime
self.x_max, self.x_max_val, self.x_max_prime = x_max, x_max_val, x_max_prime
self.t_min, self.t_min_val = t_min, t_min_val
def enforce(self, net, x, t):
r"""Enforces this condition on a network with inputs `x` and `t`
:param net: The network whose output is to be re-parameterized.
:type net: `torch.nn.Module`
:param x: The :math:`x`-coordinates of the samples; i.e., the spatial coordinates.
:type x: `torch.Tensor`
:param t: The :math:`t`-coordinates of the samples; i.e., the temporal coordinates.
:type t: `torch.Tensor`
:return: The re-parameterized output, where the condition is automatically satisfied.
:rtype: `torch.Tensor`
.. note::
This method overrides the default method of ``neurodiffeq.conditions.BaseCondition`` .
In general, you should avoid overriding ``enforce`` when implementing custom boundary conditions.
"""
def ANN(x, t):
out = net(torch.cat([x, t], dim=1))
if self.ith_unit is not None:
out = out[:, self.ith_unit].view(-1, 1)
return out
uxt = ANN(x, t)
if self.x_min_val and self.x_max_val:
return self.parameterize(uxt, x, t)
elif self.x_min_val and self.x_max_prime:
x1 = self.x_max * torch.ones_like(x, requires_grad=True)
ux1t = ANN(x1, t)
return self.parameterize(uxt, x, t, ux1t, x1)
elif self.x_min_prime and self.x_max_val:
x0 = self.x_min * torch.ones_like(x, requires_grad=True)
ux0t = ANN(x0, t)
return self.parameterize(uxt, x, t, ux0t, x0)
elif self.x_min_prime and self.x_max_prime:
x0 = self.x_min * torch.ones_like(x, requires_grad=True)
x1 = self.x_max * torch.ones_like(x, requires_grad=True)
ux0t = ANN(x0, t)
ux1t = ANN(x1, t)
return self.parameterize(uxt, x, t, ux0t, x0, ux1t, x1)
else:
raise NotImplementedError('Sorry, this boundary condition is not implemented.')
def parameterize(self, u, x, t, *additional_tensors):
r"""Re-parameterizes outputs such that the initial and boundary conditions are satisfied.
The Initial condition is always :math:`u(x,t_0)=u_0(x)`. There are four boundary conditions that are
currently implemented:
- For Dirichlet-Dirichlet boundary condition :math:`u(x_0,t)=g(t)` and :math:`u(x_1,t)=h(t)`:
The re-parameterization is
:math:`\displaystyle u(x,t)=A(x,t)+\tilde{x}\big(1-\tilde{x}\big)\Big(1-e^{-\tilde{t}}\Big)\mathrm{ANN}(x,t)`,
where :math:`\displaystyle A(x,t)=u_0(x)+
\tilde{x}\big(h(t)-h(t_0)\big)+\big(1-\tilde{x}\big)\big(g(t)-g(t_0)\big)`.
- For Dirichlet-Neumann boundary condition :math:`u(x_0,t)=g(t)` and :math:`u'_x(x_1, t)=q(t)`:
The re-parameterization is
:math:`\displaystyle u(x,t)=A(x,t)+\tilde{x}\Big(1-e^{-\tilde{t}}\Big)
\Big(\mathrm{ANN}(x,t)-\big(x_1-x_0\big)\mathrm{ANN}'_x(x_1,t)-\mathrm{ANN}(x_1,t)\Big)`,
where :math:`\displaystyle A(x,t)=u_0(x)+\big(x-x_0\big)\big(q(t)-q(t_0)\big)+\big(g(t)-g(t_0)\big)`.
- For Neumann-Dirichlet boundary condition :math:`u'_x(x_0,t)=p(t)` and :math:`u(x_1, t)=h(t)`:
The re-parameterization is
:math:`\displaystyle u(x,t)=A(x,t)+\big(1-\tilde{x}\big)\Big(1-e^{-\tilde{t}}\Big)
\Big(\mathrm{ANN}(x,t)-\big(x_1-x_0\big)\mathrm{ANN}'_x(x_0,t)-\mathrm{ANN}(x_0,t)\Big)`,
where :math:`\displaystyle A(x,t)=u_0(x)+\big(x_1-x\big)\big(p(t)-p(t_0)\big)+\big(h(t)-h(t_0)\big)`.
- For Neumann-Neumann boundary condition :math:`u'_x(x_0,t)=p(t)` and :math:`u'_x(x_1, t)=q(t)`
The re-parameterization is
:math:`\displaystyle u(x,t)=A(x,t)+\left(1-e^{-\tilde{t}}\right)
\Big(
\mathrm{ANN}(x,t)-\big(x-x_0\big)\mathrm{ANN}'_x(x_0,t)
+\frac{1}{2}\tilde{x}^2\big(x_1-x_0\big)
\big(\mathrm{ANN}'_x(x_0,t)-\mathrm{ANN}'_x(x_1,t)\big)
\Big)`,
where :math:`\displaystyle A(x,t)=u_0(x)
-\frac{1}{2}\big(1-\tilde{x}\big)^2\big(x_1-x_0\big)\big(p(t)-p(t_0)\big)
+\frac{1}{2}\tilde{x}^2\big(x_1-x_0\big)\big(q(t)-q(t_0)\big)`.
Notations:
        - :math:`\displaystyle\tilde{t}=t-t_0`,
- :math:`\displaystyle\tilde{x}=\frac{x-x_0}{x_1-x_0}`,
- :math:`\displaystyle\mathrm{ANN}` is the neural network,
- and :math:`\displaystyle\mathrm{ANN}'_x=\frac{\partial ANN}{\partial x}`.
        :param u: Output of the neural network.
        :type u: `torch.Tensor`
:param x: The :math:`x`-coordinates of the samples; i.e., the spatial coordinates.
:type x: `torch.Tensor`
:param t: The :math:`t`-coordinates of the samples; i.e., the temporal coordinates.
:type t: `torch.Tensor`
:param additional_tensors: additional tensors that will be passed by ``enforce``
:type additional_tensors: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
t0 = self.t_min * torch.ones_like(t, requires_grad=True)
x_tilde = (x - self.x_min) / (self.x_max - self.x_min)
t_tilde = t - self.t_min
if self.x_min_val and self.x_max_val:
return self._parameterize_dd(u, x, t, x_tilde, t_tilde, t0)
elif self.x_min_val and self.x_max_prime:
return self._parameterize_dn(u, x, t, x_tilde, t_tilde, t0, *additional_tensors)
elif self.x_min_prime and self.x_max_val:
return self._parameterize_nd(u, x, t, x_tilde, t_tilde, t0, *additional_tensors)
elif self.x_min_prime and self.x_max_prime:
return self._parameterize_nn(u, x, t, x_tilde, t_tilde, t0, *additional_tensors)
else:
raise NotImplementedError('Sorry, this boundary condition is not implemented.')
# When we have Dirichlet boundary conditions on both ends of the domain:
def _parameterize_dd(self, uxt, x, t, x_tilde, t_tilde, t0):
Axt = self.t_min_val(x) + \
x_tilde * (self.x_max_val(t) - self.x_max_val(t0)) + \
(1 - x_tilde) * (self.x_min_val(t) - self.x_min_val(t0))
return Axt + x_tilde * (1 - x_tilde) * (1 - torch.exp(-t_tilde)) * uxt
# When we have Dirichlet boundary condition on the left end of the domain
# and Neumann boundary condition on the right end of the domain:
def _parameterize_dn(self, uxt, x, t, x_tilde, t_tilde, t0, ux1t, x1):
Axt = (self.x_min_val(t) - self.x_min_val(t0)) + self.t_min_val(x) + \
x_tilde * (self.x_max - self.x_min) * (self.x_max_prime(t) - self.x_max_prime(t0))
return Axt + x_tilde * (1 - torch.exp(-t_tilde)) * (
uxt - (self.x_max - self.x_min) * diff(ux1t, x1) - ux1t
)
# When we have Neumann boundary condition on the left end of the domain
# and Dirichlet boundary condition on the right end of the domain:
def _parameterize_nd(self, uxt, x, t, x_tilde, t_tilde, t0, ux0t, x0):
Axt = (self.x_max_val(t) - self.x_max_val(t0)) + self.t_min_val(x) + \
(x_tilde - 1) * (self.x_max - self.x_min) * (self.x_min_prime(t) - self.x_min_prime(t0))
return Axt + (1 - x_tilde) * (1 - torch.exp(-t_tilde)) * (
uxt + (self.x_max - self.x_min) * diff(ux0t, x0) - ux0t
)
# When we have Neumann boundary conditions on both ends of the domain:
def _parameterize_nn(self, uxt, x, t, x_tilde, t_tilde, t0, ux0t, x0, ux1t, x1):
Axt = self.t_min_val(x) \
- 0.5 * (1 - x_tilde) ** 2 * (self.x_max - self.x_min) * (self.x_min_prime(t) - self.x_min_prime(t0)) \
+ 0.5 * x_tilde ** 2 * (self.x_max - self.x_min) * (self.x_max_prime(t) - self.x_max_prime(t0))
return Axt + (1 - torch.exp(-t_tilde)) * (
uxt
- x_tilde * (self.x_max - self.x_min) * diff(ux0t, x0)
+ 0.5 * x_tilde ** 2 * (self.x_max - self.x_min) * (
diff(ux0t, x0) - diff(ux1t, x1)
)
)
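# --- Illustrative sketch (added for exposition; not part of the original module) ---
# With Dirichlet conditions on both ends, the re-parameterized output at
# t = t_0 reduces to the initial profile u_0(x):
def _demo_ibvp1d_dd():
    cond = IBVP1D(
        x_min=0.0, x_max=1.0, t_min=0.0,
        t_min_val=lambda x: torch.sin(np.pi * x),
        x_min_val=lambda t: torch.zeros_like(t),
        x_max_val=lambda t: torch.zeros_like(t),
    )
    x, t = torch.rand(6, 1), torch.zeros(6, 1)
    raw = torch.randn(6, 1)
    assert torch.allclose(cond.parameterize(raw, x, t), torch.sin(np.pi * x))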
class DoubleEndedBVP1D(BaseCondition):
r"""A boundary condition on a 1-D range where :math:`x\in[x_0, x_1]`.
The conditions should have the following parts:
- :math:`u(x_0)=u_0` or :math:`u'_x(x_0)=u'_0`,
- :math:`u(x_1)=u_1` or :math:`u'_x(x_1)=u'_1`,
where :math:`\displaystyle u'_x=\frac{\partial u}{\partial x}`.
:param x_min: The lower bound of x, the :math:`x_0`.
:type x_min: float
:param x_max: The upper bound of x, the :math:`x_1`.
:type x_max: float
    :param x_min_val: The Dirichlet boundary condition when :math:`x = x_0`, the :math:`u(x_0)`, defaults to None.
    :type x_min_val: float, optional
    :param x_min_prime: The Neumann boundary condition when :math:`x = x_0`, the :math:`u'_x(x_0)`, defaults to None.
    :type x_min_prime: float, optional
    :param x_max_val: The Dirichlet boundary condition when :math:`x = x_1`, the :math:`u(x_1)`, defaults to None.
    :type x_max_val: float, optional
    :param x_max_prime: The Neumann boundary condition when :math:`x = x_1`, the :math:`u'_x(x_1)`, defaults to None.
    :type x_max_prime: float, optional
:raises NotImplementedError: When unimplemented boundary conditions are configured.
.. note::
        This condition cannot be passed to ``neurodiffeq.conditions.EnsembleCondition`` unless both boundaries use
Dirichlet conditions (by specifying only ``x_min_val`` and ``x_max_val``) and ``force`` is set to True in
EnsembleCondition's constructor.
"""
def __init__(
self, x_min, x_max,
x_min_val=None, x_min_prime=None,
x_max_val=None, x_max_prime=None,
):
super().__init__()
n_conditions = sum(c is not None for c in [x_min_val, x_min_prime, x_max_val, x_max_prime])
if n_conditions != 2 or (x_min_val and x_min_prime) or (x_max_val and x_max_prime):
raise NotImplementedError('Sorry, this boundary condition is not implemented.')
self.x_min, self.x_min_val, self.x_min_prime = x_min, x_min_val, x_min_prime
self.x_max, self.x_max_val, self.x_max_prime = x_max, x_max_val, x_max_prime
def enforce(self, net, x):
r"""Enforces this condition on a network with inputs `x`.
:param net: The network whose output is to be re-parameterized.
:type net: `torch.nn.Module`
:param x: The :math:`x`-coordinates of the samples; i.e., the spatial coordinates.
:type x: `torch.Tensor`
:return: The re-parameterized output, where the condition is automatically satisfied.
:rtype: `torch.Tensor`
.. note::
This method overrides the default method of ``neurodiffeq.conditions.BaseCondition`` .
In general, you should avoid overriding ``enforce`` when implementing custom boundary conditions.
"""
def ANN(x):
out = net(torch.cat([x], dim=1))
if self.ith_unit is not None:
out = out[:, self.ith_unit].view(-1, 1)
return out
ux = ANN(x)
if self.x_min_val is not None and self.x_max_val is not None:
return self.parameterize(ux, x)
elif self.x_min_val is not None and self.x_max_prime is not None:
x1 = self.x_max * torch.ones_like(x, requires_grad=True)
ux1 = ANN(x1)
return self.parameterize(ux, x, ux1, x1)
elif self.x_min_prime is not None and self.x_max_val is not None:
x0 = self.x_min * torch.ones_like(x, requires_grad=True)
ux0 = ANN(x0)
return self.parameterize(ux, x, ux0, x0)
elif self.x_min_prime is not None and self.x_max_prime is not None:
x0 = self.x_min * torch.ones_like(x, requires_grad=True)
x1 = self.x_max * torch.ones_like(x, requires_grad=True)
ux0 = ANN(x0)
ux1 = ANN(x1)
return self.parameterize(ux, x, ux0, x0, ux1, x1)
else:
raise NotImplementedError('Sorry, this boundary condition is not implemented.')
def parameterize(self, u, x, *additional_tensors):
r"""Re-parameterizes outputs such that the boundary conditions are satisfied.
There are four boundary conditions that are
currently implemented:
- For Dirichlet-Dirichlet boundary condition :math:`u(x_0)=u_0` and :math:`u(x_1)=u_1`:
The re-parameterization is
:math:`\displaystyle u(x)=A+\tilde{x}\big(1-\tilde{x}\big)\mathrm{ANN}(x)`,
where :math:`\displaystyle A=\big(1-\tilde{x}\big)u_0+\big(\tilde{x}\big)u_1`.
- For Dirichlet-Neumann boundary condition :math:`u(x_0)=u_0` and :math:`u'_x(x_1)=u'_1`:
The re-parameterization is
          :math:`\displaystyle u(x)=A(x)+\tilde{x}\Big(\mathrm{ANN}(x)-\mathrm{ANN}(x_1)+u_0-\big(x_1-x_0\big)\mathrm{ANN}'_x(x_1)\Big)`,
where :math:`\displaystyle A(x)=\big(1-\tilde{x}\big)u_0+\frac{1}{2}\tilde{x}^2\big(x_1-x_0\big)u'_1`.
- For Neumann-Dirichlet boundary condition :math:`u'_x(x_0)=u'_0` and :math:`u(x_1)=u_1`:
The re-parameterization is
          :math:`\displaystyle u(x)=A(x)+\big(1-\tilde{x}\big)\Big(\mathrm{ANN}(x)-\mathrm{ANN}(x_0)+u_1+\big(x_1-x_0\big)\mathrm{ANN}'_x(x_0)\Big)`,
where :math:`\displaystyle A(x)=\tilde{x}u_1-\frac{1}{2}\big(1-\tilde{x}\big)^2\big(x_1-x_0\big)u'_0`.
- For Neumann-Neumann boundary condition :math:`u'_x(x_0)=u'_0` and :math:`u'_x(x_1)=u'_1`:
The re-parameterization is
          :math:`\displaystyle u(x)=A(x)+\frac{1}{2}\tilde{x}^2\Big(\mathrm{ANN}(x)-\mathrm{ANN}(x_1)-\frac{1}{2}\mathrm{ANN}'_x(x_1)\big(x_1-x_0\big)\Big)
          +\frac{1}{2}\big(1-\tilde{x}\big)^2\Big(\mathrm{ANN}(x)-\mathrm{ANN}(x_0)+\frac{1}{2}\mathrm{ANN}'_x(x_0)\big(x_1-x_0\big)\Big)`,
where :math:`\displaystyle A(x)=\frac{1}{2}\tilde{x}^2\big(x_1-x_0\big)u'_1 - \frac{1}{2}\big(1-\tilde{x}\big)^2\big(x_1-x_0\big)u'_0`.
Notations:
- :math:`\displaystyle\tilde{x}=\frac{x-x_0}{x_1-x_0}`,
- :math:`\displaystyle\mathrm{ANN}` is the neural network,
- and :math:`\displaystyle\mathrm{ANN}'_x=\frac{\partial ANN}{\partial x}`.
        :param u: Output of the neural network.
        :type u: `torch.Tensor`
:param x: The :math:`x`-coordinates of the samples; i.e., the spatial coordinates.
:type x: `torch.Tensor`
:param additional_tensors: additional tensors that will be passed by ``enforce``
:type additional_tensors: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
x_tilde = (x - self.x_min) / (self.x_max - self.x_min)
if self.x_min_val is not None and self.x_max_val is not None:
return self._parameterize_dd(u, x, x_tilde)
elif self.x_min_val is not None and self.x_max_prime is not None:
return self._parameterize_dn(u, x, x_tilde, *additional_tensors)
elif self.x_min_prime is not None and self.x_max_val is not None:
return self._parameterize_nd(u, x, x_tilde, *additional_tensors)
elif self.x_min_prime is not None and self.x_max_prime is not None:
return self._parameterize_nn(u, x, x_tilde, *additional_tensors)
else:
raise NotImplementedError('Sorry, this boundary condition is not implemented.')
# When we have Dirichlet boundary conditions on both ends of the domain:
def _parameterize_dd(self, ux, x, x_tilde):
        Ax = self.x_min_val * (1 - x_tilde) + self.x_max_val * x_tilde
return Ax + x_tilde * (1 - x_tilde) * ux
# When we have Dirichlet boundary condition on the left end of the domain
# and Neumann boundary condition on the right end of the domain:
def _parameterize_dn(self, ux, x, x_tilde, ux1, x1):
Ax = (1 - x_tilde) * self.x_min_val + 0.5 * x_tilde ** 2 * self.x_max_prime * (self.x_max - self.x_min)
return Ax + x_tilde * (ux - ux1 + self.x_min_val - diff(ux1, x1) * (self.x_max - self.x_min))
#Ax = self.x_min_val + (x - self.x_min) * self.x_max_prime
#return Ax + x_tilde * (ux - (self.x_max - self.x_min) * diff(ux1, x1) - ux1)
# When we have Neumann boundary condition on the left end of the domain
# and Dirichlet boundary condition on the right end of the domain:
def _parameterize_nd(self, ux, x, x_tilde, ux0, x0):
Ax = x_tilde * self.x_max_val - 0.5 * (1 - x_tilde) ** 2 * self.x_min_prime * (self.x_max - self.x_min)
return Ax + (1 - x_tilde) * (ux - ux0 + self.x_max_val + diff(ux0, x0) * (self.x_max - self.x_min))
#Ax = self.x_max_val + (x - self.x_max) * self.x_min_prime
#return Ax + (1 - x_tilde) * (ux + (self.x_max - self.x_min) * diff(ux0, x0) - ux0)
# When we have Neumann boundary conditions on both ends of the domain:
def _parameterize_nn(self, ux, x, x_tilde, ux0, x0, ux1, x1):
Ax = - 0.5 * (1 - x_tilde) ** 2 * (self.x_max - self.x_min) * self.x_min_prime + 0.5 * x_tilde ** 2 * (self.x_max - self.x_min) * self.x_max_prime
return Ax + 0.5 * x_tilde ** 2 * (ux - ux1 - 0.5 * diff(ux1, x1)*(self.x_max - self.x_min)) + \
0.5 * (1 - x_tilde) ** 2 * (ux - ux0 + 0.5 * diff(ux0, x0)*(self.x_max - self.x_min))
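# --- Illustrative sketch (added for exposition; not part of the original module) ---
# With Dirichlet values on both ends (floats, not callables), the output is
# pinned at x_0 and x_1 because x_tilde(1 - x_tilde) vanishes there:
def _demo_double_ended_dd():
    cond = DoubleEndedBVP1D(x_min=0.0, x_max=2.0, x_min_val=1.0, x_max_val=3.0)
    raw = torch.randn(4, 1)
    assert torch.allclose(cond.parameterize(raw, torch.zeros(4, 1)), torch.full((4, 1), 1.0))
    assert torch.allclose(cond.parameterize(raw, torch.full((4, 1), 2.0)), torch.full((4, 1), 3.0))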
# TODO: reduce duplication
class DirichletBVPSpherical(BaseCondition):
r"""The Dirichlet boundary condition for the interior and exterior boundary of the sphere,
where the interior boundary is not necessarily a point. The conditions are:
- :math:`u(r_0,\theta,\phi)=f(\theta,\phi)`
- :math:`u(r_1,\theta,\phi)=g(\theta,\phi)`
:param r_0:
The radius of the interior boundary.
When :math:`r_0 = 0`, the interior boundary collapses to a single point (center of the ball).
:type r_0: float
:param f:
The value of :math:`u` on the interior boundary.
:math:`u(r_0, \theta, \phi)=f(\theta, \phi)`.
:type f: callable
:param r_1:
The radius of the exterior boundary.
If set to None, `g` must also be None.
:type r_1: float or None
:param g:
The value of :math:`u` on the exterior boundary.
:math:`u(r_1, \theta, \phi)=g(\theta, \phi)`.
If set to None, `r_1` must also be set to None.
:type g: callable or None
"""
def __init__(self, r_0, f, r_1=None, g=None):
super(DirichletBVPSpherical, self).__init__()
if (r_1 is None) ^ (g is None):
raise ValueError(f'r_1 and g must be both/neither set to None; got r_1={r_1}, g={g}')
self.r_0, self.r_1 = r_0, r_1
self.f, self.g = f, g
def parameterize(self, output_tensor, r, theta, phi):
r"""Re-parameterizes outputs such that the Dirichlet condition is satisfied on both spherical boundaries.
- If both inner and outer boundaries are specified
:math:`u(r_0,\theta,\phi)=f(\theta,\phi)` and
:math:`u(r_1,\theta,\phi)=g(\theta,\phi)`:
The re-parameterization is
:math:`\big(1-\tilde{r}\big)f(\theta,\phi)+\tilde{r}g(\theta,\phi)
+\Big(1-e^{\tilde{r}(1-{\tilde{r}})}\Big)\mathrm{ANN}(r, \theta, \phi)`
where :math:`\displaystyle\tilde{r}=\frac{r-r_0}{r_1-r_0}`;
- If only one boundary is specified (inner or outer) :math:`u(r_0,\theta,\phi)=f(\theta,\phi)`
The re-parameterization is
:math:`f(\theta,\phi)+\Big(1-e^{-|r-r_0|}\Big)\mathrm{ANN}(r, \theta, \phi)`;
where :math:`\mathrm{ANN}` is the neural network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param r: The radii (or :math:`r`-component) of the inputs to the network.
:type r: `torch.Tensor`
:param theta: The co-latitudes (or :math:`\theta`-component) of the inputs to the network.
:type theta: `torch.Tensor`
:param phi: The longitudes (or :math:`\phi`-component) of the inputs to the network.
:type phi: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
if self.r_1 is None:
return (1 - torch.exp(-torch.abs(r - self.r_0))) * output_tensor + self.f(theta, phi)
else:
r_tilde = (r - self.r_0) / (self.r_1 - self.r_0)
return self.f(theta, phi) * (1 - r_tilde) + \
self.g(theta, phi) * r_tilde + \
(1. - torch.exp((1 - r_tilde) * r_tilde)) * output_tensor
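# --- Illustrative sketch (added for exposition; not part of the original module) ---
# With only the interior boundary given, the output at r = r_0 is exactly
# f(theta, phi), since 1 - exp(-|r - r_0|) vanishes there:
def _demo_spherical_inner_only():
    cond = DirichletBVPSpherical(r_0=1.0, f=lambda theta, phi: torch.cos(theta))
    r = torch.full((5, 1), 1.0)
    theta, phi = torch.rand(5, 1), torch.rand(5, 1)
    raw = torch.randn(5, 1)
    assert torch.allclose(cond.parameterize(raw, r, theta, phi), torch.cos(theta))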
# TODO: reduce duplication
class InfDirichletBVPSpherical(BaseCondition):
r"""Similar to ``neurodiffeq.conditions.DirichletBVPSpherical``. but with :math:`r_1\to+\infty`. Specifically,
- :math:`\displaystyle u(r_0,\theta,\phi)=f(\theta,\phi)`,
- :math:`\lim_{r\to+\infty}u(r,\theta,\phi)=g(\theta,\phi)`.
:param r_0:
The radius of the interior boundary.
When :math:`r_0=0`, the interior boundary collapses to a single point (center of the ball).
:type r_0: float
:param f:
The value of :math:`u` on the interior boundary.
:math:`u(r_0,\theta,\phi)=f(\theta,\phi)`.
:type f: callable
:param g:
The value of :math:`u` at infinity.
:math:`\lim_{r\to+\infty}u(r,\theta,\phi)=g(\theta,\phi)`.
:type g: callable
:param order:
The smallest :math:`k` such that :math:`\lim_{r\to+\infty}u(r,\theta,\phi)e^{-kr}=0`.
Defaults to 1.
:type order: int or float
"""
def __init__(self, r_0, f, g, order=1):
super(InfDirichletBVPSpherical, self).__init__()
self.r_0 = r_0
self.f = f
self.g = g
self.order = order
def parameterize(self, output_tensor, r, theta, phi):
r"""Re-parameterizes outputs such that the Dirichlet condition is satisfied both at :math:`r_0` and infinity.
The re-parameterization is
:math:`\begin{align}
u(r,\theta,\phi)=
&e^{-k(r-r_0)}f(\theta,\phi)\\
&+\tanh{\big(r-r_0\big)}g(\theta,\phi)\\
&+e^{-k(r-r_0)}\tanh{\big(r-r_0\big)}\mathrm{ANN}(r,\theta,\phi)
\end{align}`,
where :math:`\mathrm{ANN}` is the neural network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param r: The radii (or :math:`r`-component) of the inputs to the network.
:type r: `torch.Tensor`
:param theta: The co-latitudes (or :math:`\theta`-component) of the inputs to the network.
:type theta: `torch.Tensor`
:param phi: The longitudes (or :math:`\phi`-component) of the inputs to the network.
:type phi: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
dr = r - self.r_0
return self.f(theta, phi) * torch.exp(-self.order * dr) + \
self.g(theta, phi) * torch.tanh(dr) + \
torch.exp(-self.order * dr) * torch.tanh(dr) * output_tensor
# TODO: reduce duplication
class DirichletBVPSphericalBasis(BaseCondition):
r"""Similar to ``neurodiffeq.conditions.DirichletBVPSpherical``.
The only difference is this condition is enforced on a neural net that only takes in :math:`r`
and returns the spherical harmonic coefficients R(r).
We constrain the coefficients :math:`R_k(r)` in :math:`u(r,\theta,\phi)=\sum_{k}R_k(r)Y_k(\theta,\phi)`,
where :math:`\big\{Y_k(\theta,\phi)\big\}_{k=1}^{K}` can be **any spherical function basis**.
A recommended choice is the real spherical harmonics :math:`Y_l^m(\theta,\phi)`,
where :math:`l` is the degree of the spherical harmonics and :math:`m` is the order of the spherical harmonics.
The boundary conditions are: :math:`\mathbf{R}(r_0)=\mathbf{R}_0` and :math:`\mathbf{R}(r_1)=\mathbf{R}_1`,
where :math:`\mathbf{R}` is a vector whose components are :math:`\big\{R_k\big\}_{k=1}^{K}`
:param r_0:
The radius of the interior boundary.
When r_0 = 0, the interior boundary is collapsed to a single point (center of the ball).
:type r_0: float
:param R_0:
The value of harmonic coefficients :math:`\mathbf{R}` on the interior boundary.
:math:`\mathbf{R}(r_0)=\mathbf{R}_0`.
:type R_0: `torch.Tensor`
:param r_1:
The radius of the exterior boundary.
If set to None, `R_1` must also be None
:type r_1: float or None
:param R_1:
The value of harmonic coefficients :math:`\mathbf{R}` on the exterior boundary.
:math:`\mathbf{R}(r_1)=\mathbf{R}_1`.
:type R_1: `torch.Tensor`
:param max_degree:
**[DEPRECATED]** Highest degree when using spherical harmonics.
:type max_degree: int
"""
def __init__(self, r_0, R_0, r_1=None, R_1=None, max_degree=None):
super(DirichletBVPSphericalBasis, self).__init__()
if (r_1 is None) ^ (R_1 is None):
raise ValueError(f'r_1 and R_1 must be both/neither set to None; got r_1={r_1}, R_1={R_1}')
self.r_0, self.r_1 = r_0, r_1
self.R_0, self.R_1 = R_0, R_1
def parameterize(self, output_tensor, r):
r"""Re-parameterizes outputs such that the Dirichlet condition is satisfied on both spherical boundaries.
- If both inner and outer boundaries are specified
:math:`\mathbf{R}(r_0,\theta,\phi)=\mathbf{R}_0` and
:math:`\mathbf{R}(r_1,\theta,\phi)=\mathbf{R}_1`.
The re-parameterization is
:math:`\big(1-\tilde{r}\big)\mathbf{R}_0+\tilde{r}\mathbf{R}_1
+\Big(1-e^{\tilde{r}(1-{\tilde{r}})}\Big)\mathrm{ANN}(r)`
where :math:`\displaystyle\tilde{r}=\frac{r-r_0}{r_1-r_0}`;
- If only one boundary is specified (inner or outer) :math:`\mathbf{R}(r_0,\theta,\phi)=\mathbf{R_0}`
The re-parameterization is
          :math:`\mathbf{R}_0+\Big(1-e^{-(r-r_0)}\Big)\mathrm{ANN}(r)`;
where :math:`\mathrm{ANN}` is the neural network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param r: The radii (or :math:`r`-component) of the inputs to the network.
:type r: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
if self.r_1 is None:
ret = (1 - torch.exp(-r + self.r_0)) * output_tensor + self.R_0
else:
r_tilde = (r - self.r_0) / (self.r_1 - self.r_0)
ret = self.R_0 * (1 - r_tilde) + \
self.R_1 * r_tilde + \
(1. - torch.exp((1 - r_tilde) * r_tilde)) * output_tensor
return ret
# TODO: reduce duplication
class InfDirichletBVPSphericalBasis(BaseCondition):
r"""Similar to ``neurodiffeq.conditions.InfDirichletBVPSpherical``.
The only difference is this condition is enforced on a neural net that only takes in :math:`r`
and returns the spherical harmonic coefficients R(r).
We constrain the coefficients :math:`R_k(r)` in :math:`u(r,\theta,\phi)=\sum_{k}R_k(r)Y_k(\theta,\phi)`,
where :math:`\big\{Y_k(\theta,\phi)\big\}_{k=1}^{K}` can be **any spherical function basis**.
A recommended choice is the real spherical harmonics :math:`Y_l^m(\theta,\phi)`,
where :math:`l` is the degree of the spherical harmonics and :math:`m` is the order of the spherical harmonics.
The boundary conditions are:
:math:`\mathbf{R}(r_0)=\mathbf{R}_0` and
    :math:`\lim_{r\to+\infty}\mathbf{R}(r)=\mathbf{R}_\infty`,
where :math:`\mathbf{R}` is a vector whose components are :math:`\big\{R_k\big\}_{k=1}^{K}`.
:param r_0:
The radius of the interior boundary.
When r_0 = 0, the interior boundary is collapsed to a single point (center of the ball).
:type r_0: float
:param R_0:
The value of harmonic coefficients :math:`R` on the interior boundary.
:math:`R(r_0)=R_0`.
:type R_0: `torch.Tensor`
:param R_inf:
The value of harmonic coefficients :math:`R` at infinity.
:math:`\lim_{r\to+\infty}R(r)=R_\infty`.
:type R_inf: `torch.Tensor`
:param order:
The smallest :math:`k` that guarantees :math:`\lim_{r \to +\infty} R(r) e^{-k r} = \bf 0`.
Defaults to 1.
:type order: int or float
:param max_degree:
**[DEPRECATED]** Highest degree when using spherical harmonics.
:type max_degree: int
"""
def __init__(self, r_0, R_0, R_inf, order=1, max_degree=None):
super(InfDirichletBVPSphericalBasis, self).__init__()
self.r_0 = r_0
self.R_0 = R_0
self.R_inf = R_inf
self.order = order
def parameterize(self, output_tensor, r):
r"""Re-parameterizes outputs such that the Dirichlet condition is satisfied at both :math:`r_0` and infinity.
The re-parameterization is
:math:`\begin{align}
        \mathbf{R}(r)=
        &e^{-k(r-r_0)}\mathbf{R}_0\\
        &+\tanh{\big(r-r_0\big)}\mathbf{R}_\infty\\
&+e^{-k(r-r_0)}\tanh{\big(r-r_0\big)}\mathrm{ANN}(r)
\end{align}`,
where :math:`\mathrm{ANN}` is the neural network.
:param output_tensor: Output of the neural network.
:type output_tensor: `torch.Tensor`
:param r: The radii (or :math:`r`-component) of the inputs to the network.
:type r: `torch.Tensor`
:return: The re-parameterized output of the network.
:rtype: `torch.Tensor`
"""
dr = r - self.r_0
return self.R_0 * torch.exp(-self.order * dr) + \
self.R_inf * torch.tanh(dr) + \
torch.exp(-self.order * dr) * torch.tanh(dr) * output_tensor
|
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_lopolykon.png', **options):
super().__init__('ロポリこん', body=body, pantie_position=[5, 662], **options)
self.skin = Image.open('./material/lopolykon_skin.png')
def convert(self, image):
pantie = np.array(image)
arrx = np.zeros(100)
arrx[:50] = np.sin(np.linspace(0, np.pi, 50)) * -50
arry = np.zeros(100)
arry[12:-10] = np.sin(np.linspace(0, np.pi, 78)) * 350
pantie = affine_transform_by_arr(pantie, arrx, arry)[:140,:300]
pantie = np.uint8(resize(pantie, [0.84, 0.84])*255)
return Image.fromarray(pantie)
def patch(self, image, transparent=False):
pantie = self.convert(image)
if transparent:
patched = Image.new("RGBA", self.body_size)
else:
patched = self.body.copy()
patched = self.paste(patched, self.skin, [0, 0])
patched = self.paste(patched, pantie, self.pantie_position)
return patched
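# --- Illustrative usage sketch (added for exposition; the input path below is
# hypothetical, and the body/material files must exist on disk) ---
def _demo_patch(pantie_path='./dream/0001.png'):
    p = patcher()
    p.patch(Image.open(pantie_path), transparent=True).save('patched.png')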
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "celerytimer_test.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
import re
import datetime
timedeltaRegex = re.compile(r'((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')
def parse_timedelta(delta_str):
    parts = timedeltaRegex.match(delta_str)
    if not parts:
        return None
    parts = parts.groupdict()
    params = {}
    for name, param in parts.items():
        if param:
            params[name] = int(param)
    if not params:
        # every group is optional, so even non-matching input yields a
        # (zero-length) match; treat "no components found" as a failed parse
        return None
    return datetime.timedelta(**params)
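# --- Illustrative sketch (added for exposition) ---
# Components may be omitted; unparseable input yields None:
def _demo_parse_timedelta():
    assert parse_timedelta("1d2h3m4s") == datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)
    assert parse_timedelta("2d") == datetime.timedelta(days=2)
    assert parse_timedelta("junk") is None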
|
#!/usr/bin/env python
import argparse
import logging
import os
import re
import sys
import click
def valid_date(date_string):
DATE_INPUT_FORMAT = "%d-%m-%Y"
DATE_INPUT_FORMAT_ALT = "%Y-%m-%dT%H:%M"
from datetime import datetime
try:
return datetime.strptime(date_string, DATE_INPUT_FORMAT)
except ValueError:
try:
return datetime.strptime(date_string, DATE_INPUT_FORMAT_ALT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(date_string)
raise argparse.ArgumentTypeError(msg)
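# --- Illustrative sketch (added for exposition) ---
# `valid_date` accepts either DD-MM-YYYY or an ISO-like YYYY-MM-DDTHH:MM form:
def _demo_valid_date():
    from datetime import datetime
    assert valid_date("01-01-2014") == datetime(2014, 1, 1)
    assert valid_date("2014-01-01T12:30") == datetime(2014, 1, 1, 12, 30)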
@click.group()
def cli():
pass
@cli.command()
@click.option(
"--open",
"open_server",
is_flag=True,
help="Open the server for communication from outside",
default=False,
)
@click.option("--debug-js", is_flag=True, help="Don't minify the JavaScript files")
def testserver(open_server, debug_js):
from anyway.app_and_db import app
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging.DEBUG,
datefmt="%Y-%m-%d %H:%M:%S",
)
if debug_js:
app.config["ASSETS_DEBUG"] = True
default_host = "0.0.0.0" if open_server else "127.0.0.1"
app.run(debug=True, host=os.getenv("IP", default_host), port=int(os.getenv("PORT", 5000)))
@cli.group()
def update_news_flash():
pass
@update_news_flash.command()
@click.option("--source", default="", type=str)
@click.option("--news_flash_id", default="", type=str)
def update(source, news_flash_id):
from anyway.parsers import news_flash
if not source:
source = None
if not news_flash_id:
news_flash_id = None
return news_flash.update_all_in_db(source, news_flash_id)
@update_news_flash.command()
def remove_duplicate_news_flash_rows():
from anyway.parsers import news_flash_db_adapter
news_flash_db_adapter.init_db().remove_duplicate_rows()
@cli.group()
def process():
pass
@process.command()
@click.option("--batch_size", type=int, default=5000)
@click.option("--load_start_year", type=str, default=None)
@click.option("--source", type=str, default="s3")
def cbs(
batch_size,
load_start_year,
source
):
from anyway.parsers.cbs.executor import main
return main(
batch_size=batch_size,
load_start_year=load_start_year,
source=source
)
@process.command()
def news_flash():
from anyway.parsers.news_flash import scrape_all
return scrape_all()
@process.command()
@click.option("--specific_folder", is_flag=True, default=False)
@click.option("--delete_all", is_flag=True)
@click.option("--path", type=str, default="static/data/cbs_vehicles_registered")
def registered_vehicles(specific_folder, delete_all, path):
from anyway.parsers.registered import main
return main(specific_folder=specific_folder, delete_all=delete_all, path=path)
@process.command()
@click.option("--path", type=str, default="static/data/traffic_volume")
def traffic_volume(path):
from anyway.parsers.traffic_volume import main
return main(path)
@process.command()
@click.argument("filename")
def rsa(filename):
from anyway.parsers.rsa import parse
return parse(filename)
@process.command()
@click.argument("filename", type=str, default="static/data/segments/road_segments.xlsx")
def road_segments(filename):
from anyway.parsers.road_segments import parse
return parse(filename)
@process.command()
@click.argument("filepath", type=str, default="static/data/schools/schools.csv")
@click.option("--batch_size", type=int, default=5000)
def schools(filepath, batch_size):
from anyway.parsers.schools import parse
return parse(filepath=filepath, batch_size=batch_size)
@process.command()
@click.argument(
"schools_description_filepath", type=str, default="static/data/schools/schools_description.xlsx"
)
@click.argument(
"schools_coordinates_filepath", type=str, default="static/data/schools/schools_coordinates.xlsx"
)
@click.option("--batch_size", type=int, default=5000)
def schools_with_description(
schools_description_filepath, schools_coordinates_filepath, batch_size
):
from anyway.parsers.schools_with_description import parse
return parse(
schools_description_filepath=schools_description_filepath,
schools_coordinates_filepath=schools_coordinates_filepath,
batch_size=batch_size,
)
@process.command()
@click.argument(
"schools_description_filepath",
type=str,
default="static/data/schools/schools_description_2020.xlsx",
)
@click.argument(
"schools_coordinates_filepath",
type=str,
default="static/data/schools/schools_coordinates_2020.xlsx",
)
@click.option("--batch_size", type=int, default=5000)
def schools_with_description_2020(
schools_description_filepath, schools_coordinates_filepath, batch_size
):
from anyway.parsers.schools_with_description_2020 import parse
return parse(
schools_description_filepath=schools_description_filepath,
schools_coordinates_filepath=schools_coordinates_filepath,
batch_size=batch_size,
)
@process.command()
@click.option(
"--start_date", default="01-01-2014", type=valid_date, help="The Start Date - format DD-MM-YYYY"
)
@click.option(
"--end_date", default="31-12-2018", type=valid_date, help="The End Date - format DD-MM-YYYY"
)
@click.option("--distance", default=0.5, help="float In KM. Default is 0.5 (500m)", type=float)
@click.option("--batch_size", type=int, default=5000)
def injured_around_schools(start_date, end_date, distance, batch_size):
from anyway.parsers.injured_around_schools import parse
return parse(start_date=start_date, end_date=end_date, distance=distance, batch_size=batch_size)
@process.command()
@click.option(
"--from_s3",
"-f",
is_flag=True,
help="get the data from files, instead of waze api",
)
@click.option(
"--start_date", default="01-01-2019", type=valid_date, help="The Start Date - format DD-MM-YYYY"
)
@click.option(
"--end_date", default="01-01-2020", type=valid_date, help="The End Date - format DD-MM-YYYY"
)
def waze_data(from_s3, start_date, end_date):
"""
Get waze data from existing files or from waze api.
Examples for running the script:
- For getting data from waze RTS HTTP API, run:
python -m main process waze-data
- For getting data from the S3 stored json files, run (change the start and end date as you need):
python -m main process waze-data --from_s3 --start_date=01-01-2020 --end_date=01-01-2020
"""
from anyway.parsers.waze.waze_data_parser import ingest_waze_from_files, ingest_waze_from_api
if from_s3:
return ingest_waze_from_files(
bucket_name="anyway-hasadna.appspot.com", start_date=start_date, end_date=end_date
)
else:
return ingest_waze_from_api()
@process.command()
@click.argument("filename", type=str, default="static/data/embedded_reports/embedded_reports.csv")
def embedded_reports(filename):
from anyway.parsers.embedded_reports import parse
return parse(filename)
@process.command()
@click.option(
"--update",
"update",
is_flag=True,
help="Recalculates the cache (default is False)",
default=False,
)
@click.option(
"--no_info", "info", is_flag=True, help="Prints info on cache (default is True)", default=True
)
def infographics_data_cache(info, update):
"""Will refresh the infographics data cache"""
from anyway.parsers.infographics_data_cache_updater import main
return main(update=update, info=info)
@process.command()
def infographics_data_cache_for_road_segments():
"""Will refresh the infographics data cache"""
from anyway.parsers.infographics_data_cache_updater import main_for_road_segments
return main_for_road_segments(update=True, info=True)
@process.group()
def cache():
pass
@cache.command()
def update_street():
"""Update street cache"""
from anyway.parsers.infographics_data_cache_updater import main_for_street
main_for_street()
@process.command()
@click.argument("filename", type=str, default="static/data/casualties/casualties_costs.csv")
def update_casualties_costs(filename):
from anyway.parsers.casualties_costs import parse
return parse(filename)
@cli.group()
def preprocess():
pass
@preprocess.command()
@click.option("--path", type=str)
def preprocess_cbs(path):
from anyway.parsers.cbs.preprocessing_cbs_files import update_cbs_files_names
return update_cbs_files_names(path)
@cli.group()
def create_tables():
pass
@create_tables.command()
def create_cbs_tables():
from anyway.parsers.cbs.executor import create_tables
return create_tables()
@cli.group()
def update_dictionary_tables():
pass
@update_dictionary_tables.command()
@click.option("--path", type=str, default="static/data/cbs")
def update_cbs(path):
from anyway.parsers.cbs.executor import update_dictionary_tables
return update_dictionary_tables(path)
@cli.group()
def truncate_dictionary_tables():
pass
@truncate_dictionary_tables.command()
@click.option("--path", type=str)
def truncate_cbs(path):
from anyway.parsers.cbs.executor import truncate_dictionary_tables
return truncate_dictionary_tables(path)
@cli.command()
@click.argument("identifiers", nargs=-1)
def load_discussions(identifiers):
from anyway.models import DiscussionMarker
from anyway.app_and_db import db
identifiers = identifiers or sys.stdin
for identifier in identifiers:
identifier = identifier.strip()
m = re.match(r"\((\d+\.\d+),\s*(\d+\.\d+)\)", identifier)
if not m:
logging.error("Failed processing: " + identifier)
continue
(latitude, longitude) = m.group(1, 2)
marker = DiscussionMarker.parse(
{
"latitude": latitude,
"longitude": longitude,
"title": identifier,
"identifier": identifier,
}
)
try:
db.session.add(marker)
db.session.commit()
logging.info("Added: " + identifier)
except Exception as e:
db.session.rollback()
logging.warn("Failed: " + identifier + ": " + e)
@cli.group()
def scripts():
pass
@scripts.command()
@click.option(
"--start_date", default="01-01-2013", type=valid_date, help="The Start Date - format DD-MM-YYYY"
)
@click.option(
"--end_date", default="31-12-2017", type=valid_date, help="The End Date - format DD-MM-YYYY"
)
@click.option("--distance", default=0.5, help="float In KM. Default is 0.5 (500m)", type=float)
@click.option(
"--output_path", default="output", help="output file of the results. Default is output.csv"
)
def accidents_around_schools(start_date, end_date, distance, output_path):
from anyway.accidents_around_schools import main
return main(
start_date=start_date, end_date=end_date, distance=distance, output_path=output_path
)
@scripts.command()
def importemail():
from anyway.parsers.cbs.importmail_cbs import main
return main()
if __name__ == "__main__":
cli(sys.argv[1:]) # pylint: disable=too-many-function-args
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Spanner operators.
"""
from typing import List, Optional, Union
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.spanner import SpannerHook
from airflow.utils.decorators import apply_defaults
class SpannerDeployInstanceOperator(BaseOperator):
"""
Creates a new Cloud Spanner instance, or if an instance with the same instance_id
exists in the specified project, updates the Cloud Spanner instance.
:param instance_id: Cloud Spanner instance ID.
:type instance_id: str
:param configuration_name: The name of the Cloud Spanner instance configuration
defining how the instance will be created. Required for
instances that do not yet exist.
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the Cloud Spanner instance in
the GCP Console. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, the name is the same as the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the project which owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START gcp_spanner_deploy_template_fields]
template_fields = ('project_id', 'instance_id', 'configuration_name', 'display_name',
'gcp_conn_id')
# [END gcp_spanner_deploy_template_fields]
@apply_defaults
def __init__(self,
                 instance_id: str,
                 configuration_name: str,
                 node_count: int,
display_name: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
*args, **kwargs) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.configuration_name = configuration_name
self.node_count = node_count
self.display_name = display_name
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
super().__init__(*args, **kwargs)
def _validate_inputs(self):
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' "
"is empty or None")
def execute(self, context):
hook = SpannerHook(gcp_conn_id=self.gcp_conn_id)
if not hook.get_instance(project_id=self.project_id, instance_id=self.instance_id):
self.log.info("Creating Cloud Spanner instance '%s'", self.instance_id)
func = hook.create_instance
else:
self.log.info("Updating Cloud Spanner instance '%s'", self.instance_id)
func = hook.update_instance
func(project_id=self.project_id,
instance_id=self.instance_id,
configuration_name=self.configuration_name,
node_count=self.node_count,
display_name=self.display_name)
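# --- Example usage (illustrative sketch, not part of this module) ---
# A minimal task definition, assuming a surrounding DAG object named `dag`;
# the project/instance/config values below are hypothetical placeholders:
#
#     spanner_instance_create_task = SpannerDeployInstanceOperator(
#         task_id='spanner_deploy',
#         project_id='my-project',
#         instance_id='my-spanner-instance',
#         configuration_name='projects/my-project/instanceConfigs/regional-us-central1',
#         node_count=1,
#         display_name='My Spanner instance',
#         dag=dag,
#     )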
class SpannerDeleteInstanceOperator(BaseOperator):
"""
Deletes a Cloud Spanner instance. If an instance does not exist,
no action is taken and the operator succeeds.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:type instance_id: str
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START gcp_spanner_delete_template_fields]
template_fields = ('project_id', 'instance_id', 'gcp_conn_id')
# [END gcp_spanner_delete_template_fields]
@apply_defaults
def __init__(self,
                 instance_id: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
*args, **kwargs) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
super().__init__(*args, **kwargs)
def _validate_inputs(self):
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' "
"is empty or None")
def execute(self, context):
hook = SpannerHook(gcp_conn_id=self.gcp_conn_id)
if hook.get_instance(project_id=self.project_id, instance_id=self.instance_id):
return hook.delete_instance(project_id=self.project_id,
instance_id=self.instance_id)
else:
self.log.info("Instance '%s' does not exist in project '%s'. "
"Aborting delete.", self.instance_id, self.project_id)
return True
class SpannerQueryDatabaseInstanceOperator(BaseOperator):
"""
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerQueryDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:type instance_id: str
:param database_id: The Cloud Spanner database ID.
:type database_id: str
:param query: The query or list of queries to be executed. Can be a path to a SQL
file.
:type query: str or list
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START gcp_spanner_query_template_fields]
template_fields = ('project_id', 'instance_id', 'database_id', 'query', 'gcp_conn_id')
template_ext = ('.sql',)
# [END gcp_spanner_query_template_fields]
@apply_defaults
def __init__(self,
                 instance_id: str,
database_id: str,
query: Union[str, List[str]],
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
*args, **kwargs) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.query = query
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
super().__init__(*args, **kwargs)
def _validate_inputs(self):
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' "
"is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' "
"is empty or None")
if not self.query:
raise AirflowException("The required parameter 'query' is empty")
def execute(self, context):
hook = SpannerHook(gcp_conn_id=self.gcp_conn_id)
queries = self.query
if isinstance(self.query, str):
queries = [x.strip() for x in self.query.split(';')]
self.sanitize_queries(queries)
self.log.info("Executing DML query(-ies) on "
"projects/%s/instances/%s/databases/%s",
self.project_id, self.instance_id, self.database_id)
self.log.info(queries)
hook.execute_dml(project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
queries=queries)
@staticmethod
def sanitize_queries(queries):
"""
Drops empty query in queries.
:param queries: queries
:type queries: List[str]
:rtype: None
"""
if queries and queries[-1] == '':
del queries[-1]
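# --- Example (illustrative sketch) ---
# A semicolon-separated query string is split into individual statements in
# `execute`, and `sanitize_queries` drops the trailing empty entry:
#
#     queries = [x.strip() for x in "DELETE FROM t WHERE true;".split(';')]
#     # -> ['DELETE FROM t WHERE true', '']
#     SpannerQueryDatabaseInstanceOperator.sanitize_queries(queries)
#     # -> ['DELETE FROM t WHERE true']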
class SpannerDeployDatabaseInstanceOperator(BaseOperator):
"""
    Creates a new Cloud Spanner database, or does nothing if the
    database already exists.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeployDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:type instance_id: str
:param database_id: The Cloud Spanner database ID.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START gcp_spanner_database_deploy_template_fields]
template_fields = ('project_id', 'instance_id', 'database_id', 'ddl_statements',
'gcp_conn_id')
template_ext = ('.sql', )
# [END gcp_spanner_database_deploy_template_fields]
@apply_defaults
def __init__(self,
                 instance_id: str,
database_id: str,
ddl_statements: List[str],
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
*args, **kwargs) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.ddl_statements = ddl_statements
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
super().__init__(*args, **kwargs)
def _validate_inputs(self):
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty "
"or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty"
" or None")
def execute(self, context):
hook = SpannerHook(gcp_conn_id=self.gcp_conn_id)
if not hook.get_database(project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id):
self.log.info("Creating Cloud Spanner database "
"'%s' in project '%s' and instance '%s'",
self.database_id, self.project_id, self.instance_id)
return hook.create_database(project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
ddl_statements=self.ddl_statements)
else:
self.log.info("The database '%s' in project '%s' and instance '%s'"
" already exists. Nothing to do. Exiting.",
self.database_id, self.project_id, self.instance_id)
return True
class SpannerUpdateDatabaseInstanceOperator(BaseOperator):
"""
Updates a Cloud Spanner database with the specified DDL statement.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerUpdateDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:type instance_id: str
:param database_id: The Cloud Spanner database ID.
:type database_id: str
:param ddl_statements: The string list containing DDL to apply to the database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param operation_id: (Optional) Unique per database operation id that can
be specified to implement idempotency check.
:type operation_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START gcp_spanner_database_update_template_fields]
template_fields = ('project_id', 'instance_id', 'database_id', 'ddl_statements',
'gcp_conn_id')
template_ext = ('.sql', )
# [END gcp_spanner_database_update_template_fields]
@apply_defaults
def __init__(self,
                 instance_id: str,
database_id: str,
ddl_statements: List[str],
project_id: Optional[str] = None,
operation_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
*args, **kwargs) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.ddl_statements = ddl_statements
self.operation_id = operation_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
super().__init__(*args, **kwargs)
def _validate_inputs(self):
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty"
" or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty"
" or None")
if not self.ddl_statements:
raise AirflowException("The required parameter 'ddl_statements' is empty"
" or None")
def execute(self, context):
hook = SpannerHook(gcp_conn_id=self.gcp_conn_id)
if not hook.get_database(project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id):
raise AirflowException("The Cloud Spanner database '{}' in project '{}' and "
"instance '{}' is missing. Create the database first "
"before you can update it.".format(self.database_id,
self.project_id,
self.instance_id))
else:
return hook.update_database(project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
ddl_statements=self.ddl_statements,
operation_id=self.operation_id)
class SpannerDeleteDatabaseInstanceOperator(BaseOperator):
"""
Deletes a Cloud Spanner database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteDatabaseInstanceOperator`
:param instance_id: Cloud Spanner instance ID.
:type instance_id: str
:param database_id: Cloud Spanner database ID.
:type database_id: str
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START gcp_spanner_database_delete_template_fields]
template_fields = ('project_id', 'instance_id', 'database_id',
'gcp_conn_id')
# [END gcp_spanner_database_delete_template_fields]
@apply_defaults
def __init__(self,
                 instance_id: str,
database_id: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
*args, **kwargs) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
super().__init__(*args, **kwargs)
def _validate_inputs(self):
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty"
" or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty"
" or None")
def execute(self, context):
hook = SpannerHook(gcp_conn_id=self.gcp_conn_id)
database = hook.get_database(project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id)
if not database:
self.log.info("The Cloud Spanner database was missing: "
"'%s' in project '%s' and instance '%s'. Assuming success.",
self.database_id, self.project_id, self.instance_id)
return True
else:
return hook.delete_database(project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id)
|
import json, requests
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
class AnimeOnline(Anime, sitename = 'animeonline360'):
sitename = 'animeonline360'
@classmethod
def search(cls, query):
try:
r = helpers.soupify(helpers.get('https://animeonline360.me/', params = {'s': query})).select('div.title')
results = [{"title": x.text, "url": x.a['href']} for x in r]
search_results = [
SearchResult(
title = i['title'],
url = i['url'],
)
for i in results
]
return search_results
        except Exception:
            # Return an empty list on failure so callers can iterate over the result.
            return []
def _scrape_episodes(self):
data = helpers.soupify(helpers.get(self.url)).select('div.episodiotitle > a')
return [i.get('href') for i in data[::-1]]
def _scrape_metadata(self):
self.title = helpers.soupify(helpers.get(self.url)).title.text.split('|')[0].strip().title()
class AnimeOnlineEpisode(AnimeEpisode, sitename='animeonline360'):
def _get_sources(self):
return [('animeonline360', self.url)]
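# --- Example usage (illustrative sketch; requires network access) ---
#
#     results = AnimeOnline.search('one piece')  # hypothetical query
#     for res in results:
#         print(res.title, res.url)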
|
import pytest
import os
from openl3.cli import positive_int, positive_float, get_file_list, parse_args,\
run, main
from argparse import ArgumentTypeError
from openl3.openl3_exceptions import OpenL3Error
import tempfile
import numpy as np
from unittest.mock import patch
TEST_DIR = os.path.dirname(__file__)
TEST_AUDIO_DIR = os.path.join(TEST_DIR, 'data', 'audio')
TEST_IMAGE_DIR = os.path.join(TEST_DIR, 'data', 'image')
TEST_VIDEO_DIR = os.path.join(TEST_DIR, 'data', 'video')
# Test audio file paths
CHIRP_MONO_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_mono.wav')
CHIRP_STEREO_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_stereo.wav')
CHIRP_44K_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_44k.wav')
CHIRP_1S_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_1s.wav')
EMPTY_PATH = os.path.join(TEST_AUDIO_DIR, 'empty.wav')
SHORT_PATH = os.path.join(TEST_AUDIO_DIR, 'short.wav')
SILENCE_PATH = os.path.join(TEST_AUDIO_DIR, 'silence.wav')
# Test video file paths
BENTO_PATH = os.path.join(TEST_VIDEO_DIR, 'bento.mp4')
# Regression file paths
TEST_REG_DIR = os.path.join(TEST_DIR, 'data', 'regression')
REG_CHIRP_44K_PATH = os.path.join(TEST_REG_DIR, 'chirp_44k_kapre.npz')
REG_CHIRP_44K_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'chirp_44k_kapre_linear.npz')
REG_BENTO_AUDIO_PATH = os.path.join(TEST_REG_DIR, 'bento_audio_kapre.npz')
REG_BENTO_AUDIO_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'bento_audio_kapre_linear.npz')
REG_BENTO_IMAGE_PATH = os.path.join(TEST_REG_DIR, 'bento_image_kapre.npz')
REG_BENTO_IMAGE_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'bento_image_kapre_linear.npz')
def test_positive_float():
# test that returned value is float
f = positive_float(5)
assert f == 5.0
assert type(f) is float
# test it works for valid strings
f = positive_float('1.3')
assert f == 1.3
assert type(f) is float
# make sure error raised for all invalid values:
invalid = [-5, -1.0, None, 'hello']
for i in invalid:
pytest.raises(ArgumentTypeError, positive_float, i)
def test_positive_int():
# test that returned value is int
i = positive_int(5)
assert i == 5
assert type(i) is int
i = positive_int(5.0)
assert i == 5
assert type(i) is int
# test it works for valid strings
i = positive_int('1')
assert i == 1
assert type(i) is int
# make sure error raised for all invalid values:
invalid = [-5, -1.0, None, 'hello']
for i in invalid:
pytest.raises(ArgumentTypeError, positive_int, i)
def test_get_file_list():
# test for invalid input (must be iterable, e.g. list)
pytest.raises(ArgumentTypeError, get_file_list, CHIRP_44K_PATH)
# test for valid list of file paths
flist = get_file_list([CHIRP_44K_PATH, CHIRP_1S_PATH])
assert len(flist) == 2
assert flist[0] == CHIRP_44K_PATH and flist[1] == CHIRP_1S_PATH
# test for valid folder
flist = get_file_list([TEST_AUDIO_DIR])
assert len(flist) == 7
flist = sorted(flist)
assert flist[0] == CHIRP_1S_PATH
assert flist[1] == CHIRP_44K_PATH
assert flist[2] == CHIRP_MONO_PATH
assert flist[3] == CHIRP_STEREO_PATH
assert flist[4] == EMPTY_PATH
assert flist[5] == SHORT_PATH
assert flist[6] == SILENCE_PATH
# combine list of files and folders
flist = get_file_list([TEST_AUDIO_DIR, CHIRP_44K_PATH])
assert len(flist) == 8
# nonexistent path
pytest.raises(OpenL3Error, get_file_list, ['/fake/path/to/file'])
def test_parse_args():
# test for all the defaults
args = ['audio', CHIRP_44K_PATH]
args = parse_args(args)
assert args.modality == 'audio'
assert args.inputs == [CHIRP_44K_PATH]
assert args.output_dir is None
assert args.suffix is None
assert args.input_repr == 'mel256'
assert args.content_type == 'music'
assert args.audio_embedding_size == 6144
assert args.no_audio_centering is False
assert args.audio_hop_size == 0.1
assert args.image_embedding_size == 8192
assert args.quiet is False
# test when setting all values
args = ['video', BENTO_PATH, '-o', '/output/dir', '--suffix', 'suffix',
'--input-repr', 'linear', '--content-type', 'env',
'--audio-embedding-size', '512', '--no-audio-centering',
'--audio-hop-size', '0.5', '--image-embedding-size', '512',
'--quiet']
args = parse_args(args)
assert args.inputs == [BENTO_PATH]
assert args.output_dir == '/output/dir'
assert args.suffix == 'suffix'
assert args.input_repr == 'linear'
assert args.content_type == 'env'
assert args.audio_embedding_size == 512
assert args.no_audio_centering is True
assert args.audio_hop_size == 0.5
assert args.image_embedding_size == 512
assert args.quiet is True
def test_run(capsys):
# test invalid input
invalid = [None, 5, 1.0]
for i in invalid:
pytest.raises(OpenL3Error, run, i, i)
# test empty input folder
with pytest.raises(SystemExit) as pytest_wrapped_e:
tempdir = tempfile.mkdtemp()
run('audio', [tempdir])
# make sure it exited
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == -1
# make sure it printed a message
captured = capsys.readouterr()
expected_message = 'openl3: No files found in {}. Aborting.\n'.format(str([tempdir]))
assert captured.out == expected_message
# delete tempdir
if os.path.exists(tempdir):
os.rmdir(tempdir)
# test invalid modality
with pytest.raises(OpenL3Error) as pytest_wrapped_e:
tempdir = tempfile.mkdtemp()
run('invalid', CHIRP_44K_PATH, output_dir=tempdir)
# delete tempdir
if os.path.exists(tempdir):
os.rmdir(tempdir)
def test_main():
# Duplicate audio regression test from test_run just to hit coverage
tempdir = tempfile.mkdtemp()
with patch('sys.argv', ['openl3', 'audio', CHIRP_44K_PATH, '--output-dir', tempdir]):
main()
# check output file created
outfile = os.path.join(tempdir, 'chirp_44k.npz')
assert os.path.isfile(outfile)
# regression test
data_reg = np.load(REG_CHIRP_44K_PATH)
data_out = np.load(outfile)
    assert sorted(data_out.files) == sorted(data_reg.files) == sorted(
        ['embedding', 'timestamps'])
assert np.allclose(data_out['timestamps'], data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(data_out['embedding'], data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
def test_script_main():
# Duplicate audio regression test from test_run just to hit coverage
tempdir = tempfile.mkdtemp()
with patch('sys.argv', ['openl3', 'audio', CHIRP_44K_PATH, '--output-dir', tempdir]):
import openl3.__main__
# check output file created
outfile = os.path.join(tempdir, 'chirp_44k.npz')
assert os.path.isfile(outfile)
# regression test
data_reg = np.load(REG_CHIRP_44K_PATH)
data_out = np.load(outfile)
    assert sorted(data_out.files) == sorted(data_reg.files) == sorted(
        ['embedding', 'timestamps'])
assert np.allclose(data_out['timestamps'], data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(data_out['embedding'], data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
def debugallclose(x1, x2, rtol=1e-05, atol=1e-05, **kw):
passed = _allclose(x1, x2, rtol=rtol, atol=atol, **kw)
if not passed:
x1, x2 = np.asarray(x1), np.asarray(x2)
print('shapes:', x1.shape, x2.shape)
print('nans:', np.mean(np.isnan(x1)), np.mean(np.isnan(x2)))
diff = np.abs(x2 - x1)
print('amount above rtol:', np.mean(diff > rtol))
print('min:', diff.min(1))
print('max:', diff.max(1))
print('mean:', diff.mean(1))
print()
return passed
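# Monkey-patch np.allclose with the debugging wrapper above so that failing
# comparisons in this test module also print shape/NaN/diff statistics.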
_allclose = np.allclose
np.allclose = debugallclose
|
from typing import Iterator, List, Optional
import tiledb
from .annotation_dataframe import AnnotationDataFrame
from .assay_matrix import AssayMatrix
from .tiledb_group import TileDBGroup
class AssayMatrixGroup(TileDBGroup):
"""
Nominally for `X` and `raw/X` elements. You can find element names using soma.X.keys(); you
access elements using soma.X['data'] etc., or soma.X.data if you prefer. (The latter syntax is
possible when the element name doesn't have dashes, dots, etc. in it.)
"""
row_dim_name: str
col_dim_name: str
row_dataframe: AnnotationDataFrame
col_dataframe: AnnotationDataFrame
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str, # Nominally "X"
row_dim_name: str, # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str, # var_id for X, obs_id_j for obsp; var_id_j for varp
row_dataframe: AnnotationDataFrame, # Nominally a reference to soma.obs
col_dataframe: AnnotationDataFrame, # Nominally a reference to soma.var
parent: Optional[TileDBGroup] = None,
):
"""
See the `TileDBObject` constructor.
See `AssayMatrix` for the rationale behind retaining references to the `row_dataframe` and
`col_dataframe` objects.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.row_dataframe = row_dataframe
self.col_dataframe = col_dataframe
# ----------------------------------------------------------------
def keys(self) -> List[str]:
"""
        For `X` and `raw.X`, `.keys()` is a keystroke-saver for the more general group-member
accessor `._get_member_names()`.
"""
return self._get_member_names()
# ----------------------------------------------------------------
def __getattr__(self, name) -> AssayMatrix:
"""
This is called on `soma.X.name` when `name` is not already an attribute.
This way you can do `soma.X.data` as an alias for `soma.X['data']`.
"""
with self._open() as G:
if name not in G:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'"
)
return self[name]
# ----------------------------------------------------------------
    def __iter__(self) -> Iterator[AssayMatrix]:
        """
        Implements `for matrix in soma.X: ...` and `for matrix in soma.raw.X: ...`
"""
retval = []
for name, uri in self._get_member_names_to_uris().items():
matrix = AssayMatrix(
uri=uri,
name=name,
row_dim_name=self.row_dim_name,
col_dim_name=self.col_dim_name,
row_dataframe=self.row_dataframe,
col_dataframe=self.col_dataframe,
parent=self,
)
retval.append(matrix)
return iter(retval)
# ----------------------------------------------------------------
# At the tiledb-py API level, *all* groups are name-indexable. But here at the tiledbsc-py
# level, we implement name-indexing only for some groups:
#
# * Most soma member references are done using Python's dot syntax. For example, rather than
# soma['X'], we have simply soma.X, and likewise, soma.raw.X. Likewise soma.obs and soma.var.
#
# * Index references are supported for obsm, varm, obsp, varp, and uns. E.g.
# soma.obsm['X_pca'] or soma.uns['neighbors']['params']['method']
#
# * Overloading the `[]` operator at the TileDBGroup level isn't necessary -- e.g. we don't need
# soma['X'] when we have soma.X -- but also it causes circular-import issues in Python.
#
# * Rather than doing a TileDBIndexableGroup which overloads the `[]` operator, we overload
# the `[]` operator separately in the various classes which need indexing. This is again to
# avoid circular-import issues, and means that [] on `AnnotationMatrixGroup` will return an
#   `AnnotationMatrix`, [] on `UnsGroup` will return `UnsArray` or `UnsGroup`, etc.
def __getitem__(self, name) -> AssayMatrix:
"""
        Returns an `AssayMatrix` element at the given name within the group, or None if no such
        member exists. Overloads the `[...]` operator.
"""
with self._open("r") as G:
if name not in G:
return None
obj = G[name] # This returns a tiledb.object.Object.
if obj.type == tiledb.tiledb.Group:
raise Exception(
"Internal error: found group element where array element was expected."
)
if obj.type != tiledb.libtiledb.Array:
raise Exception(
f"Internal error: found group element neither subgroup nor array: type is {str(obj.type)}"
)
return AssayMatrix(
uri=obj.uri,
name=name,
row_dim_name=self.row_dim_name,
col_dim_name=self.col_dim_name,
row_dataframe=self.row_dataframe,
col_dataframe=self.col_dataframe,
parent=self,
)
# ----------------------------------------------------------------
def __contains__(self, name) -> bool:
"""
Implements the `in` operator, e.g. `"data" in soma.X`.
"""
with self._open("r") as G:
return name in G
# ----------------------------------------------------------------
def add_layer_from_matrix_and_dim_values(
self,
matrix,
row_names: str,
col_names: str,
layer_name="data",
) -> None:
"""
        Populates the `X` or `raw.X` subgroup for a `SOMA` object. For `X` and `raw.X`,
        nominally `row_names` will be `anndata.obs_names` and `col_names` will be
        `anndata.var_names` or `anndata.raw.var_names`. For `obsp` elements, both will be
        `anndata.obs_names`; for `varp` elements, both will be `anndata.var_names`.
"""
if matrix is not None:
# Must be done first, to create the parent directory
self.create_unless_exists()
assay_matrix_uri = self._get_child_uri(
layer_name
) # See comments in that function
assay_matrix = AssayMatrix(
uri=assay_matrix_uri,
name=layer_name,
row_dim_name=self.row_dim_name,
col_dim_name=self.col_dim_name,
row_dataframe=self.row_dataframe,
col_dataframe=self.col_dataframe,
parent=self,
)
assay_matrix.from_matrix_and_dim_values(matrix, row_names, col_names)
self._add_object(assay_matrix)
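# --- Example access patterns (illustrative sketch, assuming a populated `soma`) ---
#
#     soma.X.keys()           # e.g. ['data']
#     x = soma.X['data']      # index access via __getitem__
#     x = soma.X.data         # equivalent attribute access via __getattr__
#     'data' in soma.X        # membership test via __contains__
#     for matrix in soma.X:   # iteration via __iter__
#         print(matrix.name)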
|
from easygraphics import *
init_graph(headless=True)
img = create_image(800, 600)
set_target(img)
set_fill_color(Color.RED)
draw_circle(200, 200, 50)
save_image("test.png")
close_image(img)
close_graph()
|
# Copyright 2018 - 2019 Fabian Wenzelmann
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
register = template.Library()
@register.filter
def lindex(l, i):
    """Return ``l[i]``, or ``None`` if ``i`` is out of range."""
    if -len(l) <= i < len(l):
        return l[i]
    return None
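# Example template usage (illustrative; the {% load %} name depends on the
# templatetags module filename in the host app):
#
#     {% load my_filters %}
#     {{ my_list|lindex:2 }}  {# renders my_list[2], or nothing if out of range #}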
|
# coding=utf-8
import os
import shutil
import skimage
import skimage.io
import skimage.transform
import numpy as np
def get_all_files(path):
all_file = []
for dirpath, dirnames, filenames in os.walk(path):
for name in filenames:
if name.endswith('.jpg'):
all_file.append(os.path.join(dirpath, name))
return all_file
# synset = [l.strip() for l in open('synset.txt').readlines()]
# returns image of shape [224, 224, 3]
# [height, width, depth]
def load_image(path):
# load image
img = skimage.io.imread(path)
img = img / 255.0
assert (0 <= img).all() and (img <= 1.0).all()
# print "Original Image Shape: ", img.shape
# we crop image from center
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
# resize to 224, 224
resized_img = skimage.transform.resize(crop_img, (224, 224))
return resized_img
# returns the top1 string
def print_prob(prob, file_path):
synset = [l.strip() for l in open(file_path).readlines()]
# print prob
pred = np.argsort(prob)[::-1]
# Get top1 label
top1 = synset[pred[0]]
print(("Top1: ", top1, prob[pred[0]]))
# Get top5 label
top5 = [(synset[pred[i]], prob[pred[i]]) for i in range(5)]
print(("Top5: ", top5))
return top1
def load_image2(path, height=None, width=None):
# load image
img = skimage.io.imread(path)
img = img / 255.0
if height is not None and width is not None:
ny = height
nx = width
elif height is not None:
ny = height
        nx = int(img.shape[1] * ny / img.shape[0])
elif width is not None:
nx = width
        ny = int(img.shape[0] * nx / img.shape[1])
else:
ny = img.shape[0]
nx = img.shape[1]
return skimage.transform.resize(img, (ny, nx))
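# Example (illustrative): resize to height 300 while preserving aspect ratio;
# the width is derived as int(original_width * 300 / original_height).
#
#     img = load_image2('./test_data/starry_night.jpg', height=300)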
def make_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def test():
img = skimage.io.imread("./test_data/starry_night.jpg")
ny = 300
    nx = int(img.shape[1] * ny / img.shape[0])
img = skimage.transform.resize(img, (ny, nx))
skimage.io.imsave("./test_data/test/output.jpg", img)
def copy_size_max(dirname, targetDir):
subdir_list = os.listdir(dirname)
video_max = []
for sub_dirname in subdir_list:
allsize = []
for (curDir, subDir, fileHere) in os.walk(os.path.join(dirname, sub_dirname)):
print(curDir)
for filename in fileHere:
fullname = os.path.join(curDir, filename)
filesize = os.path.getsize(fullname)
allsize.append((filesize, fullname))
allsize.sort(key=lambda x: x[0])
print(allsize[-1])
video_max.append(allsize[-1])
    print(video_max)
if not os.path.exists(targetDir):
os.makedirs(targetDir)
for eachfile in video_max:
eachfile = eachfile[1]
        if not os.path.exists(eachfile):
            print("src path not exist: " + eachfile)
            continue
        shutil.copy(eachfile, os.path.join(targetDir, os.path.basename(eachfile)))
        print(eachfile + " copy succeeded!")
if __name__ == '__main__':
copy_size_max('/home/shihuijie/Desktop/ml/tmp/vcdb/core_dataset', '/home/shihuijie/Desktop/video_vcdb_to_train')
|
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Exceptions for errors raised while handling backends and jobs.
"""
from qiskit import QiskitError
class JobError(QiskitError):
"""Base class for errors raised by jobs."""
def __init__(self, *message):
"""Set the error message."""
super().__init__(*message)
self.message = ' '.join(message)
def __str__(self):
"""Return the message."""
return repr(self.message)
class JobTimeoutError(JobError):
"""Base class for timeout errors raised by jobs."""
def __init__(self, *message):
"""Set the error message."""
super().__init__(*message)
self.message = ' '.join(message)
def __str__(self):
"""Return the message."""
return repr(self.message)
class QiskitBackendNotFoundError(QiskitError):
"""Base class for errors raised while looking up for a backend."""
def __init__(self, *message):
"""Set the error message."""
super().__init__(*message)
self.message = ' '.join(message)
def __str__(self):
"""Return the message."""
return repr(self.message)
|
# %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/select_by_attributes.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/select_by_attributes.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/select_by_attributes.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
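# Optionally add another basemap (the layer name here is just an example):
# Map.add_basemap('HYBRID')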
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# Select North Dakota and South Dakota
fc = ee.FeatureCollection('TIGER/2018/States') \
.filter(ee.Filter.Or(
ee.Filter.eq('STUSPS', 'ND'),
ee.Filter.eq('STUSPS', 'SD'),
))
image = ee.Image().paint(fc, 0, 2)
# Map.setCenter(-99.844, 37.649, 5)
Map.centerObject(fc, 6)
Map.addLayer(image, {'palette': 'FF0000'}, 'TIGER/2018/States')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
|
#!/usr/bin/env python3
import numpy as np
import scipy.interpolate
import math
import bdsim
import unittest
import numpy.testing as nt
class BlockTest(unittest.TestCase):
pass
class BlockDiagramTest(unittest.TestCase):
pass
class WiringTest(unittest.TestCase):
def test_connect_1(self):
bd = bdsim.BlockDiagram()
src = bd.CONSTANT(2)
dst = bd.OUTPORT(1) # 1 port
bd.connect(src, dst)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs[0], 2)
def test_connect_2(self):
bd = bdsim.BlockDiagram()
src = bd.CONSTANT(2)
dst1 = bd.OUTPORT(1) # 1 port
dst2 = bd.OUTPORT(1) # 1 port
bd.connect(src, dst1)
bd.connect(src, dst2)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst1.inputs[0], 2)
self.assertEqual(dst2.inputs[0], 2)
def test_multi_connect(self):
bd = bdsim.BlockDiagram()
src = bd.CONSTANT(2)
dst1 = bd.OUTPORT(1) # 1 port
dst2 = bd.OUTPORT(1) # 1 port
bd.connect(src, dst1, dst2)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst1.inputs[0], 2)
self.assertEqual(dst2.inputs[0], 2)
def test_ports1(self):
bd = bdsim.BlockDiagram()
const1 = bd.CONSTANT(2)
const2 = bd.CONSTANT(3)
dst = bd.OUTPORT(2) # 2 ports
bd.connect(const1, dst[0])
bd.connect(const2, dst[1])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [2, 3])
def test_ports2(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3])
src = bd.DEMUX(2)
bd.connect(const, src)
dst1 = bd.OUTPORT(1) # 1 port
dst2 = bd.OUTPORT(1) # 1 port
bd.connect(src[0], dst1)
bd.connect(src[1], dst2)
bd.compile()
bd.evaluate(x=[], t=0)
        self.assertEqual(dst1.inputs[0], 2)
        self.assertEqual(dst2.inputs[0], 3)
    def test_ports3(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3, 4, 5])
src = bd.DEMUX(4)
bd.connect(const, src)
dst = bd.OUTPORT(4) # 4 ports
bd.connect(src[0], dst[0])
bd.connect(src[1], dst[1])
bd.connect(src[2], dst[2])
bd.connect(src[3], dst[3])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [2, 3, 4, 5])
def test_slice1(self):
bd = bdsim.BlockDiagram()
src = bd.CONSTANT(2)
dst = bd.OUTPORT(2) # 1 port
bd.connect(src, dst[0])
bd.connect(src, dst[1])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [2, 2])
def test_slice2(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3, 4, 5])
src = bd.DEMUX(4)
bd.connect(const, src)
dst = bd.OUTPORT(4) # 4 ports
bd.connect(src[0:4], dst[0:4])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [2, 3, 4, 5])
def test_slice3(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3, 4, 5])
src = bd.DEMUX(4)
bd.connect(const, src)
dst = bd.OUTPORT(4) # 4 ports
bd.connect(src[0:4], dst[3:-1:-1])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [5, 4, 3, 2])
def test_slice4(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3, 4, 5])
src = bd.DEMUX(4)
bd.connect(const, src)
dst = bd.OUTPORT(4) # 4 ports
bd.connect(src[3:-1:-1], dst[0:4])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [5, 4, 3, 2])
def test_slice5(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3, 4, 5])
src = bd.DEMUX(4)
bd.connect(const, src)
dst = bd.OUTPORT(4) # 4 ports
bd.connect(src[0:4:2], dst[0:4:2])
bd.connect(src[1:4:2], dst[1:4:2])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [2, 4, 3, 5])
    def test_slice6(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3, 4, 5])
src = bd.DEMUX(4)
bd.connect(const, src)
dst = bd.OUTPORT(4) # 4 ports
bd.connect(src[3:-1:-1], dst[3:-1:-1])
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [2, 3, 4, 5])
    def test_assignment1(self):
bd = bdsim.BlockDiagram()
src = bd.CONSTANT(2)
dst = bd.OUTPORT(1) # 1 port
dst[0] = src
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs[0], 2)
def test_assignment2(self):
bd = bdsim.BlockDiagram()
const1 = bd.CONSTANT(2)
const2 = bd.CONSTANT(3)
dst = bd.OUTPORT(2) # 2 ports
dst[0] = const1
dst[1] = const2
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [2, 3])
def test_assignment3(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3, 4, 5])
src = bd.DEMUX(4)
bd.connect(const, src)
dst = bd.OUTPORT(4) # 4 ports
dst[3:-1:-1] = src[0:4]
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [5, 4, 3, 2])
def test_multiply1(self):
bd = bdsim.BlockDiagram()
dst = bd.OUTPORT(1) # 1 ports
dst[0] = bd.CONSTANT(2) * bd.GAIN(3)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [6])
def test_multiply2(self):
bd = bdsim.BlockDiagram()
dst = bd.OUTPORT(1) # 1 ports
dst[0] = bd.CONSTANT(2) * bd.GAIN(3) * bd.GAIN(4)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [24])
def test_multiply3(self):
bd = bdsim.BlockDiagram()
const = bd.CONSTANT([2, 3])
src = bd.DEMUX(2)
bd.connect(const, src)
dst = bd.OUTPORT(2) # 2 ports
dst[0] = src[0] * bd.GAIN(2)
dst[1] = src[1] * bd.GAIN(3)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [4, 9])
def test_inline1(self):
bd = bdsim.BlockDiagram()
dst = bd.OUTPORT(1) # 1 ports
const1 = bd.CONSTANT(2)
const2 = bd.CONSTANT(3)
dst[0] = bd.SUM('++', const1, const2)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [5])
def test_inline2(self):
bd = bdsim.BlockDiagram()
dst = bd.OUTPORT(1) # 1 ports
const1 = bd.CONSTANT(2)
const2 = bd.CONSTANT(3)
dst[0] = bd.SUM('++', const1, const2) * bd.GAIN(2)
bd.compile()
bd.evaluate(x=[], t=0)
self.assertEqual(dst.inputs, [10])
class ImportTest(unittest.TestCase):
def test_import1(self):
# create a subsystem
ss = bdsim.BlockDiagram(name='subsystem1')
f = ss.FUNCTION(lambda x: x)
inp = ss.INPORT(1)
outp = ss.OUTPORT(1)
ss.connect(inp, f)
ss.connect(f, outp)
# create main system
bd = bdsim.BlockDiagram()
const = bd.CONSTANT(1)
scope = bd.SCOPE()
f = bd.SUBSYSTEM(ss, name='subsys')
bd.connect(const, f)
bd.connect(f, scope)
bd.compile()
self.assertEqual(len(bd.blocklist), 3)
self.assertEqual(len(bd.wirelist), 2)
def test_import2(self):
# create a subsystem
ss = bdsim.BlockDiagram(name='subsystem1')
f = ss.FUNCTION(lambda x: x)
inp = ss.INPORT(1)
outp = ss.OUTPORT(1)
ss.connect(inp, f)
ss.connect(f, outp)
# create main system
bd = bdsim.BlockDiagram()
const = bd.CONSTANT(1)
scope1 = bd.SCOPE()
scope2 = bd.SCOPE()
f1 = bd.SUBSYSTEM(ss, name='subsys1')
f2 = bd.SUBSYSTEM(ss, name='subsys2')
bd.connect(const, f1, f2)
bd.connect(f1, scope1)
bd.connect(f2, scope2)
bd.compile()
# ---------------------------------------------------------------------------------------#
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
@reference: https://github.com/tkipf/pygcn; https://github.com/dawnranger/pytorch-AGNN
"""
from __future__ import division
from __future__ import print_function
import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from rater.models.graph.agnn import AGNN
from rater.models.graph.reader import load_data, accuracy
def train():
t_total = time.time()
for epoch in range(args.epochs):
t = time.time()
model.train()
optimizer.zero_grad()
output = model(features, adj)
loss_train = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
if not args.fastmode:
                # Evaluate validation set performance separately;
                # this deactivates dropout during the validation run.
model.eval()
output = model(features, adj)
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = accuracy(output[idx_val], labels[idx_val])
print('Epoch: {:04d}'.format(epoch + 1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'loss_val: {:.4f}'.format(loss_val.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'time: {:.4f}s'.format(time.time() - t))
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
def test():
model.eval()
output = model(features, adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=True, help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=500, help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
parser.add_argument('--layers', type=int, default=3, help='Number of attention layers.')
parser.add_argument('--dropout_rate', type=float, default=0.5, help='Dropout rate (1 - keep probability).')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()
# Model and optimizer
model = AGNN(nfeat=features.shape[1],
nhid=args.hidden,
                 nclass=labels.max().item() + 1,
nlayers=args.layers,
dropout_rate=args.dropout_rate)
# print(model)
optimizer = optim.Adam(model.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
model.cuda()
features = features.cuda()
adj = adj.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
features, adj, labels = Variable(features), Variable(adj), Variable(labels)
train()
test()
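# Example invocation (illustrative; the script name is hypothetical):
#
#     python train_agnn.py --epochs 200 --lr 0.005 --layers 2 --dropout_rate 0.5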
|
import os
curr_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_path)
from models import DeepDoubleSarsa, Double_Sarsa, Expected_Double_Sarsa, ReplayBuffer
import numpy as np
import matplotlib.pyplot as plt
import random
import operator as op
import torch
from torch.autograd import Variable
episodes = 2000
batch_size = 32
BUFFER_SIZE = 1000
epsilon_decay = 1000
def act(q1, q2, epsilon):
if np.random.rand(1)[0] < epsilon:
return random.choice([0,1,2,3])
else:
avg = [i+j for i,j in zip(np.squeeze(q1.cpu().data.numpy()),np.squeeze(q2.cpu().data.numpy()))]
return np.argmax(np.array(avg))
def step(state, action, step_reward=None):
r, sn, done = 0, None, False
if action=='r' and not state[1]==3 and not [state[0], state[1]+1]==[2,2]:
sn = [state[0], state[1]+1]
elif action=='l' and not state[1]==0 and not [state[0], state[1]-1]==[2,2]:
sn = [state[0], state[1]-1]
elif action=='u' and not state[0]==0 and not [state[0]-1, state[1]]==[2,2]:
sn = [state[0]-1, state[1]]
elif action=='d' and not state[0]==3 and not [state[0]+1, state[1]]==[2,2]:
sn = [state[0]+1, state[1]]
else:
sn = state
if sn==[1,1]:
r = -10
done = True
elif sn==[3,3]:
r = 10
done = True
else:
if step_reward=='d':
r = np.random.normal(loc=-1, scale=0)
elif step_reward=='s':
if np.random.rand(1)[0] > 0.5:
r = -8
else:
r = 6
return r, sn, done
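# Example transitions (illustrative): states are [row, col] on the 4x4 grid,
# with the pit at [1, 1] (reward -10) and the goal at [3, 3] (reward +10):
#
#     step([0, 1], 'd', step_reward='d')  # -> (-10, [1, 1], True)    falls into the pit
#     step([3, 2], 'r', step_reward='d')  # -> (10, [3, 3], True)     reaches the goal
#     step([0, 0], 'l', step_reward='d')  # -> (-1.0, [0, 0], False)  blocked at the grid edge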
def grid_world():
env = np.arange(16)+1
env = np.reshape(env, (4,4))
actions = np.array(['r','l','u','d'])
# 2 env[0,1]=A, 6 env[1,1]=P, 11 env[2,2]=W, 16 env[3,3]=G
#action 'r': x+1, 'l': x-1, 'u': y-1, 'd': y+1
alpha, gamma, epsilon = 0.1, 0.99, 0.1
alphas = []
for alpha in np.arange(0.1, 1.0, 0.1):
dsarsa = Double_Sarsa(env, actions, alpha=alpha, gamma=gamma, epsilon=epsilon)
episodes = 10000
rewards = []
for e in range(episodes):
total_reward = 0
agent_pose = [0,1]
s = 2
a = dsarsa.act(s)
done = False
while not done:
r, sn, done = step(agent_pose, a, step_reward='d')
agent_pose = sn
sn = env[sn[0], sn[1]]
an = dsarsa.act(sn)
sarsa = [s, a, r, sn, an]
dsarsa.update(sarsa)
# dsarsa.new_alpha(dsarsa.alpha+1/episodes)
a = an
s = sn
                total_reward += r
print("Alpha: {} | Episode: {} | Reward: {}".format(alpha, e, total_reward))
# epsilon -= 0.5/episodes
rewards.append(total_reward)
alphas.append(np.mean(rewards))
plt.plot(np.arange(0.1, 1.0, 0.1), alphas)
plt.ylabel("Rewards")
plt.xlabel("alpha")
plt.savefig('edsarsa_dgrid.png')
plt.show()
def deep_grid_world():
env = np.arange(16)+1
env = np.reshape(env, (4,4))
actions = np.array(['r','l','u','d'])
# 2 env[0,1]=A, 6 env[1,1]=P, 11 env[2,2]=W, 16 env[3,3]=G
#action 'r': x+1, 'l': x-1, 'u': y-1, 'd': y+1
phip = np.zeros(16)
phip[5] = 1
phiw = np.zeros(16)
phiw[10] = 1
phig = np.zeros(16)
phig[15] = 1
alpha, gamma, epsilon = 0.1, 0.99, 1.0
# for alhpa in np.arange(0.0, 1.0, 0.1):
ddsarsaA = DeepDoubleSarsa(64, 4)
ddsarsaA.cuda()
ddsarsaB = DeepDoubleSarsa(64, 4)
ddsarsaB.cuda()
rewards, lossa, lossb = [], [], []
replay_buffer = ReplayBuffer(BUFFER_SIZE)
for e in range(episodes):
total_reward = 0
agent_pose = [3,0]
phia = np.zeros(16)
phia[12] = 1
input1 = np.concatenate([phia, phip, phiw, phig], axis=0)
input = Variable(torch.from_numpy(input1))
input = input.view(-1, 64)
input = input.float()
q1 = ddsarsaA(input.cuda())
q2 = ddsarsaB(input.cuda())
a = act(q1, q2, epsilon)
done = False
loss = Variable(torch.from_numpy(np.array([0.0])))
timestep = 1
while not done:
r, sn, done = step(agent_pose, actions[a], step_reward='s')
agent_pose = sn
spose = env[sn[0], sn[1]]
phia = np.zeros(16)
phia[spose-1] = 1
n_input1 = np.concatenate([phia, phip, phiw, phig], axis=0)
n_input = torch.from_numpy(n_input1)
n_input = Variable(n_input.view(-1, 64))
n_input = n_input.float()
q1n = ddsarsaA(n_input.cuda())
q2n = ddsarsaB(n_input.cuda())
an = act(q1n, q2n, epsilon)
# if not timestep%100:
# done = True
# timestep += 1
if done:
replay_buffer.push(input1, a, r, n_input1, an, 1.0, np.squeeze(q1n.cpu().data.numpy()), np.squeeze(q2n.cpu().data.numpy()))
else:
replay_buffer.push(input1, a, r, n_input1, an, 0.0, np.squeeze(q1n.cpu().data.numpy()), np.squeeze(q2n.cpu().data.numpy()))
            total_reward += r
if len(replay_buffer)>batch_size:
s, a, r, sp, ap, d, q1nn, q2nn = replay_buffer.sample(batch_size)
if np.random.rand(1)[0] > 0.5:
loss = ddsarsaA.update([s, a, r, sp, ap, d], q2nn, gamma)
lossa.append(loss.item())
else:
loss = ddsarsaB.update([s, a, r, sp, ap, d], q1nn, gamma)
lossb.append(loss.item())
a = an
input = n_input
print("Episode: {} | Reward: {} | Loss: {}".format(e, total_reward, loss.item()))
rewards.append(total_reward)
        if e < epsilon_decay:
            # Linearly anneal epsilon toward a floor of 0.1 (assumed target --
            # the original expression (1.0 - 1.0)/epsilon_decay decayed by zero).
            epsilon -= (1.0 - 0.1) / epsilon_decay
np.save('gws_p12', rewards)
plt.plot(rewards)
plt.ylabel("Rewards")
plt.xlabel("Episodes")
plt.title("Rewards over Episodes")
plt.savefig('gws_p12.png')
plt.show()
if __name__ == '__main__':
deep_grid_world()
|
from typing import List, Dict
from asgard.clients.apps.client import AppsClient
from asgard.workers.autoscaler.cloudinterface import CloudInterface
from asgard.workers.converters.asgard_converter import (
AppConverter,
AppStatsConverter,
DecisionConverter,
)
from asgard.workers.models.app_stats import AppStats
from asgard.workers.models.decision import Decision
from asgard.workers.models.scalable_app import ScalableApp
class AsgardInterface(CloudInterface):
def __init__(self):
self._asgard_client = AppsClient()
async def fetch_all_apps(self) -> List[ScalableApp]:
app_dtos = await self._asgard_client.get_all_apps()
apps = AppConverter.all_to_model(app_dtos)
return apps
async def get_all_scalable_apps(self) -> List[ScalableApp]:
all_apps = await self.fetch_all_apps()
if all_apps:
return list(filter(ScalableApp.is_set_to_scale, all_apps))
return list()
async def get_app_stats(self, app: ScalableApp) -> AppStats:
app_stats_dto = await self._asgard_client.get_app_stats(app.id)
app_stats = AppStatsConverter.to_model(app_stats_dto)
return app_stats
async def apply_decisions(
self, scaling_decisions: List[Decision]
) -> List[Dict]:
if scaling_decisions:
decision_dtos = DecisionConverter.all_to_dto(scaling_decisions)
post_body = await self._asgard_client.post_scaling_decisions(
decision_dtos
)
return post_body
return []
|
#!/router/bin/python
from .trex_general_test import CTRexGeneral_Test, CTRexScenario
from .trex_nbar_test import CTRexNbarBase
from CPlatform import CStaticRouteConfig
from .tests_exceptions import *
#import sys
import time
from nose.tools import nottest
# Testing client cfg ARP resolve. Actually, just need to check that TRex run finished with no errors.
# If resolve will fail, TRex will exit with exit code != 0
class CTRexClientCfg_Test(CTRexNbarBase):
"""This class defines the client configuration testcase of the TRex traffic generator"""
def __init__(self, *args, **kwargs):
CTRexNbarBase.__init__(self, *args, **kwargs)
def setUp(self):
if CTRexScenario.setup_name == 'kiwi02':
self.skip("Can't run currently on kiwi02")
super(CTRexClientCfg_Test, self).setUp() # launch super test class setUp process
def test_client_cfg_nbar(self):
if self.is_loopback:
self.skip('No NBAR on loopback')
if not CTRexScenario.router_cfg['no_dut_config']:
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex (
c = core,
m = mult,
nc = True,
p = True,
d = 100,
f = 'avl/sfr_delay_10_1g.yaml',
client_cfg = 'automation/regression/cfg/client_cfg.yaml',
l = 1000)
trex_res = self.trex.sample_until_finish()
print("\nLATEST RESULT OBJECT:")
print(trex_res)
self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
self.match_classification()
def test_client_cfg_vlan(self):
if self.is_loopback:
self.skip('Not relevant on loopback')
if not CTRexScenario.router_cfg['no_dut_config']:
self.router.configure_basic_interfaces(vlan = True)
self.router.config_pbr(mode = "config", vlan = True)
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex (
c = core,
m = mult,
nc = True,
p = True,
d = 60,
f = 'cap2/dns.yaml',
limit_ports = 4,
client_cfg = 'automation/regression/cfg/client_cfg_vlan.yaml')
trex_res = self.trex.sample_until_finish()
print("\nLATEST RESULT OBJECT:")
print(trex_res)
self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
def tearDown(self):
CTRexNbarBase.tearDown(self)
pass
if __name__ == "__main__":
pass
|
# Copyright 2018-2020 Descartes Labs.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import unittest
import mock
import descarteslabs
from descarteslabs.client.exceptions import (
ProxyAuthenticationRequiredError,
BadRequestError,
)
from descarteslabs.client.services.service import (
JsonApiService,
JsonApiSession,
Service,
Session,
ThirdPartyService,
)
from descarteslabs.client.services.service.service import (
HttpHeaderKeys,
HttpHeaderValues,
HttpRequestMethod,
HttpStatusCode,
WrappedSession,
requests,
)
from descarteslabs.client.version import __version__
from descarteslabs.common.http.authorization import add_bearer
FAKE_URL = "http://localhost"
FAKE_TOKEN = "foo.bar.sig"
class TestService(unittest.TestCase):
def test_session_token(self):
service = Service("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
assert service.session.headers.get("Authorization") == add_bearer(FAKE_TOKEN)
def test_client_session_header(self):
service = Service("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
assert "X-Client-Session" in service.session.headers
assert (
service.session.headers[HttpHeaderKeys.ContentType]
== HttpHeaderValues.ApplicationJson
)
assert service.session.headers[HttpHeaderKeys.UserAgent] == "{}/{}".format(
HttpHeaderValues.DlPython, __version__
)
class TestJsonApiService(unittest.TestCase):
def test_session_token(self):
service = JsonApiService("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
assert service.session.headers.get("Authorization") == add_bearer(FAKE_TOKEN)
def test_client_session_header(self):
service = JsonApiService("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
assert "X-Client-Session" in service.session.headers
assert (
service.session.headers[HttpHeaderKeys.ContentType]
== HttpHeaderValues.ApplicationVndApiJson
)
assert service.session.headers[HttpHeaderKeys.UserAgent] == "{}/{}".format(
HttpHeaderValues.DlPython, __version__
)
class TestThirdPartyService(unittest.TestCase):
def test_client_session_header(self):
service = ThirdPartyService()
assert "User-Agent" in service.session.headers
class TestWrappedSession(unittest.TestCase):
def test_pickling(self):
session = WrappedSession(FAKE_URL, timeout=10)
assert 10 == session.timeout
unpickled = pickle.loads(pickle.dumps(session))
assert 10 == unpickled.timeout
@mock.patch.object(requests.Session, "request")
def test_request_group_header_none(self, request):
request.return_value.status_code = 200
session = WrappedSession("")
session.request("POST", FAKE_URL)
request.assert_called_once()
assert "X-Request-Group" in request.call_args[1]["headers"]
@mock.patch.object(requests.Session, "request")
def test_request_group_header_conflict(self, request):
request.return_value.status_code = HttpStatusCode.Ok
args = "POST", FAKE_URL
kwargs = dict(headers={"X-Request-Group": "f00"})
session = WrappedSession("")
session.request(*args, **kwargs)
request.assert_called_once_with(*args, **kwargs) # we do nothing here
@mock.patch.object(requests.Session, "request")
def test_request_group_header_no_conflict(self, request):
request.return_value.status_code = HttpStatusCode.Ok
session = WrappedSession("")
session.request("POST", FAKE_URL, headers={"foo": "bar"})
request.assert_called_once()
assert "X-Request-Group" in request.call_args[1]["headers"]
class TestSessionClass(unittest.TestCase):
def test_bad_session(self):
class MySession:
pass
with self.assertRaises(TypeError):
Service(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
@mock.patch.object(requests.Session, "request")
def test_good_session(self, request):
request.return_value.status_code = HttpStatusCode.Ok
class MySession(Session):
pass
service = Service(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
service.session.get("bar")
request.assert_called()
@mock.patch.object(requests.Session, "request")
def test_bad_json_session(self, request):
request.return_value.status_code = HttpStatusCode.Ok
class MySession(Session):
pass
with self.assertRaises(TypeError):
JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
@mock.patch.object(requests.Session, "request")
def test_good_json_session(self, request):
request.return_value.status_code = HttpStatusCode.Ok
class MySession(JsonApiSession):
pass
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
service.session.get("bar")
request.assert_called()
@mock.patch.object(requests.Session, "request")
def test_proxy_called(self, request):
request.return_value.status_code = HttpStatusCode.ProxyAuthenticationRequired
class MySession(Session):
handle_proxy_authentication_called = 0
handled = True
def handle_proxy_authentication(self, method, url, **kwargs):
MySession.handle_proxy_authentication_called += 1
assert method == HttpRequestMethod.GET
assert url == "bar"
return MySession.handled
service = Service(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 1
MySession.handled = False
with self.assertRaises(ProxyAuthenticationRequiredError):
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 2
@mock.patch.object(requests.Session, "request")
def test_proxy_called_jsonapi(self, request):
request.return_value.status_code = HttpStatusCode.ProxyAuthenticationRequired
class MySession(JsonApiSession):
handle_proxy_authentication_called = 0
handled = True
def handle_proxy_authentication(self, method, url, **kwargs):
MySession.handle_proxy_authentication_called += 1
assert method == HttpRequestMethod.GET
assert url == "bar"
return MySession.handled
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 1
MySession.handled = False
with self.assertRaises(ProxyAuthenticationRequiredError):
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 2
@mock.patch.object(requests.Session, "request")
def test_proxy_called_thirdparty(self, request):
request.return_value.status_code = HttpStatusCode.ProxyAuthenticationRequired
class MySession(Session):
handle_proxy_authentication_called = 0
handled = True
def handle_proxy_authentication(self, method, url, **kwargs):
MySession.handle_proxy_authentication_called += 1
assert method == HttpRequestMethod.GET
assert url == "bar"
return MySession.handled
service = ThirdPartyService(session_class=MySession)
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 1
MySession.handled = False
with self.assertRaises(ProxyAuthenticationRequiredError):
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 2
class TestJsonApiSession(unittest.TestCase):
# A JSONAPI error can contain, amongst others, the following fields:
# status, title, detail, source
# The source field can contain:
# pointer, parameter
# When rewritten, the error message looks like:
# [title or status: ][description: ][source or parameter][ (id)][newline link]
@mock.patch.object(requests.Session, "request")
def test_jsonapi_error(self, request):
error_title = "Title"
error_status = "Status" # Should be ignored
request.return_value.status_code = HttpStatusCode.BadRequest
request.return_value.text = (
'{{"errors": [{{"title": "{}", "status": "{}"}}]}}'
).format(error_title, error_status)
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), rewrite_errors=True
)
try:
service.session.get("bar")
except BadRequestError as e:
assert e.args == ("\n {}".format(error_title),)
@mock.patch.object(requests.Session, "request")
def test_jsonapi_error_with_detail(self, request):
error_title = "Title"
error_detail = "Description"
request.return_value.status_code = HttpStatusCode.BadRequest
request.return_value.text = (
'{{"errors": [{{"title": "{}", "detail": "{}"}}]}}'
).format(error_title, error_detail)
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), rewrite_errors=True
)
try:
service.session.get("bar")
except BadRequestError as e:
assert e.args == ("\n {}: {}".format(error_title, error_detail),)
@mock.patch.object(requests.Session, "request")
def test_jsonapi_error_no_title(self, request):
error_status = "Status" # Should be used instead of the title
error_detail = "Description"
request.return_value.status_code = HttpStatusCode.BadRequest
request.return_value.text = (
'{{"errors": [{{"status": "{}", "detail": "{}"}}]}}'
).format(error_status, error_detail)
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), rewrite_errors=True
)
try:
service.session.get("bar")
except BadRequestError as e:
assert e.args == ("\n {}: {}".format(error_status, error_detail),)
@mock.patch.object(requests.Session, "request")
def test_jsonapi_error_with_source(self, request):
error_title = "Title"
error_detail = "Detail"
error_field = "Field"
request.return_value.status_code = HttpStatusCode.BadRequest
request.return_value.text = (
'{{"errors": [{{"title": "{}", "detail": "{}", "source": '
'{{"pointer": "/path/to/{}"}}}}]}}'
).format(error_title, error_detail, error_field)
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), rewrite_errors=True
)
try:
service.session.get("bar")
except BadRequestError as e:
assert e.args == (
"\n {}: {}: {}".format(error_title, error_detail, error_field),
)
@mock.patch.object(requests.Session, "request")
def test_jsonapi_error_with_id(self, request):
error_title = "Title"
error_detail = "Detail"
error_id = "123"
request.return_value.status_code = HttpStatusCode.BadRequest
request.return_value.text = (
'{{"errors": [{{"title": "{}", "detail": "{}", "id": {}}}]}}'
).format(error_title, error_detail, error_id)
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), rewrite_errors=True
)
try:
service.session.get("bar")
except BadRequestError as e:
assert e.args == (
"\n {}: {} ({})".format(error_title, error_detail, error_id),
)
@mock.patch.object(requests.Session, "request")
def test_jsonapi_error_with_link(self, request):
error_title = "Title"
error_detail = "Detail"
error_href = "Href"
request.return_value.status_code = HttpStatusCode.BadRequest
request.return_value.text = (
'{{"errors": [{{"title": "{}", "detail": "{}", "links": '
'{{"about": "{}"}}}}]}}'
).format(error_title, error_detail, error_href)
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), rewrite_errors=True
)
try:
service.session.get("bar")
except BadRequestError as e:
assert e.args == (
"\n {}: {}\n {}".format(
error_title, error_detail, error_href
),
)
request.return_value.text = (
'{{"errors": [{{"title": "{}", "detail": "{}", "links": '
'{{"about": {{"href": "{}"}}}}}}]}}'
).format(error_title, error_detail, error_href)
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), rewrite_errors=True
)
try:
service.session.get("bar")
except BadRequestError as e:
assert e.args == (
"\n {}: {}\n {}".format(
error_title, error_detail, error_href
),
)
class TestDefaultProxyClass(unittest.TestCase):
@mock.patch.object(requests.Session, "request")
def test_session_default_proxy(self, request):
request.return_value.status_code = HttpStatusCode.ProxyAuthenticationRequired
class MySession(Session):
handle_proxy_authentication_called = 0
handled = True
def handle_proxy_authentication(self, method, url, **kwargs):
MySession.handle_proxy_authentication_called += 1
assert method == HttpRequestMethod.GET
assert url == "bar"
return MySession.handled
Service.set_default_session_class(MySession)
service = Service("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 1
MySession.handled = False
with self.assertRaises(ProxyAuthenticationRequiredError):
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 2
MySession.handled = True
ThirdPartyService.set_default_session_class(MySession)
service = ThirdPartyService()
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 3
MySession.handled = False
with self.assertRaises(ProxyAuthenticationRequiredError):
service.session.get("bar")
assert MySession.handle_proxy_authentication_called == 4
class TestWarningsClass(unittest.TestCase):
@mock.patch.object(descarteslabs.client.services.service.service, "warn")
@mock.patch.object(requests.Session, "request")
def test_session_deprecation_warning(self, request, warn):
message = "Warning"
cls = FutureWarning
class result:
status_code = HttpStatusCode.Ok
def json(self):
return {
"meta": {
"warnings": [{"message": message, "category": cls.__name__}]
}
}
request.side_effect = lambda *args, **kw: result()
service = JsonApiService("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
service.session.get("bar")
warn.assert_called_once_with(message, cls)
@mock.patch.object(descarteslabs.client.services.service.service, "warn")
@mock.patch.object(requests.Session, "request")
def test_session_my_warning(self, request, warn):
message = "Warning"
category = "MyCategory"
class result:
status_code = HttpStatusCode.Ok
def json(self):
return {
"meta": {"warnings": [{"message": message, "category": category}]}
}
request.side_effect = lambda *args, **kw: result()
service = JsonApiService("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
service.session.get("bar")
warn.assert_called_once_with("{}: {}".format(category, message), UserWarning)
@mock.patch.object(descarteslabs.client.services.service.service, "warn")
@mock.patch.object(requests.Session, "request")
def test_session_warning(self, request, warn):
message = "Warning"
class result:
status_code = HttpStatusCode.Ok
def json(self):
return {"meta": {"warnings": [{"message": message}]}}
request.side_effect = lambda *args, **kw: result()
service = JsonApiService("foo", auth=mock.MagicMock(token=FAKE_TOKEN))
service.session.get("bar")
warn.assert_called_once_with(message, UserWarning)
class TestInitialize(unittest.TestCase):
@mock.patch.object(requests.Session, "request")
def test_initialize_session(self, request):
request.return_value.status_code = HttpStatusCode.Ok
class MySession(Session):
initialize_called = 0
def initialize(self):
MySession.initialize_called += 1
service = Service(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
service.session.get("bar")
assert MySession.initialize_called == 1
@mock.patch.object(requests.Session, "request")
def test_initialize_json_api_session(self, request):
request.return_value.status_code = HttpStatusCode.Ok
class MySession(JsonApiSession):
initialize_called = 0
def initialize(self):
MySession.initialize_called += 1
service = JsonApiService(
"foo", auth=mock.MagicMock(token=FAKE_TOKEN), session_class=MySession
)
service.session.get("bar")
assert MySession.initialize_called == 1
@mock.patch.object(requests.Session, "request")
def test_initialize_third_party_session(self, request):
request.return_value.status_code = HttpStatusCode.Ok
class MySession(Session):
initialize_called = 0
def initialize(self):
MySession.initialize_called += 1
service = ThirdPartyService(session_class=MySession)
service.session.get("bar")
assert MySession.initialize_called == 1
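# The pattern these tests exercise, in short (a sketch; MyTimeoutSession is a
# hypothetical name): callers inject a Session subclass to customize
# behaviour such as timeouts, proxy handling, or one-time initialization.
#
# class MySession(Session):
#     def initialize(self):
#         self.timeout = 30
#
# service = Service("foo", auth=auth, session_class=MySession)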
|
from prismriver.plugin.common import Plugin
from prismriver.struct import Song
# todo: load lyrics translations
class MusixmatchPlugin(Plugin):
ID = 'musixmatch'
RANK = 6
def __init__(self, config):
super(MusixmatchPlugin, self).__init__('Musixmatch', config)
def search_song(self, artist, title):
to_delete = ['!', '"', '(', ')']
to_replace = [' ', '.', "'", ' + ']
link = 'https://www.musixmatch.com/lyrics/{}/{}'.format(
self.prepare_url_parameter(artist, to_delete=to_delete, to_replace=to_replace),
self.prepare_url_parameter(title, to_delete=to_delete, to_replace=to_replace))
# the site returns a 404 page if the song is not found
page = self.download_webpage_text(link)
if page:
soup = self.prepare_soup(page)
title_pane = soup.find('div', {'class': 'mxm-track-title'})
song_artist = title_pane.find('a').text
song_title_tag = title_pane.find('h1', recursive=False)
self.remove_tags_from_block(song_title_tag, ['small'])
song_title = song_title_tag.text
base_lyrics_pane = soup.find('div', {'class': 'mxm-lyrics'})
lyrics = ''
for lyrics_pane in base_lyrics_pane.findAll('p', {'class': 'mxm-lyrics__content'}):
lyrics += (lyrics_pane.text + '\n')
return Song(song_artist, song_title, self.sanitize_lyrics([lyrics]))
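# Example usage (a sketch; assumes a Plugin-compatible config object and
# network access to musixmatch.com):
#
# plugin = MusixmatchPlugin(config)
# song = plugin.search_song('Rihanna', 'Diamonds')  # returns a Song or None
# if song:
#     print(song)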
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Stefan Rainow <s.rainow@fz-juelich.de>
#
# *****************************************************************************
"""Custom commands for SPHERES"""
from nicos.commands import usercommand
from nicos.core import UsageError
from nicos_mlz.spheres.utils import getTemperatureController, parseDuration
@usercommand
def ramp(target, ramp=None):
"""Move the temperature to target with the given ramp.
If ramp is omitted and the current ramp is > 0 it is used.
If the current ramp is 0 the command is not executed.
"""
controller = getTemperatureController()
if ramp is not None:
if ramp > 100:
raise UsageError('TemperatureController does not support ramps '
'higher than 100 K/min. If you want to get to '
'%f as fast as possible use rush(%f).' % (target, target))
controller.ramp = ramp
elif controller.ramp == 0:
raise UsageError('Ramp of the TemperatureController is 0. '
'Please specify a ramp with this command.\n'
'Use "ramp(target, RAMP)", '
'"timeramp(target, time)", or "rush(target)"')
controller.move(target)
@usercommand
def timeramp(target, time):
"""Ramp to the given target in the given timeframe.
The ramp is calculated from the current temperature, the given target and
the given time. Ramps for tube and sample are calculated separately."""
time = parseDuration(time, 'timeramp')
controller = getTemperatureController()
# stop current ramp
controller.ramp = 0
controller.move(controller.read())
# set new target
controller.ramp = abs(target-controller.read())/(time/60)  # time is in seconds, ramp in K/min
controller.move(target)
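# Example usage (a sketch; parseDuration is assumed to accept duration
# strings such as '2h' in addition to plain seconds):
#
# ramp(100, 2)          # ramp to 100 K at 2 K/min
# timeramp(100, '2h')   # reach 100 K within two hours
# rush(4)               # go to 4 K as fast as possible, ignoring ramps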
@usercommand
def rush(target):
"""Move to the given temperature as fast as possible.
Previously set ramps will be ignored but preserved.
"""
getTemperatureController().rushTemperature(target)
@usercommand
def stoppressure():
"""Stop pressure regulation"""
getTemperatureController().stopPressure()
@usercommand
def stoptemperature():
"""Stop the temperature ramp"""
controller = getTemperatureController()
old_ramp = controller.ramp
controller.ramp = 0
controller.move(controller.read())
controller.ramp = old_ramp
|
#!BPY
# Copyright (c) 2020 SuperTuxKart author(s)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import bpy, math, re, random
from mathutils import *
from . import stk_utils
# --------------------------------------------------------------------------
def writeBezierCurve(f, curve, speed, extend="cyclic"):
matrix = curve.matrix_world
if len(curve.data.splines) > 1:
# This module-level function has no 'self'; the original called
# self.log.report here, which would raise a NameError.
print("Warning: " + curve.name + " contains multiple curves, will only export the first one")
f.write(' <curve channel="LocXYZ" speed="%.2f" interpolation="bezier" extend="%s">\n'\
%(speed, extend))
if curve.data.splines[0].type != 'BEZIER':
print("Warning: " + curve.name + " should be a bezier curve, not a " + curve.data.splines[0].type)  # no 'self' available here either
else:
for pt in curve.data.splines[0].bezier_points:
v0 = matrix @ pt.handle_left
v1 = matrix @ pt.co  # the original multiplied by the matrix twice
v2 = matrix @ pt.handle_right
f.write(" <point c=\"%f %f %f\" h1=\"%f %f %f\" h2=\"%f %f %f\" />\n"% \
( v1[0],v1[2],v1[1],
v0[0],v0[2],v0[1],
v2[0],v2[2],v2[1] ) )
f.write(" </curve>\n")
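# Illustrative output of writeBezierCurve (values abridged); note that the
# y and z components are swapped to match the in-game coordinate system:
#
# <curve channel="LocXYZ" speed="25.00" interpolation="bezier" extend="cyclic">
#     <point c="x z y" h1="x z y" h2="x z y" />
# </curve>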
# ------------------------------------------------------------------------------
class ItemsExporter:
def __init__(self):
self.m_objects = []
def processObject(self, object, stktype):
if object.type=="EMPTY":
# For backward compatibility test for the blender name
# in case that there is no type property defined. This makes
# it easier to port old style tracks without having to
# add the property for all items.
stktype = stk_utils.getObjectProperty(object, "type", object.name).upper()
# Check for old and new style names
if stktype[:8] in ["GHERRING", "RHERRING", "YHERRING", "SHERRING"] \
or stktype[:6] == "BANANA" or stktype[:4] == "ITEM" \
or stktype[:11] == "NITRO-SMALL" or stktype[:9] == "NITRO-BIG" \
or stktype[:11] == "NITRO_SMALL" or stktype[:9] == "NITRO_BIG" \
or stktype[:11] == "SMALL-NITRO" or stktype[:9] == "BIG-NITRO" \
or stktype[:6] == "ZIPPER":
self.m_objects.append(object)
return True
return False
def export(self, f):
rad2deg = 180.0/3.1415926535
scene = bpy.context.scene
is_ctf = stk_utils.getSceneProperty(scene, "ctf", "false") == "true"
for obj in self.m_objects:
item_type = stk_utils.getObjectProperty(obj, "type", "").lower()
if item_type=="":
# If the type is not specified in the property,
# assume it's an old style item, which means the
# blender object name is to be used
l = obj.name.split(".")
if len(l)!=1:
if l[-1].isdigit(): # Remove number appended by blender
l = l[:-1]
item_type = ".".join(l)
else:
item_type = obj.name
# Portability for old models:
g=re.match("(.*) *{(.*)}", item_type)
if g:
item_type = g.group(1)
specs = g.group(2).lower()
# Legacy flags from the old naming scheme; the matched components are
# recomputed below, so these assignments only act as placeholders.
if specs.find("z") >= 0: z = None
if specs.find("p") >= 0: p = None
if specs.find("r") >= 0: r = None
if item_type=="GHERRING": item_type="banana"
if item_type=="RHERRING": item_type="item"
if item_type=="YHERRING": item_type="big-nitro"
if item_type=="SHERRING": item_type="small-nitro"
else:
if item_type=="nitro-big": item_type="big-nitro"
if item_type=="nitro_big": item_type="big-nitro"
if item_type=="nitro-small": item_type="small-nitro"
if item_type=="nitro_small": item_type="small-nitro"
# Get the position of the item - first check if the item should
# be dropped on the track, or stay at the position indicated.
rx,ry,rz = map(lambda x: rad2deg*x, obj.rotation_euler)
h,p,r = map(lambda i: "%.2f"%i, [rz,rx,ry])
x,y,z = map(lambda i: "%.2f"%i, obj.location)
drop = stk_utils.getObjectProperty(obj, "dropitem", "true").lower()
# Swap y and z axis to have the same coordinate system used in game.
s = "%s id=\"%s\" x=\"%s\" y=\"%s\" z=\"%s\"" % (item_type, obj.name, x, z, y)
if h and h!="0.00": s = "%s h=\"%s\""%(s, h)
if drop=="false":
# Pitch and roll will be set automatically if dropped
if p and p!="0.00": s="%s p=\"%s\""%(s, p)
if r and r!="0.00": s="%s r=\"%s\""%(s, r)
s="%s drop=\"false\""%s
if is_ctf:
f.write(" <%s ctf=\"%s\"/>\n" % (s, stk_utils.getObjectProperty(obj, "ctf_only", "false").lower()))
else:
f.write(" <%s />\n" % s)
# ------------------------------------------------------------------------------
class ParticleEmitterExporter:
def __init__(self, log):
self.m_objects = []
self.log = log
def processObject(self, object, stktype):
if object.type=="EMPTY" and stktype=="PARTICLE_EMITTER":
self.m_objects.append(object)
return True
else:
return False
def export(self, f):
for obj in self.m_objects:
try:
originXYZ = stk_utils.getNewXYZHString(obj)
flags = []
if len(stk_utils.getObjectProperty(obj, "particle_condition", "")) > 0:
flags.append('conditions="' + stk_utils.getObjectProperty(obj, "particle_condition", "") + '"')
if stk_utils.getObjectProperty(obj, "clip_distance", 0) > 0 :
flags.append('clip_distance="%i"' % stk_utils.getObjectProperty(obj, "clip_distance", 0))
if stk_utils.getObjectProperty(obj, "auto_emit", 'true') == 'false':
flags.append('auto_emit="%s"' % stk_utils.getObjectProperty(obj, "auto_emit", 'true'))
f.write(' <particle-emitter kind="%s" id=\"%s\" %s %s>\n' %\
(stk_utils.getObjectProperty(obj, "kind", 0), obj.name, originXYZ, ' '.join(flags)))
if obj.animation_data and obj.animation_data.action and obj.animation_data.action.fcurves and len(obj.animation_data.action.fcurves) > 0:
writeIPO(f, obj.animation_data)
f.write(' </particle-emitter>\n')
except:
self.log.report({'ERROR'}, "Invalid particle emitter <" + stk_utils.getObjectProperty(obj, "name", obj.name) + "> ")
# ------------------------------------------------------------------------------
# Blender hair systems are usually used to automate the placement of plants on the ground
class BlenderHairExporter:
def __init__(self, log):
self.m_objects = []
self.log = log
def processObject(self, object, stktype):
if object.particle_systems is not None and len(object.particle_systems) >= 1 and \
object.particle_systems[0].settings.type == 'EMITTER':
if (object.particle_systems[0].settings.instance_object is not None): # or \
#(object.particle_systems[0].settings.dupli_group is not None): #and stk_utils.getObjectProperty(object.particle_systems[0].settings.dupli_object, "type", "") == "object":
self.m_objects.append(object)
else:
self.log.report({'WARNING'}, "Ignoring invalid hair system <%s>" % object.name)
return False # always return false so that the object is exported normally as a mesh too
def export(self, f):
rad2deg = 180.0/3.1415926535
for obj in self.m_objects:
for particleSystem in obj.particle_systems:
f.write(' <!-- Hair system %s, contains %i particles -->\n' % (obj.name, len(particleSystem.particles)))
for particle in particleSystem.particles:
if particleSystem.settings.render_type == 'OBJECT':
duplicated_obj = particleSystem.settings.instance_object  # Blender 2.8 name, as used in processObject above; dupli_object is the removed 2.7 attribute
# Currently we only support random picking from the group
elif particleSystem.settings.render_type == 'COLLECTION':  # 'GROUP' before Blender 2.8
object_group = particleSystem.settings.instance_collection.objects  # was dupli_group in 2.7
choice = random.randint(0, len(object_group) - 1)
duplicated_obj = object_group[choice]
loc = particle.location
hpr = particle.rotation.to_euler('XYZ')
# hack to get proper orientation
if (particleSystem.settings.normal_factor >= 0.5):
hpr.rotate_axis("Z", -1.57079633)
#print (particle.size)
si = particle.size #/ duplicated_obj.dimensions[2]
loc_rot_scale_str = "xyz=\"%.2f %.2f %.2f\" hpr=\"%.1f %.1f %.1f\" scale=\"%.2f %.2f %.2f\"" %\
(loc[0], loc[2], loc[1], -hpr[0]*rad2deg, -hpr[2]*rad2deg,
-hpr[1]*rad2deg, si, si, si)
if duplicated_obj.proxy is not None and duplicated_obj.proxy.library is not None:
path_parts = re.split("/|\\\\", duplicated_obj.proxy.library.filepath)
lib_name = path_parts[-2]
f.write(' <library name="%s" id=\"%s\" %s/>\n' % (lib_name, duplicated_obj.name, loc_rot_scale_str))
else:
name = stk_utils.getObjectProperty(duplicated_obj, "name", duplicated_obj.name )
if len(name) == 0:
name = duplicated_obj.name
f.write(' <object type="animation" %s interaction="ghost" model="%s.spm" skeletal-animation="false"></object>\n' % (loc_rot_scale_str, name))
f.write(' <!-- END Hair system %s -->\n\n' % obj.name)
# ------------------------------------------------------------------------------
class SoundEmitterExporter:
def __init__(self, log):
self.m_objects = []
self.log = log
def processObject(self, object, stktype):
if object.type=="EMPTY" and stktype=="SFX_EMITTER":
self.m_objects.append(object)
return True
else:
return False
def export(self, f):
for obj in self.m_objects:
try:
# origin
originXYZ = stk_utils.getXYZHPRString(obj)
play_near_string = ""
if stk_utils.getObjectProperty(obj, "play_when_near", "false") == "true":
dist = stk_utils.getObjectProperty(obj, "play_distance", 1.0)
play_near_string = " play-when-near=\"true\" distance=\"%.1f\"" % dist
conditions_string = ""
if len(stk_utils.getObjectProperty(obj, "sfx_conditions", "")) > 0:
conditions_string = ' conditions="' + stk_utils.getObjectProperty(obj, "sfx_conditions", "") + '"'
f.write(' <object type="sfx-emitter" id=\"%s\" sound="%s" rolloff="%.3f" volume="%s" max_dist="%.1f" %s%s%s>\n' %\
(obj.name,
stk_utils.getObjectProperty(obj, "sfx_filename", "some_sound.ogg"),
stk_utils.getObjectProperty(obj, "sfx_rolloff", 0.05),
stk_utils.getObjectProperty(obj, "sfx_volume", 0),
stk_utils.getObjectProperty(obj, "sfx_max_dist", 500.0), originXYZ, play_near_string, conditions_string))
if obj.animation_data and obj.animation_data.action and obj.animation_data.action.fcurves and len(obj.animation_data.action.fcurves) > 0:
writeIPO(f, obj.animation_data)
f.write(' </object>\n')
except:
self.log.report({'ERROR'}, "Invalid sound emitter <" + stk_utils.getObjectProperty(obj, "name", obj.name) + "> ")
# ------------------------------------------------------------------------------
class ActionTriggerExporter:
def __init__(self, log):
self.m_objects = []
self.log = log
def processObject(self, object, stktype):
if stktype=="ACTION_TRIGGER":
self.m_objects.append(object)
return True
else:
return False
def export(self, f):
for obj in self.m_objects:
try:
# origin
originXYZ = stk_utils.getXYZHPRString(obj)
trigger_type = stk_utils.getObjectProperty(obj, "trigger_type", "point")
#if trigger_type == "sphere":
# radius = (obj.dimensions.x + obj.dimensions.y + obj.dimensions.z)/6 # divide by 3 to get average size, divide by 2 to get radius from diameter
# f.write(" <check-sphere xyz=\"%.2f %.2f %.2f\" radius=\"%.2f\"/>\n" % \
# (obj.location[0], obj.location[2], obj.location[1], radius) )
if trigger_type == "point":
f.write(' <object type="action-trigger" trigger-type="point" id=\"%s\" action="%s" distance="%s" reenable-timeout="%s" triggered-object="%s" %s/>\n' %\
(obj.name,
stk_utils.getObjectProperty(obj, "action", ""),
stk_utils.getObjectProperty(obj, "trigger_distance", 5.0),
stk_utils.getObjectProperty(obj, "reenable_timeout", 999999.9),
stk_utils.getObjectProperty(obj, "triggered_object", ""),
originXYZ))
elif trigger_type == "cylinder":
radius = (obj.dimensions.x + obj.dimensions.y)/4 # divide by 2 to get average size, divide by 2 to get radius from diameter
f.write(" <object type=\"action-trigger\" trigger-type=\"cylinder\" action=\"%s\" xyz=\"%.2f %.2f %.2f\" radius=\"%.2f\" height=\"%.2f\"/>\n" % \
(stk_utils.getObjectProperty(obj, "action", ""), obj.location[0], obj.location[2], obj.location[1], radius, obj.dimensions.z) )
except:
self.log.report({'ERROR'}, "Invalid action <" + stk_utils.getObjectProperty(obj, "name", obj.name) + "> ")
# ------------------------------------------------------------------------------
class StartPositionFlagExporter:
def __init__(self, log):
self.m_objects = []
self.m_red_flag = None
self.m_blue_flag = None
self.log = log
def processObject(self, object, stktype):
if object.type=="EMPTY" and stktype[:5]=="START":
self.m_objects.append(object)
return True
elif object.type=="EMPTY" and stktype[:8]=="RED_FLAG":
self.m_red_flag = object
return True
elif object.type=="EMPTY" and stktype[:9]=="BLUE_FLAG":
self.m_blue_flag = object
return True
else:
return False
def export(self, f):
scene = bpy.context.scene
karts_per_row = int(stk_utils.getSceneProperty(scene, "start_karts_per_row", 2))
distance_forwards = float(stk_utils.getSceneProperty(scene, "start_forwards_distance", 1.5))
distance_sidewards = float(stk_utils.getSceneProperty(scene, "start_sidewards_distance", 3.0))
distance_upwards = float(stk_utils.getSceneProperty(scene, "start_upwards_distance", 0.1))
if stk_utils.getSceneProperty(bpy.data.scenes[0], 'is_stk_node', 'false') != 'true':
f.write(" <default-start karts-per-row =\"%i\"\n"%karts_per_row )
f.write(" forwards-distance =\"%.2f\"\n"%distance_forwards )
f.write(" sidewards-distance=\"%.2f\"\n"%distance_sidewards)
f.write(" upwards-distance =\"%.2f\"/>\n"%distance_upwards)
is_ctf = self.m_red_flag is not None and self.m_blue_flag is not None \
and stk_utils.getSceneProperty(scene, "ctf", "false") == "true"
dId2Obj_ctf = {}
dId2Obj = {}
for obj in self.m_objects:
stktype = stk_utils.getObjectProperty(obj, "type", obj.name).upper()
id = int(stk_utils.getObjectProperty(obj, "start_index", "-1"))
if id == -1:  # id is an int; the original compared against the string "-1", which is never true
self.log.report({'WARNING'}, "Invalid start position " + str(id))
if is_ctf and stk_utils.getObjectProperty(obj, "ctf_only", "false").lower() == "true":
dId2Obj_ctf[id] = obj
else:
dId2Obj[id] = obj
l = dId2Obj.keys()
if len(l) < 4 and stk_utils.getSceneProperty(scene, "arena", "false") == "true":
self.log.report({'WARNING'}, "You should define at least 4 start positions")
if is_ctf and len(dId2Obj_ctf.keys()) < 16:
self.log.report({'WARNING'}, "You should define at least 16 ctf start positions, with odd"
" / even indices alternating between the blue and red teams.")
for key, value in sorted(dId2Obj.items()):
f.write(" <start %s/>\n" % stk_utils.getXYZHString(value))
for key, value in sorted(dId2Obj_ctf.items()):
f.write(" <ctf-start %s/>\n" % stk_utils.getXYZHString(value))
if is_ctf:
f.write(" <red-flag %s/>\n" % stk_utils.getXYZHString(self.m_red_flag))
f.write(" <blue-flag %s/>\n" % stk_utils.getXYZHString(self.m_blue_flag))
# ------------------------------------------------------------------------------
class LibraryNodeExporter:
def __init__(self, log):
self.m_objects = []
self.log = log
def processObject(self, object, stktype):
if object.proxy is not None and object.proxy.library is not None:
self.m_objects.append(object)
return True
else:
return False
def export(self, f):
import re
for obj in self.m_objects:
try:
path_parts = re.split("/|\\\\", obj.proxy.library.filepath)
lib_name = path_parts[-2]
# origin
originXYZ = stk_utils.getXYZHPRString(obj)
f.write(' <library name="%s" id=\"%s\" %s>\n' % (lib_name, obj.name, originXYZ))
if obj.animation_data and obj.animation_data.action and obj.animation_data.action.fcurves and len(obj.animation_data.action.fcurves) > 0:
writeIPO(f, obj.animation_data)
f.write(' </library>\n')
except:
self.log.report({'ERROR'}, "Invalid linked object <" + stk_utils.getObjectProperty(obj, "name", obj.name) + "> ")
# ------------------------------------------------------------------------------
class BillboardExporter:
def __init__(self, log):
self.m_objects = []
self.log = log
def processObject(self, object, stktype):
if stktype=="BILLBOARD":
self.m_objects.append(object)
return True
else:
return False
def export(self, f):
for obj in self.m_objects:
data = obj.data
# check the face
face_len = len(data.polygons)
if face_len == 0:
self.log.report({'ERROR'}, "Billboard <" + stk_utils.getObjectProperty(obj, "name", obj.name) \
+ "> must have at least one face")
return
if face_len > 1:
self.log.report({'ERROR'}, "Billboard <" + stk_utils.getObjectProperty(obj, "name", obj.name) \
+ "> has more than ONE face")
return
# check the points
if len(data.polygons[0].vertices) > 4:
self.log.report({'ERROR'}, "Billboard <" + stk_utils.getObjectProperty(obj, "name", obj.name)\
+ "> has more than 4 points")
return
if len(obj.material_slots) < 1:
self.log.report({'ERROR'}, "Billboard <" + stk_utils.getObjectProperty(obj, "name", obj.name)\
+ "> has no materials")
return
try:
# write in the XML
# calcul the size and the position
x_min = data.vertices[0].co[0]
x_max = x_min
y_min = data.vertices[0].co[2]
y_max = y_min
z_min = data.vertices[0].co[1]
z_max = z_min
for i in range(1, 4):
x_min = min(x_min, data.vertices[i].co[0])
x_max = max(x_max, data.vertices[i].co[0])
y_min = min(y_min, data.vertices[i].co[2])
y_max = max(y_max, data.vertices[i].co[2])
z_min = min(z_min, data.vertices[i].co[1])
z_max = max(z_max, data.vertices[i].co[1])
fadeout_str = ""
fadeout = stk_utils.getObjectProperty(obj, "fadeout", "false")
if fadeout == "true":
start = float(stk_utils.getObjectProperty(obj, "start", 1.0))
end = float(stk_utils.getObjectProperty(obj, "end", 15.0))
fadeout_str = "fadeout=\"true\" start=\"%.2f\" end=\"%.2f\""%(start,end)
# A material slot holds the material; the original also passed the location
# values as extra arguments to f.write() instead of including them in the
# % format tuple, which would raise a TypeError.
node_tree = obj.material_slots[data.polygons[0].material_index].material.node_tree.nodes
f.write(' <object type="billboard" id="%s" texture="%s" xyz="%.2f %.2f %.2f" \n'%
(obj.name, stk_utils.searchNodeTreeForImage(node_tree, 1),
obj.location[0], obj.location[2], obj.location[1]))
f.write(' width="%.3f" height="%.3f" %s>\n' %(max(x_max-x_min, z_max-z_min), y_max-y_min, fadeout_str) )
if obj.animation_data and obj.animation_data.action and obj.animation_data.action.fcurves and len(obj.animation_data.action.fcurves) > 0:
writeIPO(f, obj.animation_data)
f.write(' </object>\n')
except:
self.log.report({'ERROR'}, "Invalid billboard <" + stk_utils.getObjectProperty(obj, "name", obj.name) + "> ")
# ------------------------------------------------------------------------------
class LightsExporter:
def __init__(self):
self.m_objects = []
def processObject(self, object, stktype):
if object.type=="LIGHT" and stktype == "LIGHT":
self.m_objects.append(object)
return True
else:
return False
def export(self, f):
for obj in self.m_objects:
colR = int(obj.data.color[0] * 255)
colG = int(obj.data.color[1] * 255)
colB = int(obj.data.color[2] * 255)
f.write(' <light %s id=\"%s\" distance="%.2f" energy="%.2f" color="%i %i %i"' \
% (stk_utils.getXYZString(obj), obj.name, obj.data.distance, obj.data.energy, colR, colG, colB))
if_condition = stk_utils.getObjectProperty(obj, "if", "")
if len(if_condition) > 0:
f.write(' if=\"%s\"' % if_condition)
f.write('>\n')
if obj.animation_data and obj.animation_data.action and obj.animation_data.action.fcurves and len(obj.animation_data.action.fcurves) > 0:
writeIPO(f, obj.animation_data)
f.write(' </light>\n')
# ------------------------------------------------------------------------------
class LightShaftExporter:
def __init__(self):
self.m_objects = []
def processObject(self, object, stktype):
if object.type=="LIGHT" and stktype == "LIGHTSHAFT_EMITTER":
self.m_objects.append(object)
return True
else:
return False
def export(self, f):
for obj in self.m_objects:
f.write(' <lightshaft %s id=\"%s\" opacity="%.2f" color="%s"/>\n' \
% (stk_utils.getXYZString(obj), obj.name, stk_utils.getObjectProperty(obj, "lightshaft_opacity", 0.7), stk_utils.getObjectProperty(obj, "lightshaft_color", "255 255 255")))
# ------------------------------------------------------------------------------
class NavmeshExporter:
def __init__(self, log):
self.m_objects = []
self.log = log
def processObject(self, object, stktype):
if stktype=="NAVMESH":
is_arena = stk_utils.getSceneProperty(bpy.data.scenes[0], "arena", "false") == "true"
is_soccer = stk_utils.getSceneProperty(bpy.data.scenes[0], "soccer", "false") == "true"
if (is_arena or is_soccer):
self.m_objects.append(object)
else:
self.log.report({'WARNING'}, "Navmesh may only be used in battle arenas or on soccer fields")
if len(self.m_objects) > 1:
self.log.report({'WARNING'}, "Cannot have more than 1 navmesh")
print("exportNavmesh 1")
return True
else:
return False
def export(self, f):
return None
def exportNavmesh(self, sPath):
print("exportNavmesh 2")
import bmesh
if len(self.m_objects) > 0:
print("exportNavmesh 3")
with open(sPath+"/navmesh.xml", "w", encoding="utf8", newline="\n") as navmeshfile:
navmesh_obj = self.m_objects[0]
bm = bmesh.new()
mm = navmesh_obj.to_mesh(bpy.data.scenes[0], True, 'PREVIEW', False, False)  # NOTE: Blender 2.7-era to_mesh() signature; 2.8+ uses a depsgraph-evaluated object instead
bm.from_mesh(mm)
om = navmesh_obj.matrix_world
navmeshfile.write('<?xml version="1.0" encoding=\"utf-8\"?>\n')
navmeshfile.write('<navmesh>\n')
min_height_testing = stk_utils.getObjectProperty(navmesh_obj, "min_height_testing", -1.0)
max_height_testing = stk_utils.getObjectProperty(navmesh_obj, "max_height_testing", 5.0)
navmeshfile.write('<height-testing min="%f" max="%f"/>\n' % (min_height_testing, max_height_testing))
navmeshfile.write('<MaxVertsPerPoly nvp="4" />\n')
navmeshfile.write('<vertices>\n')
for vert in bm.verts:
co = om @ vert.co  # use the @ operator as elsewhere in this file; '*' raises a TypeError in Blender 2.8+
navmeshfile.write('<vertex x="%f" y="%f" z="%f" />\n' % (co.x, co.z, co.y))
navmeshfile.write('</vertices>\n')
navmeshfile.write('<faces>\n')
for face in bm.faces:
navmeshfile.write('<face indices="')
if len(face.verts) != 4:
self.log.report({'ERROR'}, 'Use only quad for navmesh, face %d not quad!' % face.index)
self.log.report({'ERROR'}, 'To find it, select the navmesh object, toggle edit mode, then in the Python console:')
self.log.report({'ERROR'}, 'me = bpy.data.objects[\'%s\'].data' % self.m_objects[0].name)
self.log.report({'ERROR'}, 'import bmesh')
self.log.report({'ERROR'}, 'bm = bmesh.from_edit_mesh(me)')
self.log.report({'ERROR'}, 'bm.faces[%d].select = True' % face.index)
self.log.report({'ERROR'}, 'bmesh.update_edit_mesh(me, True)')
assert False
for vert in face.verts:
navmeshfile.write('%d ' % vert.index)
list_face = []
unique_face = []
for edge in face.edges:
for l_face in edge.link_faces:
list_face.append(l_face.index)
for item in list_face:  # deduplicate while preserving order
if item not in unique_face:
unique_face.append(item)
unique_face.remove(face.index) #remove current face index
navmeshfile.write('" adjacents="')
for num in unique_face:
navmeshfile.write('%d ' % num)
navmeshfile.write('" />\n')
navmeshfile.write('</faces>\n')
navmeshfile.write('</navmesh>\n')
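# Illustrative navmesh.xml output (values abridged):
#
# <?xml version="1.0" encoding="utf-8"?>
# <navmesh>
# <height-testing min="-1.000000" max="5.000000"/>
# <MaxVertsPerPoly nvp="4" />
# <vertices>
# <vertex x="0.000000" y="0.000000" z="0.000000" />
# </vertices>
# <faces>
# <face indices="0 1 2 3 " adjacents="1 " />
# </faces>
# </navmesh>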
# ------------------------------------------------------------------------------
class DrivelineExporter:
def __init__(self, log):
self.lChecks = []
self.lCannons = []
self.lDrivelines = []
self.found_main_driveline = False
self.lEndCameras = []
self.log = log
def processObject(self, obj, stktype):
if stktype=="CHECK" or stktype=="LAP" or stktype=="GOAL":
self.lChecks.append(obj)
return True
if stktype=="CANNONSTART":
self.lCannons.append(obj)
return True
# Check for new drivelines
elif stktype=="MAIN-DRIVELINE" or \
stktype=="MAINDRIVELINE" or \
stktype=="MAINDL":
# Main driveline must be the first entry in the list
self.lDrivelines.insert(0, Driveline(obj, 1, self.log))
self.found_main_driveline = True
return True
elif stktype=="DRIVELINE":
self.lDrivelines.append(Driveline(obj, 0))
return True
elif obj.type=="CAMERA" and stktype in ['FIXED', 'AHEAD']:
self.lEndCameras.append(obj)
return True
return False
def export(self, f):
is_arena = stk_utils.getSceneProperty(bpy.data.scenes[0], "arena", "false") == "true"
is_soccer = stk_utils.getSceneProperty(bpy.data.scenes[0], "soccer", "false") == "true"
is_cutscene = stk_utils.getSceneProperty(bpy.data.scenes[0], "cutscene", "false") == "true"
if not self.found_main_driveline and not is_arena and not is_soccer and not is_cutscene:
if len(self.lDrivelines) > 0:
self.log.report({'WARNING'}, "Main driveline missing, using first driveline as main!")
elif stk_utils.getSceneProperty(bpy.data.scenes[0], 'is_stk_node', 'false') != 'true':
self.log.report({'ERROR'}, "No driveline found")
if len(self.lDrivelines) == 0:
self.lDrivelines=[None]
mainDriveline = self.lDrivelines[0]
if mainDriveline is None and stk_utils.getSceneProperty(bpy.data.scenes[0], 'is_stk_node', 'false') != 'true' and not (is_arena or is_soccer):
self.log.report({'ERROR'}, "No main driveline found")
self.lChecks = self.lChecks + self.lCannons # cannons at the end, see #1386
if self.lChecks or mainDriveline:
if not self.lChecks:
self.log.report({'WARNING'}, "No check defined, lap counting will not work properly!")
self.writeChecks(f, self.lChecks, mainDriveline)
if self.lEndCameras:
f.write(" <end-cameras>\n")
for i in self.lEndCameras:
type = stk_utils.getObjectProperty(i, "type", "ahead").lower()
if type=="ahead":
type="ahead_of_kart"
elif type=="fixed":
type="static_follow_kart"
else:
self.log.report({'WARNING'}, "Unknown camera type %s - ignored." % type)  # the original called an undefined log_warning()
continue
xyz = "%f %f %f" % (i.location[0], i.location[2], i.location[1])
start = stk_utils.getObjectProperty(i, "start", 5)
f.write(" <camera type=\"%s\" xyz=\"%s\" distance=\"%s\"/> <!-- %s -->\n"%
(type, xyz, start, i.name) )
f.write(" </end-cameras>\n")
# --------------------------------------------------------------------------
# Finds the closest driveline from the list lDrivelines to the point p (i.e.
# the driveline for which the distance between p and the drivelines start
# point is as small as possible. Returns the index of the closest drivelines.
def findClosestDrivelineToPoint(self, lDrivelines, p):
min_index = 0
min_dist = lDrivelines[0].getStartDistanceTo(p)
for i in range(1,len(lDrivelines)):
driveline=lDrivelines[i]
dist_new = driveline.getStartDistanceTo(p)
if dist_new<min_dist:
min_dist = dist_new
min_index = i
return min_index
# --------------------------------------------------------------------------
# Find the driveline from lRemain that is closest to any of the drivelines
# in lSorted.
def findClosestDrivelineToDrivelines(self, lRemain, lSorted):
remain_index = 0
(min_dist, sorted_index, min_quad) = lRemain[0].getDistanceToStart(lSorted)
for i in range(1, len(lRemain)):
(dist, index, quad) = lRemain[i].getDistanceToStart(lSorted)
if dist<min_dist:
min_dist = dist
sorted_index = index
min_quad = quad
remain_index = i
return (remain_index, sorted_index, min_quad)
# --------------------------------------------------------------------------
# Converts a new drivelines. New drivelines have the following structure:
# +---+---+--+--...--+--
# | | | |
# +---+--+---+--...--+--
# The starting quad of the drivelines is marked by two edges ending in a
# single otherwise unconnected vertex. These two vertices (and edges) are
# not used in the actual driveline, they are only used to indicate where
# the drivelines starts. This data structure is handled in the Driveline
# class.
# Additionally, this function sorts the end cameras according to distance
# to the main driveline - so the first end camera will be the camera
# closest to the start line etc.
def convertDrivelinesAndSortEndCameras(self, lDrivelines, lSorted,
lEndCameras):
# First collect all main drivelines, and all remaining drivelines
# ---------------------------------------------------------------
lMain = []
lRemain = []
for driveline in lDrivelines:
if driveline.isMain():
lMain.append(driveline)
else:
lRemain.append(driveline)
# Now collect all main drivelines in one list starting
# with the closest to 0, then the one closest to the
# end of the first one, etc
p = (0,0,0)
quad_index = 0
while lMain:
min_index = self.findClosestDrivelineToPoint(lMain, p)
# Move the main driveline with minimal distance to the
# sorted list.
lSorted.append(lMain[min_index])
del lMain[min_index]
# Set the start quad index for all quads.
lSorted[-1].setStartQuadIndex(quad_index)
quad_index = quad_index + lSorted[-1].getNumberOfQuads()
p = lSorted[-1].getEndPoint()
# Create a new list for all cameras, which also stores the
# quad index to which the camera is closest to, the distance
# to the quad, and the camera object. The order is important
# since this list is later sorted by quad index, so that the
# first camera is the first in the list.
lCamerasDistance = []
for i in range(len(lEndCameras)):
cam = lEndCameras[i]
try:
(distance, driveline_index, quad_index_camera) = \
lSorted[0].getDistanceTo(cam.location, lSorted)
# Each list contains the index of the closest quad, the
# distance, and then the camera
lEndCameras[i] = (driveline_index, quad_index_camera, cam)
except:
# The '%' must apply to the whole concatenated string, not just the last fragment.
self.log.report({'WARNING'}, ("Problem with the end camera '%s'. Check if the main driveline is "
"properly defined (check warning messages), and the "
"settings of the camera.") % cam.name)
lEndCameras.sort()
# After sorting remove the unnecessary distance and quad index
for i in range(len(lEndCameras)):
# Avoid crash in case that some problem with the camera happened,
# and lEndCameras is just the blender camera, not the tuple
if type(lEndCameras[i])==type(()):
lEndCameras[i] = lEndCameras[i][2]
# There were already two warning messages printed at this stage, so just
# ignore this to avoid further crashes
if len(lSorted) < 1:
return
# The last main driveline needs to be closed to the first quad.
# So set a flag in that driveline that it is the last one.
lSorted[-1].setIsLastMain(lSorted[0])
quad_index = quad_index + 1
# Now add the remaining drivelines one at a time. From all remaining
# drivelines we pick the one closest to the drivelines contained in
# lSorted.
while lRemain:
t = self.findClosestDrivelineToDrivelines(lRemain, lSorted)
(remain_index, sorted_index, quad_to_index) = t
lRemain[remain_index].setFromQuad(lSorted[sorted_index],
quad_to_index)
lSorted.append(lRemain[remain_index])
del lRemain[remain_index]
# Set the start quad index for all quads.
lSorted[-1].setStartQuadIndex(quad_index)
quad_index = quad_index + lSorted[-1].getNumberOfQuads()
# --------------------------------------------------------------------------
# Writes the track.quad file with the list of all quads, and the track.graph
# file defining a graph node for each quad and a basic connection between
# all graph nodes.
def writeQuadAndGraph(self, sPath):
#start_time = bsys.time()
lDrivelines = self.lDrivelines
lEndCameras = self.lEndCameras
print("Writing quad file --> \t")
if not lDrivelines:
print("No main driveline defined, no driveline information exported!!!")
return
lSorted = []
self.convertDrivelinesAndSortEndCameras(lDrivelines, lSorted, lEndCameras)
# That means that there were some problems with the drivelines, and
# it doesn't make any sense to continue anyway
if not lSorted:
return
# Stores the first quad number (and since quads = graph nodes the node
# number) of each section of the track. I.e. the main track starts with
# quad 0, then the first alternative way, ...
lStartQuad = [0]
dSuccessor = {}
last_main_lap_quad = 0
count = 0
with open(sPath + "/quads.xml", "w", encoding="utf8", newline="\n") as f:
f.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
f.write("<quads>\n")
f.write(' <height-testing min="%f" max="%f"/>\n' %\
(lSorted[0].min_height_testing, lSorted[0].max_height_testing))
for driveline in lSorted:
driveline.writeQuads(f)
f.write("</quads>\n")
#print bsys.time() - start_time,"seconds. "
#start_time = bsys.time()
print("Writing graph file --> \t")
with open(sPath + "/graph.xml", "w", encoding="utf8", newline="\n") as f:
f.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
f.write("<graph>\n")
f.write(" <!-- First define all nodes of the graph, and what quads they represent -->\n")
f.write(" <node-list from-quad=\"%d\" to-quad=\"%d\"/> <!-- map each quad to a node -->\n"\
%(0, lSorted[-1].getLastQuadIndex()))
f.write(" <!-- Define the main loop -->\n")
last_main = None
for i in lSorted:
if i.isMain():
last_main = i
else:
break
# The main driveline is written as a simple loop
f.write(" <edge-loop from=\"%d\" to=\"%d\"/>\n" %
(0, last_main.getLastQuadIndex()) )
# Each non-main driveline writes potentially three entries in the
# graph file: connection to the beginning of this driveline, the
# driveline quads themselves, and a connection from the end of the
# driveline to another driveline. But this can result in edges being
# written more than once: consider two non-main drivelines A and B
# which are connected to each other. Then A will write the edge from
# A to B as its end connection, and B will write the same connection
# as its begin connection. To avoid this, we keep track of all
# written from/to edges, and only write one if it hasn't been written.
dWrittenEdges={}
# Now write the remaining drivelines
for driveline in lSorted:
# Mainline was already written, so ignore it
if driveline.isMain(): continue
f.write(" <!-- Shortcut %s -->\n"%driveline.getName())
# Write the connection from an already written quad to this
fr = driveline.getFromQuad()
to = driveline.getFirstQuadIndex()
if (fr,to) not in dWrittenEdges:
f.write(" <edge from=\"%d\" to=\"%d\"/>\n" %(fr, to))
#if to.isEnabled() and fr.isEnabled():
# f.write(" <edge from=\"%d\" to=\"%d\"/>\n" %(fr, to))
#elif to.isEnabled():
# f.write(" <!-- %s disabled <edge from=\"%d\" to=\"%d\"/> -->\n" \
# %(fr.getName(), fr, to))
#else:
# f.write(" <!-- %s disabled <edge from=\"%d\" to=\"%d\"/> -->\n"
# %(to.getName(), fr, to))
dWrittenEdges[ (fr, to) ] = 1
if driveline.getFirstQuadIndex()< driveline.getLastQuadIndex():
f.write(" <edge-line from=\"%d\" to=\"%d\"/>\n" \
%(driveline.getFirstQuadIndex(),
driveline.getLastQuadIndex()))
fr = driveline.getLastQuadIndex()
to = driveline.computeSuccessor(lSorted)
if (fr, to) not in dWrittenEdges:
f.write(" <edge from=\"%d\" to=\"%d\"/>\n" %(fr, to))
dWrittenEdges[ (fr, to) ] = 1
f.write("</graph>\n")
#print bsys.time()-start_time,"seconds. "
# Write out a goal line
def writeGoal(self, f, goal):
if len(goal.data.vertices) != 2:
self.log.report({'WARNING'}, "Goal line is not a line made of 2 vertices as expected")
goal_matrix = goal.rotation_euler.to_matrix()
# Transform as matrix @ vector (as done for cannons below); the original
# used vector @ matrix, which applies the inverse rotation.
goal_pt1 = goal_matrix @ goal.data.vertices[0].co + goal.location
goal_pt2 = goal_matrix @ goal.data.vertices[1].co + goal.location
first_goal_string = ""
if stk_utils.getObjectProperty(goal, "first_goal", "false") == "true":
first_goal_string=" first_goal=\"true\" "
f.write(' <goal p1="%.2f %.2f %.2f" p2="%.2f %.2f %.2f" %s/>\n'%\
(goal_pt1[0], goal_pt1[2], goal_pt1[1],
goal_pt2[0], goal_pt2[2], goal_pt2[1],
first_goal_string))
# Writes out all cannon checklines.
def writeCannon(self, f, cannon):
start = cannon
endSegmentName = stk_utils.getObjectProperty(start, "cannonend", "")
if len(endSegmentName) == 0 or endSegmentName not in bpy.data.objects:
self.log.report({'ERROR'}, "Cannon " + cannon.name + " end is not defined")
return
end = bpy.data.objects[endSegmentName]
if len(start.data.vertices) != 2:
self.log.report({'WARNING'}, "Cannon start " + start.name + " is not a line made of 2 vertices as expected")
if len(end.data.vertices) != 2:
self.log.report({'WARNING'}, "Cannon end " + end.name + " is not a line made of 2 vertices as expected")
curvename = stk_utils.getObjectProperty(start, "cannonpath", "")
start_pt1 = start.matrix_world @ start.data.vertices[0].co
start_pt2 = start.matrix_world @ start.data.vertices[1].co
end_pt1 = end.matrix_world @ end.data.vertices[0].co
end_pt2 = end.matrix_world @ end.data.vertices[1].co
f.write(' <cannon p1="%.2f %.2f %.2f" p2="%.2f %.2f %.2f" target-p1="%.2f %.2f %.2f" target-p2="%.2f %.2f %.2f">\n'%\
(start_pt1[0], start_pt1[2], start_pt1[1],
start_pt2[0], start_pt2[2], start_pt2[1],
end_pt1[0], end_pt1[2], end_pt1[1],
end_pt2[0], end_pt2[2], end_pt2[1]))
if len(curvename) > 0:
writeBezierCurve(f, bpy.data.objects[curvename], \
stk_utils.getObjectProperty(start, "cannonspeed", 50.0), "const" )
f.write(' </cannon>\n')
# --------------------------------------------------------------------------
# Writes out all checklines.
# \param lChecks All check meshes
# \param mainDriveline The main driveline, from which the lap
# counting check line is determined.
def writeChecks(self, f, lChecks, mainDriveline):
    # reduce is needed in both the mainDriveline branch and the per-object
    # loop below, so import it once at the top of the function.
    from functools import reduce
    f.write(" <checks>\n")
# A dictionary containing a list of indices of check structures
# that belong to this group.
dGroup2Indices = {"lap":[0]}
# Collect the indices of all check structures for all groups
ind = 1
for obj in lChecks:
name = stk_utils.getObjectProperty(obj, "type", obj.name.lower()).lower()
if len(name) == 0: name = obj.name.lower()
type = stk_utils.getObjectProperty(obj, "type", "")
if type == "cannonstart" or type == "cannonend":
continue
if name!="lap":
name = stk_utils.getObjectProperty(obj, "name", obj.name.lower()).lower()
if name in dGroup2Indices:
dGroup2Indices[name].append(ind)
else:
dGroup2Indices[name] = [ ind ]
ind = ind + 1
print("**** dGroup2Indices:", dGroup2Indices)
if mainDriveline:
lap = mainDriveline.getStartEdge()
strict_lapline = mainDriveline.isStrictLapline()
if lap[0] is None:
return # Invalid driveline (a message will have been printed)
# Take the minimum z coordinate of the two lap line end points
min_h = lap[0][2]
if lap[1][2] < min_h: min_h = lap[1][2]
# The main driveline is always the first entry, so remove
# only the first entry to get the list of all other lap lines
l = dGroup2Indices["lap"]
sSameGroup = reduce(lambda x,y: str(x)+" "+str(y), l, "")
activate = mainDriveline.getActivate()
if activate:
group = activate.lower()
if not group or group not in dGroup2Indices:
self.log.report({'WARNING'}, "Activate group '%s' not found!"%group)
print("Ignored - but lap counting might not work correctly.")
print("Make sure there is an object of type 'check' with")
print("the name '%s' defined."%group)
activate = ""
else:
activate = reduce(lambda x,y: str(x)+" "+str(y), dGroup2Indices[group])
else:
group = ""
activate = ""
self.log.report({'WARNING'}, "Warning : the main driveline does not activate any checkline. Lap counting and kart rescue will not work correctly.")
else:
# No main drive defined, print a warning and add some dummy
# driveline (makes the rest of this code easier)
lap = [ [-1, 0], [1, 0] ]
min_h = 0
sSameGroup = ""
activate = ""
strict_lapline = True
if sSameGroup:
sSameGroup="same-group=\"%s\""%sSameGroup.strip()
if activate:
activate = "other-ids=\"%s\""%activate
if not strict_lapline:
f.write(" <check-lap kind=\"lap\" %s %s />\n"%(sSameGroup, activate))
else:
f.write(" <check-line kind=\"lap\" p1=\"%.2f %.2f\" p2=\"%.2f %.2f\"\n"% \
(lap[0][0], lap[0][1],
lap[1][0], lap[1][1] ) )
f.write(" min-height=\"%.2f\" %s %s/>\n"% (min_h, sSameGroup, activate) )
ind = 1
for obj in lChecks:
try:
type = stk_utils.getObjectProperty(obj, "type", "")
if type == "cannonstart":
self.writeCannon(f, obj)
continue
elif type == "cannonend":
continue
elif type == "goal":
self.writeGoal(f, obj)
continue
mesh = obj.data.copy()
# Convert to world space
mesh.transform(obj.matrix_world)
# One of lap, activate, toggle, ambient
activate = stk_utils.getObjectProperty(obj, "activate", "")
kind=" "
if activate:
group = activate.lower()
if group not in dGroup2Indices:
self.log.report({'WARNING'}, "Activate group '%s' not found!"%group)
print("Ignored - but lap counting might not work correctly.")
print("Make sure there is an object of type 'check' with")
print("the name '%s' defined."%group)
continue
s = reduce(lambda x,y: str(x)+" "+str(y), dGroup2Indices[group])
kind = " kind=\"activate\" other-ids=\"%s\" "% s
toggle = stk_utils.getObjectProperty(obj, "toggle", "")
if toggle:
group = toggle.lower()
if group not in dGroup2Indices:
self.log.report({'WARNING'}, "Toggle group '%s' not found!"%group)
print("Ignored - but lap counting might not work correctly.")
print("Make sure there is an object of type 'check' with")
print("the name '%s' defined."%group)
continue
s = reduce(lambda x,y: str(x)+" "+str(y), dGroup2Indices[group])
kind = " kind=\"toggle\" other-ids=\"%s\" "% s
lap = stk_utils.getObjectProperty(obj, "type", obj.name).upper()
if lap[:3]=="LAP":
kind = " kind=\"lap\" " # xml needs a value for an attribute
activate = stk_utils.getObjectProperty(obj, "activate", "")
if activate:
group = activate.lower()
if group not in dGroup2Indices:
self.log.report({'WARNING'}, "Activate group '%s' not found for lap line!"%group)
print("Ignored - but lap counting might not work correctly.")
print("Make sure there is an object of type 'check' with")
print("the name '%s' defined."%group)
continue
s = reduce(lambda x,y: str(x)+" "+str(y), dGroup2Indices[group])
kind = "%sother-ids=\"%s\" "% (kind, s)
ambient = stk_utils.getObjectProperty(obj, "ambient", "").upper()
if ambient:
kind=" kind=\"ambient-light\" "
# Get the group name this object belongs to. If the objects
# is of type lap then 'lap' is the group name, otherwise
# it's taken from the name property (or the object name).
name = stk_utils.getObjectProperty(obj, "type", obj.name.lower()).lower()
if name!="lap":
name = stk_utils.getObjectProperty(obj, "name", obj.name.lower()).lower()
if len(name) == 0: name = obj.name.lower()
# Get the list of indices of this group, excluding
# the index of the current object. So create a copy
# of the list and remove the current index
l = dGroup2Indices[name][:]
if ind in l: l.remove(ind)
sSameGroup = reduce(lambda x,y: str(x)+" "+str(y), l, "")
ind = ind + 1
if len(mesh.vertices)==2: # Check line
min_h = mesh.vertices[0].co[2]
if mesh.vertices[1].co[2] < min_h: min_h = mesh.vertices[1].co[2]
f.write(" <check-line%sp1=\"%.2f %.2f\" p2=\"%.2f %.2f\"\n" %
(kind, mesh.vertices[0].co[0], mesh.vertices[0].co[1],
mesh.vertices[1].co[0], mesh.vertices[1].co[1] ) )
f.write(" min-height=\"%.2f\" same-group=\"%s\"/>\n" \
% (min_h, sSameGroup.strip()) )
else:
radius = 0
for v in mesh.vertices:
r = (obj.location[0]-v.co[0])*(obj.location[0]-v.co[0]) + \
    (obj.location[1]-v.co[1])*(obj.location[1]-v.co[1]) + \
    (obj.location[2]-v.co[2])*(obj.location[2]-v.co[2])
if r > radius:
radius = r
radius = math.sqrt(radius)
inner_radius = stk_utils.getObjectProperty(obj, "inner_radius", radius)
color = stk_utils.getObjectProperty(obj, "color", "255 120 120 120")
f.write(" <check-sphere%sxyz=\"%.2f %.2f %.2f\" radius=\"%.2f\"\n" % \
(kind, obj.location[0], obj.location[2], obj.location[1], radius) )
f.write(" same-group=\"%s\"\n"%sSameGroup.strip())
f.write(" inner-radius=\"%.2f\" color=\"%s\"/>\n"% \
(inner_radius, color) )
except:
self.log.report({'ERROR'}, "Error exporting checkline " + obj.name + ", make sure it is properly formed")
f.write(" </checks>\n")
# ==============================================================================
# A special class to store a driveline.
class Driveline:
def __init__(self, driveline, is_main, log):
self.name = driveline.name
self.is_main = is_main
self.log = log
# Transform the mesh to the right coordinates.
self.mesh = driveline.data.copy()
self.mesh.transform(driveline.matrix_world)
# Convert the mesh into a dictionary: each vertex is a key to a
# list of neighbours.
self.createNeighbourDict()
self.defineStartVertex()
self.convertToLists()
self.from_quad=None
self.from_driveline=None
self.to_driveline=None
self.is_last_main = 0
# Invisible drivelines are not shown in the minimap
self.invisible = stk_utils.getObjectProperty(driveline, "invisible", "false")
self.ai_ignore = stk_utils.getObjectProperty(driveline, "ai_ignore", "false")
self.direction = stk_utils.getObjectProperty(driveline, "direction", "both")
self.enabled = not stk_utils.getObjectProperty(driveline, "disable", 0)
self.activate = stk_utils.getObjectProperty(driveline, "activate", None)
self.strict_lap = stk_utils.convertTextToYN(stk_utils.getObjectProperty(driveline,
"strict_lapline", "N") ) \
== "Y"
self.min_height_testing = stk_utils.getObjectProperty(driveline, "min_height_testing", -1.0)
self.max_height_testing = stk_utils.getObjectProperty(driveline, "max_height_testing", 5.0)
# --------------------------------------------------------------------------
# Returns the name of the driveline
def getName(self):
return self.name
# --------------------------------------------------------------------------
# Returns if this is a main driveline or not.
def isMain(self):
return self.is_main
# --------------------------------------------------------------------------
# Returns if this driveline is disabled.
def isEnabled(self):
return self.enabled
# --------------------------------------------------------------------------
# Returns the 'activate' property of the driveline object.
def getActivate(self):
return self.activate
# --------------------------------------------------------------------------
# Returns if this driveline requests strict lap counting (i.e. exactly
# crossing the line between the start vertices)
def isStrictLapline(self):
return self.strict_lap
# --------------------------------------------------------------------------
# Stores that the start quad of this driveline is connected to quad
# quad_index of quad driveline.
def setFromQuad(self, driveline, quad_index):
# Convert the relative to driveline quad index to the global index:
self.from_quad = driveline.getFirstQuadIndex()+quad_index
self.from_driveline = driveline
# --------------------------------------------------------------------------
def setToDriveline(self, driveline):
self.to_driveline = driveline
# --------------------------------------------------------------------------
# Returns the global index of the quad this start point is connected to.
def getFromQuad(self):
return self.from_quad
# --------------------------------------------------------------------------
# Returns the number of quads of this driveline
def getNumberOfQuads(self):
return len(self.lCenter)
# --------------------------------------------------------------------------
# Stores the index of the first quad in this driveline in the global
# quad index.
def setStartQuadIndex(self, n):
self.global_quad_index_start = n
# --------------------------------------------------------------------------
# Returns the start index for this driveline in the global numbering of
# all quads
def getFirstQuadIndex(self):
return self.global_quad_index_start
# --------------------------------------------------------------------------
# Returns the global index of the last quad in this driveline.
def getLastQuadIndex(self):
return self.global_quad_index_start+len(self.lCenter)-1
# --------------------------------------------------------------------------
# Returns the start edge, which is the lap counting line for the main
# drivelines. See defineStartVertex() for setting self.start_line.
def getStartEdge(self):
return self.start_line
# --------------------------------------------------------------------------
# This driveline is the last main driveline. This means that it will get
# one additional quad added to connect this to the very first quad. Since
# the values are not actually needed (see write function), the arrays have
# to be made one element larger to account for this additional quad (e.g.
# in calls to getNumberOfQuads etc).
def setIsLastMain(self, first_driveline):
self.is_last_main = 1
cp=[]
for i in range(3):
if self.lRight[-1] is None or self.lLeft[-1] is None:
return # Invalid driveline (an error message will have been printed)
cp.append((self.mesh.vertices[self.lLeft[-1]].co[i] +
first_driveline.mesh.vertices[first_driveline.lLeft[0]].co[i]+
self.mesh.vertices[self.lRight[-1]].co[i] +
first_driveline.mesh.vertices[first_driveline.lRight[0]].co[i])*0.25)
self.lCenter.append(cp)
self.lLeft.append(None)
self.lRight.append(None)
# --------------------------------------------------------------------------
# This creates a dictionary for a mesh which contains for each vertex a list
# of all its neighbours.
def createNeighbourDict(self):
self.dNext = {}
for e in self.mesh.edges:
if e.vertices[0] in self.dNext:
self.dNext[e.vertices[0]].append(e.vertices[1])
else:
self.dNext[e.vertices[0]] = [e.vertices[1]]
if e.vertices[1] in self.dNext:
self.dNext[e.vertices[1]].append(e.vertices[0])
else:
self.dNext[e.vertices[1]] = [e.vertices[0]]
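# Hedged worked example (not part of the original): for a mesh with edges
# (0,1), (1,2) and (0,3) the dictionary becomes
#   {0: [1, 3], 1: [0, 2], 2: [1], 3: [0]},
# i.e. each vertex maps to the list of vertices it shares an edge with.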
# --------------------------------------------------------------------------
# This helper function determines the start vertices for a driveline.
# Details are documented in convertDrivelines. It stores the two
# 'antenna' vertices that mark the start of the driveline in self.lStart.
def defineStartVertex(self):
# Find all vertices with exactly one neighbour (the 'antenna' end points)
self.lStart = []
for i in self.dNext.keys():
if len(self.dNext[i])==1:
self.lStart.append( i )
if len(self.lStart)!=2:
self.log.report({'ERROR'}, "Driveline '%s' is incorrectly formed, cannot find the two 'antennas' that indicate where the driveline starts." % self.name)
self.start_point = (0,0,0)
return
print("self.lStart[0] =", self.lStart[0])
print("self.lStart[1] =", self.lStart[1])
start_coord_1 = self.mesh.vertices[self.lStart[0]].co
start_coord_2 = self.mesh.vertices[self.lStart[1]].co
# Save the middle of the first quad, which is used later for nearest
# quads computations.
self.start_point = ((start_coord_1[0] + start_coord_2[0])*0.5,
(start_coord_1[1] + start_coord_2[1])*0.5,
(start_coord_1[2] + start_coord_2[2])*0.5 )
# --------------------------------------------------------------------------
# Returns the startline of this driveline
def getStartPoint(self):
return self.start_point
# --------------------------------------------------------------------------
# Returns the distance of the start point from a given point
def getStartDistanceTo(self, p):
dx=self.start_point[0]-p[0]
dy=self.start_point[1]-p[1]
dz=self.start_point[2]-p[2]
return dx*dx+dy*dy+dz*dz
# --------------------------------------------------------------------------
# Convert the dictionary of list of neighbours to two lists - one for the
# left side, one for the right side.
def convertToLists(self):
if len(self.lStart) < 2:
self.lLeft = [None, None]
self.lRight = [None, None]
self.start_line = (None, None)
self.end_point = (0,0,0)
self.lCenter = []
return
self.lLeft = [self.lStart[0], self.dNext[self.lStart[0]][0]]
self.lRight = [self.lStart[1], self.dNext[self.lStart[1]][0]]
self.lCenter = []
# this is for error handling only
processed_vertices = {}
processed_vertices[self.lStart[0]] = True
processed_vertices[self.lStart[1]] = True
# The quads can be either clockwise or counter-clockwise oriented. STK
# expects counter-clockwise, so if the orientation is wrong, swap
# left and right side.
left_0_coord = self.mesh.vertices[self.lLeft[0]].co
#left_1_coord = self.mesh.vertices[self.lLeft[1]].co
right_0_coord = self.mesh.vertices[self.lRight[0]].co
right_1_coord = self.mesh.vertices[self.lRight[1]].co
if (right_1_coord[0] - left_0_coord[0])*(right_0_coord[1] - left_0_coord[1]) \
- (right_1_coord[1] - left_0_coord[1])*(right_0_coord[0] - left_0_coord[0]) > 0:
r = self.lRight
self.lRight = self.lLeft
self.lLeft = r
# Save start edge, which will become the main lap counting line
# (on the main driveline). This must be done here after potentially
# switching since STK assumes that the first point of a check line (to
# which the first line of the main driveline is converted) is on the
# left side (this only applies for the lap counting line, see
# Track::setStartCoordinates/getStartTransform).
self.start_line = (self.mesh.vertices[self.lLeft[1]].co, self.mesh.vertices[self.lRight[1]].co)
count=0
# Just in case that we have an infinite loop due to a malformed graph:
# stop after 10000 vertices
max_count = 10000
warning_printed = 0
while count < max_count:
count = count + 1
processed_vertices[self.lLeft[-1]] = True
# Get all neighbours. One is the previous point, one
# points to the opposite side - we need the other one.
neighb = self.dNext[self.lLeft[-1]]
next_left = []
for i in neighb:
if i==self.lLeft[-2]: continue # pointing backwards
if i==self.lRight[-1]: continue # to opposite side
next_left.append(i)
if len(next_left) == 0:
# No new element found --> this must be the end
# of the list!!
break
if len(next_left)!=1 and not warning_printed:
lcoord = self.mesh.vertices[self.lLeft[-1]].co
rcoord = self.mesh.vertices[self.lRight[-1]].co
self.log.report({'WARNING'}, "Broken driveline at or around point ({0}, {1}, {2})".format\
(lcoord[0], lcoord[1], lcoord[2]))
print("Potential successors :")
for i in range(len(next_left)):
nextco = self.mesh.vertices[next_left[i]].co
print ("Successor %d: %f %f %f" % \
(i, nextco[0], nextco[1], nextco[2]))
print ("It might also possible that the corresponding right driveline point")
print (rcoord[0],rcoord[1],rcoord[2])
print ("has some inconsistencies.")
print ("The drivelines will most certainly not be useable.")
print ("Further warnings are likely and will be suppressed.")
warning_printed = 1
operator.report({'ERROR'}, "Problems with driveline detected, check console for details!")
# Blender.Draw.PupMenu("Problems with driveline detected, check console for details!")
self.lLeft.append(next_left[0])
processed_vertices[self.lRight[-1]] = True
# Same for other side:
neighb = self.dNext[self.lRight[-1]]
next_right = []
for i in neighb:
if i==self.lRight[-2]: continue # pointing backwards
# Note lLeft has already a new element appended,
# so we have to check for the 2nd last element!
if i==self.lLeft[-2]: continue # to opposite side
next_right.append(i)
if len(next_right)==0:
lcoord = self.mesh.vertices[self.lLeft[-1]].co
rcoord = self.mesh.vertices[self.lRight[-1]].co
self.log.report({'WARNING'}, "Malformed driveline at or around points ({0}, {1}, {2}) and ({3}, {4}, {5})".format\
(lcoord[0],lcoord[1],lcoord[2],
rcoord[0],rcoord[1],rcoord[2]))
print ("No more vertices on right side of quad line, but there are")
print ("still points on the left side. Check the points:")
print ("left: ", lcoord[0],lcoord[1],lcoord[2])
print ("right: ", rcoord[0],rcoord[1],rcoord[2])
print ("Last left point is ignored.")
break
if len(next_right)!=1 and not warning_printed:
lcoord = self.mesh.vertices[self.lLeft[-1]].co
rcoord = self.mesh.vertices[self.lRight[-1]].co
self.log.report({'ERROR'}, "Invalid driveline at or around point ({0}, {1}, {2})".format\
(rcoord[0],rcoord[1],rcoord[2]))
print ("Warning: More than one potential succesor found for right driveline point")
print (rcoord[0],rcoord[1],rcoord[2],":")
#for i in range(len(next_right)):
# print ("Successor %d: %f %f %f" % \
# (i,next_right[i][0],next_right[i][1],next_right[i][2]))
print ("It might also possible that the corresponding left driveline point")
print (lcoord[0],lcoord[1],lcoord[2])
print ("has some inconsistencies.")
print ("The drivelines will most certainly not be useable.")
print ("Further warnings are likely and will be suppressed.")
warning_printed = 1
operator.report({'ERROR'}, "Problems with driveline detected!")
break
self.lRight.append(next_right[0])
processed_vertices[self.lRight[-1]] = True
processed_vertices[self.lLeft[-1]] = True
processed_vertices[self.lRight[-2]] = True
processed_vertices[self.lLeft[-2]] = True
cp=[]
for i in range(3):
cp.append((self.mesh.vertices[self.lLeft[-2]].co[i] +
self.mesh.vertices[self.lLeft[-1]].co[i] +
self.mesh.vertices[self.lRight[-2]].co[i] +
self.mesh.vertices[self.lRight[-1]].co[i])*0.25)
self.lCenter.append(cp)
if count>=max_count and not warning_printed:
self.log.report({'WARNING'}, "Warning, Only the first %d vertices of driveline '%s' are exported" %\
(max_count, self.name))
if warning_printed != 1:
not_connected = None
not_connected_distance = 99999
for v in self.dNext:
if not v in processed_vertices:
# find closest connected vertex (this is only to improve the error message)
for pv in processed_vertices:
dist = (self.mesh.vertices[v].co - self.mesh.vertices[pv].co).length
if dist < not_connected_distance:
not_connected_distance = dist
not_connected = v
if not_connected:
self.log.report({'WARNING'}, "Warning, driveline '%s' appears to be broken in separate sections. Vertex at %f %f %f is not connected with the rest" % \
(self.name,
self.mesh.vertices[not_connected].co[0],
self.mesh.vertices[not_connected].co[1],
self.mesh.vertices[not_connected].co[2]))
# Now remove the first two points, which are only used to indicate
# the starting point:
del self.lLeft[0]
del self.lRight[0]
self.end_point =((self.mesh.vertices[self.lLeft[-1]].co[0] +
self.mesh.vertices[self.lRight[-1]].co[0])*0.5,
(self.mesh.vertices[self.lLeft[-1]].co[1] +
self.mesh.vertices[self.lRight[-1]].co[1])*0.5,
(self.mesh.vertices[self.lLeft[-1]].co[2] +
self.mesh.vertices[self.lRight[-1]].co[2])*0.5 )
# --------------------------------------------------------------------------
# Returns the end point of this driveline
def getEndPoint(self):
return self.end_point
# --------------------------------------------------------------------------
def getDistanceToStart(self, lDrivelines):
return self.getDistanceTo(self.start_point, lDrivelines)
# --------------------------------------------------------------------------
# Returns the shortest distance to any of the drivelines in the list
# lDrivelines from the given point p (it's actually a static function).
# The distance is defined to be the shortest distance from the
# start point of this driveline to all quads of all drivelines in
# lDrivelines. This function returns the distance, the index of the
# driveline in lDrivelines, and the local index of the quad within this
# driveline as a tuple.
def getDistanceTo(self, p, lDrivelines):
if not lDrivelines: return (None, None, None)
(min_dist, min_quad_index) = lDrivelines[0].getMinDistanceToPoint(p)
min_driveline_index = 0
for i in range(1, len(lDrivelines)):
if lDrivelines[i]==self: continue # ignore itself
(dist, quad_index) = lDrivelines[i].getMinDistanceToPoint(p)
if dist < min_dist:
min_dist = dist
min_quad_index = quad_index
min_driveline_index = i
return (min_dist, min_driveline_index, min_quad_index)
# --------------------------------------------------------------------------
# Returns the minimum distance from the center point of each quad to the
# point p.
def getMinDistanceToPoint(self, p):
pCenter = self.lCenter[0]
dx = pCenter[0]-p[0]
dy = pCenter[1]-p[1]
dz = pCenter[2]-p[2]
min_dist = dx*dx+dy*dy+dz*dz
min_index = 0
for i in range(1, len(self.lCenter)):
pCenter = self.lCenter[i]
dx = pCenter[0]-p[0]
dy = pCenter[1]-p[1]
dz = pCenter[2]-p[2]
d = dx*dx+dy*dy+dz*dz
if d<min_dist:
min_dist = d
min_index = i
return (min_dist, min_index)
# --------------------------------------------------------------------------
# Determine the driveline from lSorted which is closest to this driveline's
# endpoint (closest meaning: having a quad that is closest).
def computeSuccessor(self, lSorted):
(dist, driveline_index, quad_index)=self.getDistanceTo(self.end_point,
lSorted)
return quad_index + lSorted[driveline_index].getFirstQuadIndex()
# --------------------------------------------------------------------------
# Writes the quads into a file.
def writeQuads(self, f):
if self.lLeft[0] is None or self.lRight[0] is None:
return # Invalid driveline (a message will have been printed)
if self.lLeft[1] is None or self.lRight[1] is None:
return # Invalid driveline (a message will have been printed)
l = self.mesh.vertices[self.lLeft[0]].co
r = self.mesh.vertices[self.lRight[0]].co
l1 = self.mesh.vertices[self.lLeft[1]].co
r1 = self.mesh.vertices[self.lRight[1]].co
if self.invisible and self.invisible=="true":
sInv = " invisible=\"yes\" "
else:
sInv = " "
# AI-ignore will be applied to the first and last quad (to account for forward and reverse mode)
if self.ai_ignore and self.ai_ignore=="true":
sAIIgnore = "ai-ignore=\"yes\" "
else:
sAIIgnore = " "
if self.direction and self.direction != "both":
sDirection = "direction=\"" + self.direction + "\" "
else:
sDirection = " "
max_index = len(self.lLeft) - 1
# If this is the last main driveline, the last quad is a dummy element
# added by setLastMain(). So the number of elements is decreased by
# one.
if self.is_last_main:
max_index = max_index - 1
f.write(" <!-- Driveline: %s -->\n"%self.name)
# Note that only the first quad must be marked with ai-ignore
# (this means that the AI will not drive to the first quad, but
# if it should end up somewhere on the shortcut, it will
# continue to drive along the shortcut).
f.write(" <quad%s%s%sp0=\"%.3f %.3f %.3f\" p1=\"%.3f %.3f %.3f\" p2=\"%.3f %.3f %.3f\" p3=\"%.3f %.3f %.3f\"/>\n" \
%(sInv, sAIIgnore, sDirection, l[0],l[2],l[1], r[0],r[2],r[1], r1[0],r1[2],r1[1], l1[0],l1[2],l1[1]) )
for i in range(1, max_index):
if self.lRight[i+1] is None: return # broken driveline (messages will already have been printed)
l1 = self.mesh.vertices[self.lLeft[i+1]].co
r1 = self.mesh.vertices[self.lRight[i+1]].co
f.write(" <quad%s%s%sp0=\"%d:3\" p1=\"%d:2\" p2=\"%.3f %.3f %.3f\" p3=\"%.3f %.3f %.3f\"/>\n" \
%(sInv,sAIIgnore if i == max_index - 1 else "",sDirection,self.global_quad_index_start+i-1, self.global_quad_index_start+i-1, \
r1[0],r1[2],r1[1], l1[0],l1[2],l1[1]) )
if self.is_last_main:
f.write(" <quad%sp0=\"%d:3\" p1=\"%d:2\" p2=\"0:1\" p3=\"0:0\"/>\n"\
% (sInv, self.global_quad_index_start+max_index-1, \
self.global_quad_index_start+max_index-1))
|
from typing import Callable, Dict, Optional, Tuple
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound
from rest_framework.request import Request
from rest_framework.response import Response
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.person import delete_person
from ee.clickhouse.queries.clickhouse_retention import ClickhouseRetention
from ee.clickhouse.queries.clickhouse_stickiness import ClickhouseStickiness
from ee.clickhouse.queries.funnels import ClickhouseFunnelPersons, ClickhouseFunnelTrendsPersons
from ee.clickhouse.queries.trends.lifecycle import ClickhouseLifecycle
from ee.clickhouse.sql.person import GET_PERSON_PROPERTIES_COUNT
from posthog.api.person import PersonViewSet
from posthog.api.utils import format_next_absolute_url
from posthog.constants import INSIGHT_FUNNELS, FunnelVizType
from posthog.decorators import cached_function
from posthog.models import Event, Filter, Person
class ClickhousePersonViewSet(PersonViewSet):
lifecycle_class = ClickhouseLifecycle
retention_class = ClickhouseRetention
stickiness_class = ClickhouseStickiness
@action(methods=["GET", "POST"], detail=False)
def funnel(self, request: Request, **kwargs) -> Response:
if request.user.is_anonymous or not request.user.team:
return Response(data=[])
results, next_url = self.calculate_funnel_persons(request)["result"]
return Response(data={"results": [{"people": results, "count": len(results)}], "next": next_url})
@cached_function()
def calculate_funnel_persons(self, request: Request) -> Dict[str, Tuple[list, Optional[str]]]:
if request.user.is_anonymous or not request.user.team:
return {"result": ([], None)}
team = request.user.team
filter = Filter(request=request)
funnel_class: Callable = ClickhouseFunnelPersons
if filter.funnel_viz_type == FunnelVizType.TRENDS:
funnel_class = ClickhouseFunnelTrendsPersons
results, should_paginate = funnel_class(filter, team).run()
limit = filter.limit if filter.limit else 100
next_url = format_next_absolute_url(request, filter.offset, limit) if should_paginate else None
# cached_function expects a dict with the key result
return {"result": (results, next_url)}
def get_properties(self, request: Request):
rows = sync_execute(GET_PERSON_PROPERTIES_COUNT, {"team_id": self.team.pk})
return [{"name": name, "count": count} for name, count in rows]
def destroy(self, request: Request, pk=None, **kwargs): # type: ignore
try:
person = Person.objects.get(team=self.team, pk=pk)
events = Event.objects.filter(team=self.team, distinct_id__in=person.distinct_ids)
events.delete()
delete_person(
person.uuid, person.properties, person.is_identified, delete_events=True, team_id=self.team.pk
)
person.delete()
return Response(status=204)
except Person.DoesNotExist:
raise NotFound(detail="Person not found.")
|
# -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief: Corpus for model
import sys
from codecs import open
from collections import Counter
# Define constants associated with the usual special tokens.
PAD_TOKEN = 'PAD'
GO_TOKEN = 'GO'
EOS_TOKEN = 'EOS'
UNK_TOKEN = 'UNK'
def save_word_dict(dict_data, save_path):
with open(save_path, 'w', encoding='utf-8') as f:
for k, v in dict_data.items():
f.write("%s\t%d\n" % (k, v))
def load_word_dict(save_path):
dict_data = dict()
with open(save_path, 'r', encoding='utf-8') as f:
for line in f:
items = line.strip().split()
try:
dict_data[items[0]] = int(items[1])
except IndexError:
print('error', line)
return dict_data
def read_vocab(input_texts, max_size=50000, min_count=5):
token_counts = Counter()
special_tokens = [PAD_TOKEN, GO_TOKEN, EOS_TOKEN, UNK_TOKEN]
for line in input_texts:
for char in line.strip():
char = char.strip()
if not char:
continue
token_counts.update(char)
# Sort word count by value
count_pairs = token_counts.most_common()
vocab = [k for k, v in count_pairs if v >= min_count]
# Insert the special tokens at the beginning
vocab[0:0] = special_tokens
full_token_id = list(zip(vocab, range(len(vocab))))[:max_size]
vocab2id = dict(full_token_id)
return vocab2id
def read_samples_by_string(path):
with open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if not line:
continue
parts = line.lower().strip().split('\t')
if len(parts) != 2:
print('error ', line)
continue
source, target = parts[0], parts[1]
yield source, target
def build_dataset(path):
print('Read data, path:{0}'.format(path))
sources, targets = [], []
for source, target in read_samples_by_string(path):
sources.append(source)
targets.append(target)
return sources, targets
def show_progress(curr, total, time=""):
prog_ = int(round(100.0 * float(curr) / float(total)))
dstr = '[' + '>' * int(round(prog_ / 4)) + ' ' * (25 - int(round(prog_ / 4))) + ']'
sys.stdout.write(dstr + str(prog_) + '%' + time + '\r')
sys.stdout.flush()
def str2id(s, vocab2id, maxlen):
# Convert characters to ids
return [vocab2id.get(c.strip(), vocab2id[UNK_TOKEN]) for c in s[:maxlen] if c.strip()]
def padding(x, vocab2id):
# Pad each sequence to the max length within the batch
ml = max([len(i) for i in x])
return [i + [vocab2id[PAD_TOKEN]] * (ml - len(i)) for i in x]
def id2str(ids, id2vocab):
# Convert ids back to text; ids not found fall back to UNK_TOKEN
return ''.join([id2vocab.get(i, UNK_TOKEN) for i in ids])
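# Hedged usage sketch (not part of the original module): a minimal round
# trip through the helpers above, on made-up sample sentences.
if __name__ == '__main__':
    texts = ['hello world', 'hello there']
    vocab2id = read_vocab(texts, min_count=1)
    id2vocab = {v: k for k, v in vocab2id.items()}
    ids = [str2id(t, vocab2id, maxlen=20) for t in texts]
    padded = padding(ids, vocab2id)
    print(padded)                                  # equal-length id lists
    print([id2str(seq, id2vocab) for seq in ids])  # spaces were stripped by str2id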
|
'''
Enchant Spelling: Implements spelling backend based on enchant.
'''
import enchant
from kivy.core.spelling import SpellingBase, NoSuchLangError
from kivy.compat import PY2
class SpellingEnchant(SpellingBase):
'''
Spelling backend based on the enchant library.
'''
def __init__(self, language=None):
self._language = None
super(SpellingEnchant, self).__init__(language)
def select_language(self, language):
try:
self._language = enchant.Dict(language)
except enchant.DictNotFoundError:
err = 'Enchant Backend: No language for "%s"' % (language, )
raise NoSuchLangError(err)
def list_languages(self):
# Note: We do NOT return enchant.list_dicts because that also returns
# the enchant dict objects and not only the language identifiers.
return enchant.list_languages()
def check(self, word):
if not word:
return None
return self._language.check(word)
def suggest(self, fragment):
suggestions = self._language.suggest(fragment)
# Don't show suggestions that are invalid
suggestions = [s for s in suggestions if self.check(s)]
if PY2:
suggestions = [s.decode('utf-8') for s in suggestions]
return suggestions
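# Hedged usage sketch (comments only; assumes an 'en_US' enchant dictionary
# is installed on the system):
#   s = SpellingEnchant('en_US')
#   s.check('helo')    # False
#   s.suggest('helo')  # e.g. ['hello', 'help', ...]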
|
# from __future__ import division
# import unittest
# import numpy as np
# # from mock import Mock
# from itertools import product
# from os.path import dirname, join, abspath, exists
# import sys
# ROOTDIR = dirname(dirname(dirname(abspath(__file__))))
# sys.path.append(dirname(dirname(__file__)))
# print dirname(dirname(__file__))
# from utils.datatype_handling import save_output
# import ops_bool
# from data_array import Site
# from functools import partial
#
#
# # Sample data and labels
# obj = ['nuclei', 'cytoplasm']
# ch = ['DAPI', 'YFP']
# prop = ['area', 'mean_intensity', 'min_intensity']
# labels = [i for i in product(obj, ch, prop)]
# data = np.zeros((len(labels), 10, 5)) # 10 cells, 5 frames
# data[:, :, 1:] = 10
#
# DATA_PATH = join(ROOTDIR, 'data', 'tests.npz')
#
# class Test_ops_filter(unittest.TestCase):
# def setUp(self):
# if not exists(DATA_PATH):
# save_output(data, labels, DATA_PATH)
#
#
# def test_normalize_data(self):
# site = Site(dirname(DATA_PATH), 'tests.npz')
# op = partial(ops_bool.filter_frames_by_range)
# site.operate(op, pid=1)
#
# if __name__ == '__main__':
# unittest.main()
|
import tensorflow as tf
def Conv_1D_Block(x, model_width, kernel, strides):
# 1D Convolutional Block with pre-activation ordering (BatchNorm -> ReLU -> Conv)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv1D(model_width, kernel, strides=strides, padding="same", kernel_initializer="he_normal")(x)
return x
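# Hedged shape check (illustrative, not part of the original file): with
# padding="same", a stride-2 block halves the sequence length, e.g.
#   x = tf.keras.Input((1024, 1)); y = Conv_1D_Block(x, 16, 7, 2)  # y.shape == (None, 512, 16)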
def stem(inputs, num_filters):
# Construct the Stem Convolution Group
# inputs : input vector
# First Convolutional layer, where pooled feature maps will be reduced by 75%
conv = Conv_1D_Block(inputs, num_filters, 7, 2)
if conv.shape[1] <= 2:
pool = tf.keras.layers.MaxPooling1D(pool_size=1, strides=2, padding="valid")(conv)
else:
pool = tf.keras.layers.MaxPooling1D(pool_size=2, strides=2, padding="valid")(conv)
return pool
def conv_block(inputs, num_filters):
# Construct Block of Convolutions without Pooling
# x : input into the block
# n_filters: number of filters
conv = Conv_1D_Block(inputs, num_filters, 3, 2)
conv = Conv_1D_Block(conv, num_filters, 3, 2)
return conv
def residual_block(inputs, num_filters):
# Construct a Residual Block of Convolutions
# x : input into the block
# n_filters: number of filters
shortcut = inputs
#
conv = Conv_1D_Block(inputs, num_filters, 3, 1)
conv = Conv_1D_Block(conv, num_filters, 3, 1)
conv = tf.keras.layers.Add()([conv, shortcut])
out = tf.keras.layers.Activation('relu')(conv)
return out
def residual_group(inputs, num_filters, n_blocks, conv=True):
# x : input to the group
# n_filters: number of filters
# n_blocks : number of blocks in the group
# conv : flag to include the convolution block connector
out = inputs
for i in range(n_blocks):
out = residual_block(out, num_filters)
# Double the size of filters and reduce feature maps by 75% (strides=2, 2) to fit the next Residual Group
if conv:
out = conv_block(out, num_filters * 2)
return out
def stem_bottleneck(inputs, num_filters):
# Construct the Stem Convolution Group
# inputs : input vector
# First Convolutional layer, where pooled feature maps will be reduced by 75%
conv = Conv_1D_Block(inputs, num_filters, 7, 2)
if conv.shape[1] <= 2:
pool = tf.keras.layers.MaxPooling1D(pool_size=1, strides=2, padding="valid")(conv)
else:
pool = tf.keras.layers.MaxPooling1D(pool_size=2, strides=2, padding="valid")(conv)
return pool
def conv_block_bottleneck(inputs, num_filters):
# Construct Block of Convolutions without Pooling
# x : input into the block
# n_filters: number of filters
conv = Conv_1D_Block(inputs, num_filters, 3, 2)
conv = Conv_1D_Block(conv, num_filters, 3, 2)
conv = Conv_1D_Block(conv, num_filters, 3, 2)
return conv
def residual_block_bottleneck(inputs, num_filters):
# Construct a Residual Block of Convolutions
# x : input into the block
# n_filters: number of filters
shortcut = Conv_1D_Block(inputs, num_filters * 4, 1, 1)
#
conv = Conv_1D_Block(inputs, num_filters, 1, 1)
conv = Conv_1D_Block(conv, num_filters, 3, 1)
conv = Conv_1D_Block(conv, num_filters * 4, 1, 1)
conv = tf.keras.layers.Add()([conv, shortcut])
out = tf.keras.layers.Activation('relu')(conv)
return out
def residual_group_bottleneck(inputs, num_filters, n_blocks, conv=True):
# x : input to the group
# n_filters: number of filters
# n_blocks : number of blocks in the group
# conv : flag to include the convolution block connector
out = inputs
for i in range(n_blocks):
out = residual_block_bottleneck(out, num_filters)
# Double the size of filters and reduce feature maps by 75% (strides=2, 2) to fit the next Residual Group
if conv:
out = conv_block_bottleneck(out, num_filters * 2)
return out
def learner18(inputs, num_filters):
# Construct the Learner
x = residual_group(inputs, num_filters, 2) # First Residual Block Group of 64 filters
x = residual_group(x, num_filters * 2, 1) # Second Residual Block Group of 128 filters
x = residual_group(x, num_filters * 4, 1) # Third Residual Block Group of 256 filters
out = residual_group(x, num_filters * 8, 1, False) # Fourth Residual Block Group of 512 filters
return out
def learner34(inputs, num_filters):
# Construct the Learner
x = residual_group(inputs, num_filters, 3) # First Residual Block Group of 64 filters
x = residual_group(x, num_filters * 2, 3) # Second Residual Block Group of 128 filters
x = residual_group(x, num_filters * 4, 5) # Third Residual Block Group of 256 filters
out = residual_group(x, num_filters * 8, 2, False) # Fourth Residual Block Group of 512 filters
return out
def learner50(inputs, num_filters):
# Construct the Learner
x = residual_group_bottleneck(inputs, num_filters, 3) # First Residual Block Group of 64 filters
x = residual_group_bottleneck(x, num_filters * 2, 3) # Second Residual Block Group of 128 filters
x = residual_group_bottleneck(x, num_filters * 4, 5) # Third Residual Block Group of 256 filters
out = residual_group_bottleneck(x, num_filters * 8, 2, False) # Fourth Residual Block Group of 512 filters
return out
def learner101(inputs, num_filters):
# Construct the Learner
x = residual_group_bottleneck(inputs, num_filters, 3) # First Residual Block Group of 64 filters
x = residual_group_bottleneck(x, num_filters * 2, 3) # Second Residual Block Group of 128 filters
x = residual_group_bottleneck(x, num_filters * 4, 22) # Third Residual Block Group of 256 filters
out = residual_group_bottleneck(x, num_filters * 8, 2, False) # Fourth Residual Block Group of 512 filters
return out
def learner152(inputs, num_filters):
# Construct the Learner
x = residual_group_bottleneck(inputs, num_filters, 3) # First Residual Block Group of 64 filters
x = residual_group_bottleneck(x, num_filters * 2, 7) # Second Residual Block Group of 128 filters
x = residual_group_bottleneck(x, num_filters * 4, 35) # Third Residual Block Group of 256 filters
out = residual_group_bottleneck(x, num_filters * 8, 2, False) # Fourth Residual Block Group of 512 filters
return out
def classifier(inputs, class_number):
# Construct the Classifier Group
# inputs : input vector
# class_number : number of output classes
out = tf.keras.layers.Dense(class_number, activation='softmax')(inputs)
return out
def regressor(inputs, feature_number):
# Construct the Regressor Group
# inputs : input vector
# feature_number : number of output features
out = tf.keras.layers.Dense(feature_number, activation='linear')(inputs)
return out
class ResNetv2:
def __init__(self, length, num_channel, num_filters, problem_type='Regression',
output_nums=1, pooling='avg', dropout_rate=False):
self.length = length
self.num_channel = num_channel
self.num_filters = num_filters
self.problem_type = problem_type
self.output_nums = output_nums
self.pooling = pooling
self.dropout_rate = dropout_rate
def MLP(self, x):
if self.pooling == 'avg':
x = tf.keras.layers.GlobalAveragePooling1D()(x)
elif self.pooling == 'max':
x = tf.keras.layers.GlobalMaxPooling1D()(x)
# Final Dense Outputting Layer for the outputs
x = tf.keras.layers.Flatten(name='flatten')(x)
if self.dropout_rate:
x = tf.keras.layers.Dropout(self.dropout_rate, name='Dropout')(x)
outputs = tf.keras.layers.Dense(self.output_nums, activation='linear')(x)
if self.problem_type == 'Classification':
outputs = tf.keras.layers.Dense(self.output_nums, activation='softmax')(x)
return outputs
def ResNet18(self):
inputs = tf.keras.Input((self.length, self.num_channel)) # The input tensor
stem_ = stem(inputs, self.num_filters) # The Stem Convolution Group
x = learner18(stem_, self.num_filters) # The learner
outputs = self.MLP(x)
# Instantiate the Model
model = tf.keras.Model(inputs, outputs)
return model
def ResNet34(self):
inputs = tf.keras.Input((self.length, self.num_channel)) # The input tensor
stem_ = stem(inputs, self.num_filters) # The Stem Convolution Group
x = learner34(stem_, self.num_filters) # The learner
outputs = self.MLP(x)
# Instantiate the Model
model = tf.keras.Model(inputs, outputs)
return model
def ResNet50(self):
inputs = tf.keras.Input((self.length, self.num_channel)) # The input tensor
stem_b = stem_bottleneck(inputs, self.num_filters) # The Stem Convolution Group
x = learner50(stem_b, self.num_filters) # The learner
outputs = self.MLP(x)
# Instantiate the Model
model = tf.keras.Model(inputs, outputs)
return model
def ResNet101(self):
inputs = tf.keras.Input((self.length, self.num_channel)) # The input tensor
stem_b = stem_bottleneck(inputs, self.num_filters) # The Stem Convolution Group
x = learner101(stem_b, self.num_filters) # The learner
outputs = self.MLP(x)
# Instantiate the Model
model = tf.keras.Model(inputs, outputs)
return model
def ResNet152(self):
inputs = tf.keras.Input((self.length, self.num_channel)) # The input tensor
stem_b = stem_bottleneck(inputs, self.num_filters) # The Stem Convolution Group
x = learner152(stem_b, self.num_filters) # The learner
outputs = self.MLP(x)
# Instantiate the Model
model = tf.keras.Model(inputs, outputs)
return model
if __name__ == '__main__':
# Configurations
length = 1024 # Length of each Segment
model_name = 'ResNet152' # ResNet Models
model_width = 16 # Width of the Initial Layer, subsequent layers start from here
num_channel = 1 # Number of Input Channels in the Model
problem_type = 'Regression' # Classification or Regression
output_nums = 1 # Number of Class for Classification Problems, always '1' for Regression Problems
#
Model = ResNetv2(length, num_channel, model_width, problem_type=problem_type, output_nums=output_nums, pooling='avg', dropout_rate=False).ResNet152()
Model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003), loss=tf.keras.losses.MeanAbsoluteError(), metrics=tf.keras.metrics.MeanSquaredError())
Model.summary()
|
import logging
import networkx
import itertools
from angr.utils import weakref
import claripy
l = logging.getLogger(name=__name__)
class StateHierarchy(object):
def __init__(self):
# The New Order
self._graph = networkx.DiGraph()
self._leaves = set() # nodes with no children
self._twigs = set() # nodes with one child
self._weakref_cache = {} # map from object id to weakref
self._reverse_weakref_cache = {} # map from weakref to object id
def __getstate__(self):
histories = [ h() for h in networkx.algorithms.dfs_postorder_nodes(self._graph) ]
return (histories,)
def __setstate__(self, s):
self._graph = networkx.DiGraph()
self._leaves = set()
self._twigs = set()
self._weakref_cache = {}
self._reverse_weakref_cache = {}
nodes = s[0]
for n in nodes:
self.add_history(n)
def get_ref(self, obj):
if id(obj) not in self._weakref_cache:
ref = weakref.ref(obj, self.clear_ref)
self._weakref_cache[id(obj)] = ref
self._reverse_weakref_cache[ref] = id(obj)
return ref
else:
return self._weakref_cache[id(obj)]
def clear_ref(self, ref):
if ref not in self._reverse_weakref_cache:
l.error("Cleaning mystery weakref %s", ref)
return
self._remove_history(ref)
# TODO: this nonsense is very much not thread safe
del self._weakref_cache[self._reverse_weakref_cache[ref]]
del self._reverse_weakref_cache[ref]
#
# Graph management
#
def _remove_history(self, h):
try:
predecessors = self._graph.predecessors(h)
successors = self._graph.successors(h)
for p,s in itertools.product(predecessors, successors):
self._graph.add_edge(p, s)
self._graph.remove_node(h)
except networkx.NetworkXError:
pass
self._leaves.discard(h)
self._twigs.discard(h)
hh = h()
if hh is not None:
hh.demote()
def add_state(self, s):
h = s.history
self.add_history(h)
def add_history(self, h):
cur_node = self.get_ref(h)
self._graph.add_node(cur_node)
if h.parent is not None:
prev_node = self.get_ref(h.parent)
self._graph.add_edge(prev_node, cur_node)
self._leaves.discard(prev_node)
if len(list(self._graph.successors(prev_node))) == 1:
self._twigs.add(prev_node)
else:
self._twigs.discard(prev_node)
self._leaves.add(cur_node)
def simplify(self):
tw = self._twigs
self._twigs = set()
for h in tw:
self._remove_history(h)
def full_simplify(self):
for h in self._graph.nodes():
if self._graph.out_degree(h) == 1:
self._remove_history(h)
def lineage(self, h):
"""
Returns the lineage of histories leading up to `h`.
"""
lineage = [ ]
predecessors = list(self._graph.predecessors(h))
while len(predecessors):
lineage.append(predecessors[0])
predecessors = list(self._graph.predecessors(predecessors[0]))
lineage.reverse()
return lineage
def all_successors(self, h):
nodes = list(networkx.algorithms.dfs_postorder_nodes(self._graph, h))[:-1]
nodes.reverse()
return nodes
def history_successors(self, h):
return [ ref() for ref in self._graph.successors(self.get_ref(h)) ]
def history_predecessors(self, h):
return [ ref() for ref in self._graph.predecessors(self.get_ref(h)) ]
def history_contains(self, h):
return self.get_ref(h) in self._graph
#
# LAZY_SOLVES support
#
def _find_root_unreachable(self, h):
lineage = self.lineage(h)
if len(lineage) == 0 or lineage[-1]().reachable():
return h
good = 0
bad = len(lineage) - 1
while True:
l.debug("... looking between %d and %d in %d states", good, bad, len(lineage))
cur = (bad+good)//2
if cur == good or cur == bad:
if lineage[bad]().reachable():
bad += 1
root = lineage[bad]
l.debug("... returning %d (%s)", bad, root)
return root
elif lineage[cur]().reachable():
l.debug("... %d is reachable", cur)
good = cur
else:
l.debug("... %d is unreachable", bad)
bad = cur
def _prune_subtree(self, h):
ph = list(self._graph.predecessors(h))
if len(ph) == 1 and len(list(self._graph.successors(ph[0]))) == 1:
self._twigs.add(ph[0])
all_children = list(networkx.algorithms.dfs_postorder_nodes(self._graph, h))
for n in all_children:
n()._satisfiable = False
if n().state is not None:
n().state.add_constraints(claripy.false)
self._graph.remove_nodes_from(all_children)
def unreachable_state(self, state):
self.unreachable_history(state.history)
def unreachable_history(self, h):
href = self.get_ref(h)
try:
l.debug("Pruning tree given unreachable %s", h)
root = self._find_root_unreachable(href)
except networkx.NetworkXError:
l.debug("... not present in graph")
else:
l.debug("... root is %s", root)
self._prune_subtree(root)
#
# Smart merging support
#
def most_mergeable(self, states):
"""
Find the "most mergeable" set of states from those provided.
:param states: a list of states
:returns: a tuple of: (a list of states to merge, those states' common history, a list of states to not merge yet)
"""
histories = set(self.get_ref(s.history) for s in states)
for n in networkx.algorithms.dfs_postorder_nodes(self._graph):
intersection = histories.intersection(self.all_successors(n))
if len(intersection) > 1:
return (
[ s for s in states if self.get_ref(s.history) in intersection ],
n(),
[ s for s in states if self.get_ref(s.history) not in intersection ]
)
# didn't find any?
return set(), None, states
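# Hedged usage sketch (comments only; `simgr` stands for an angr
# SimulationManager and is an assumption, not part of this module):
#   hierarchy = StateHierarchy()
#   for s in simgr.active:
#       hierarchy.add_state(s)
#   to_merge, common_history, deferred = hierarchy.most_mergeable(simgr.active)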
|
import FWCore.ParameterSet.Config as cms
from RecoEgamma.EgammaElectronProducers.defaultLowPtGsfElectronID_cfi import defaultLowPtGsfElectronID
lowPtGsfElectronID = defaultLowPtGsfElectronID.clone(
ModelNames = cms.vstring(['']),
ModelWeights = cms.vstring([
'RecoEgamma/ElectronIdentification/data/LowPtElectrons/RunII_Autumn18_LowPtElectrons_mva_id.xml.gz',
]),
ModelThresholds = cms.vdouble([-10.])
)
from Configuration.ProcessModifiers.run2_miniAOD_UL_cff import run2_miniAOD_UL
from Configuration.Eras.Modifier_run2_miniAOD_devel_cff import run2_miniAOD_devel
from Configuration.Eras.Modifier_bParking_cff import bParking
run2_miniAOD_UL.toModify(
lowPtGsfElectronID,
rho = "fixedGridRhoFastjetAll",
ModelWeights = ["RecoEgamma/ElectronIdentification/data/LowPtElectrons/LowPtElectrons_ID_2020Sept15.root"],
ModelThresholds = [-99.],
Version = "V1",
)
run2_miniAOD_devel.toModify(
lowPtGsfElectronID,
ModelWeights = ["RecoEgamma/ElectronIdentification/data/LowPtElectrons/LowPtElectrons_ID_2020Nov28.root"],
)
(bParking & run2_miniAOD_UL).toModify(
lowPtGsfElectronID,
ModelWeights = ["RecoEgamma/ElectronIdentification/data/LowPtElectrons/LowPtElectrons_ID_2021May17.root"],
)
from Configuration.Eras.Modifier_fastSim_cff import fastSim
from Configuration.ProcessModifiers.run2_miniAOD_UL_cff import run2_miniAOD_UL
from PhysicsTools.NanoAOD.nano_eras_cff import *
(fastSim & (run2_miniAOD_UL | run2_nanoAOD_106Xv2)).toModify(
lowPtGsfElectronID,
useGsfToTrack = True,
)
|
"""Core Learning regression tests for RLlib (torch and tf).
Runs Atari/PyBullet benchmarks for the most popular algorithms.
"""
import json
import os
from pathlib import Path
from ray.rllib.utils.test_utils import run_learning_tests_from_yaml
if __name__ == "__main__":
# Get path of this very script to look for yaml files.
abs_yaml_path = Path(__file__).parent
print("abs_yaml_path={}".format(abs_yaml_path))
yaml_files = abs_yaml_path.rglob("*.yaml")
yaml_files = sorted(
map(lambda path: str(path.absolute()), yaml_files), reverse=True
)
# Run all tests in the found yaml files.
results = run_learning_tests_from_yaml(yaml_files=yaml_files)
test_output_json = os.environ.get(
"TEST_OUTPUT_JSON", "/tmp/rllib_learning_test_core.json"
)
with open(test_output_json, "wt") as f:
json.dump(results, f)
print("Ok.")
|
'''
This program is used to recognize the driver's status (one of the 10 statuses) based on the image using pre-trained VGG16
deep convolutional neural network (CNN).
This program is modified from the blog post:
"Building powerful image classification models using very little data" from blog.keras.io.
This program does fine-tuning on a modified VGG16 net, which consists of two parts:
the lower model: layers 0-24 of the original VGG16 net (the first 4 blocks are frozen; the weights of the 5th block
are trained on our dataset)
the upper model: a newly added two-layer dense net (weights trained on our dataset)
'''
import os
#The h5py package is a Pythonic interface to the HDF5 binary data format
import h5py
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Activation, Dropout, Flatten, Dense
''' path to the model weights file in HDF5 binary data format
The vgg16 weights can be downloaded from the link below:
https://drive.google.com/file/d/0Bz7KyqmuGsilT0J5dmRCM0ROVHc/view
'''
weights_path = 'vgg16_weights.h5'
# dimensions of the images
img_width, img_height = 150, 150
# the path to the training data
train_data_dir = 'data/train'
# the path to the validation data
validation_data_dir = 'data/validation'
# the number of training samples. We have 20924 training images; with
# augmentation this number could be raised much higher, e.g. 2*20924
nb_train_samples = 20924
# We actually have 1500 validation samples, which can be augmented to much more
nb_validation_samples = 1500
# number of epochs for training
nb_epoch = 10
# build the VGG16 model
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
'''
# load the weights of the VGG16 networks (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
'''
# load the weights for each layer
assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
f = h5py.File(weights_path, 'r')
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
# we don't look at the last (fully-connected) layers in the savefile
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
# set the weights to layer-k
model.layers[k].set_weights(weights)
f.close()
print('VGG16 model weights have been successfully loaded.')
# build a MLP classifier model to put on top of the VGG16 model
top_model = Sequential()
# flatten the output of the VGG16 model to a 2D matrix (n*D)
top_model.add(Flatten(input_shape=model.output_shape[1:]))
# hidden layer of 256 neurons
top_model.add(Dense(256, activation='relu'))
# add dropout for the dense layer
top_model.add(Dropout(0.5))
# the output layer: we have 10 classes
top_model.add(Dense(10, activation='softmax'))
# stack the classifier (top model) on top of the VGG16 net
model.add(top_model)
# set the first 25 layers (up to the last conv block) of the VGG16 net to non-trainable (weights will not be updated)
for layer in model.layers[:25]:
layer.trainable=False
# compile the model
model.compile(loss = 'categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# augmentation configuration for training data
train_datagen = ImageDataGenerator(rescale=1.0/255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
# augmentation configuration for validation data (actually we apply no augmentation to the validation images)
test_datagen = ImageDataGenerator(rescale=1.0/255)
# training data generator from folder
train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width),
batch_size=32, class_mode='categorical')
# validation data generator from folder
validation_generator = test_datagen.flow_from_directory(validation_data_dir, target_size=(img_height, img_width),
batch_size=32, class_mode='categorical')
# fit the model
model.fit_generator(train_generator, samples_per_epoch=nb_train_samples, nb_epoch=nb_epoch,
validation_data=validation_generator, nb_val_samples=nb_validation_samples)
# save the model weights
# model.save_weights('VGG16_and_MLP_model.h5')
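# A minimal inference sketch (illustrative; the image path is a hypothetical
# placeholder, and img_height/img_width come from the variables defined above):
# from keras.preprocessing import image
# img = image.load_img('data/test/example.jpg', target_size=(img_height, img_width))
# x = image.img_to_array(img) / 255.0          # match the 1/255 rescaling used in training
# x = x.reshape((1,) + x.shape)                # add the batch dimension
# probs = model.predict(x)                     # shape (1, 10): one probability per class
# print('predicted class:', probs.argmax(axis=-1)[0])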
|
from setuptools import setup
setup(
name='rsa',
version='1.0',
    description='Implementation of the RSA cryptosystem',
url="https://github.com/JohannGordillo/RSA-Cryptosystem",
license="MIT",
author='Johann Gordillo',
author_email='jgordillo@ciencias.unam.mx',
packages=['rsa']
)
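# A typical install from the project root (illustrative):
#   pip install .
# after which `import rsa` resolves to this package.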
|
# Copyright (c) 2017 Alessandro Duca
#
# See the file LICENCE for copying permission.
import re
import logging
LOGGER = logging.getLogger(__name__)
def normalize_path(path):
normpath = re.sub(r'/+', '/', path)
result = re.sub(r'(^/)|(/$)', '', normpath)
return result
# Note: this class shadows Python's built-in NameError within this module.
class NameError(Exception):
    pass
class Cache:
root = dict()
__instance = None
    __initialized = False
def __new__(cls, factory=None):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
return cls.__instance
    def __init__(self, factory=None):
        if not Cache.__initialized:
            self.factory = factory
            Cache.__initialized = True
def lookup(self, path):
assert path is not None
normalized_path, chunks = self._chop(path)
assert len(chunks) > 0, 'insufficient path length'
return self._lookup(self.root, chunks, normalized_path)
def _lookup(self, node, chunks, original_path):
first, remaining = chunks[0], chunks[1:]
current_node = node.get(first)
if not remaining:
if current_node:
result = current_node
else:
if self.factory:
result = self.factory.new(original_path)
self.register(original_path, result)
else:
raise NameError(
'Can\'t find object at path {}'.format(original_path))
else:
if not current_node:
current_node = dict()
node[first] = current_node
result = self._lookup(current_node, remaining, original_path)
return result
@staticmethod
def _chop(path):
normalized_path = normalize_path(path)
return normalized_path, normalized_path.split('/')
def register(self, path, obj):
        original_path, chunks = self._chop(path)
self._append(self.root, chunks, obj, original_path)
def _append(self, node, chunks, obj, original_path):
LOGGER.debug('_append %s, %s, %s', node, chunks, obj)
first, remaining = chunks[0], chunks[1:]
if remaining:
if first not in node:
node[first] = dict()
elif not isinstance(node[first], dict):
raise Exception('node already exists {}'.format(original_path))
self._append(node[first], remaining, obj, original_path)
else:
if first not in node:
LOGGER.debug('append %s', first)
node[first] = obj
else:
raise Exception('node already exists {}'.format(original_path))
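# A minimal usage sketch (illustrative, not part of the module). Cache is a
# singleton: every instantiation returns the same object, and the optional
# factory builds entries that are missing on lookup.
if __name__ == '__main__':
    class EchoFactory:
        def new(self, path):
            return 'object at {}'.format(path)
    cache = Cache(factory=EchoFactory())
    cache.register('a/b/c', 42)
    print(cache.lookup('/a/b/c/'))  # 42 -- redundant slashes are normalized away
    print(cache.lookup('x/y'))      # 'object at x/y', created by the factory
    print(Cache() is cache)         # True -- always the same instance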
|
# Create a program that reads the birth year of seven people.
# At the end, show how many people have not yet reached adulthood and how many are already adults.
contotal = 0
maiores = 0
menores = 0
from datetime import date
ano = date.today().year
for c in range(1, 8):
    nascimento = int(input(f'In what year was person {c} born?: '))
contotal += 1
idade = ano - nascimento
if idade >= 18:
maiores += 1
else:
menores += 1
print(f'In total we had {maiores} ADULTS and {menores} MINORS')
|
#!/usr/bin/env python
# Notes on formulas
# -----------------
#
# There are four output types of formulas:
#
# 1. string
# 2. number
# 3. date — never a date range, unlike date properties
# 4. boolean
# Notes on rollups
# ----------------
#
# There are four signatures of rollup functions:
#
# 1. any -> array[any]
# * show_original
# * show_unique
# 2. any -> number
# * count / count_all
# * count_values
# * unique / count_unique_values
# * empty / count_empty
# * not_empty / count_not_empty
# * percent_empty
# * percent_not_empty
# 3. number -> number
# * sum
# * average
# * median
# * min
# * max
# * range
# 4. date -> date
# * earliest_date
# * latest_date
# * date_range
#
# Rollups returning arrays aren't implemented. Tables containing such rollups
# can still be imported but these rollups will be ignored.
#
# Some functions have different names in the API / documentation. This is
# probably a documentation bug. We use the name that we get from the API.
import argparse
import datetime
import json
import logging
import os
import re
import time
import unicodedata
import httpx
import psycopg
logging.basicConfig(
format="%(asctime)s %(message)s",
level=logging.INFO,
)
DATE_RE = re.compile(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")
def maybe_date(value):
"""Fix date values when Notion returns them as datetimes."""
if value is None:
return None
# Switch to str.removesuffix when dropping Python 3.8.
if value.endswith("T00:00:00.000+00:00"):
return value[:-19]
return value
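# For example: maybe_date("2021-04-24T00:00:00.000+00:00") -> "2021-04-24",
# while a genuine datetime such as "2021-04-24T08:30:00.000+00:00" is returned unchanged.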
INVALID_IN_NAME_RE = re.compile("[^a-z0-9_]")
# Maximum total delay for a single call is 2047 seconds which should let Notion
# recover from most temporary issues.
DELAY = 1 # before HTTP requests when reading databases, for throttling
RETRIES = 10 # retry queries up to RETRIES times
BACKOFF = 2 # multiply DELAY by BACKOFF between retries
PAGE_SIZE = 64 # lower than the default of 100 to prevent timeouts
TIMEOUT = 120 # seconds :-( Notion's API isn't all that fast
def get_database(database_id, token):
"""Get properties of a Notion database."""
t0 = time.perf_counter()
data = httpx.get(
f"https://api.notion.com/v1/databases/{database_id}",
headers={
"Authorization": f"Bearer {token}",
"Notion-Version": "2021-08-16",
},
).json()
t1 = time.perf_counter()
if data["object"] == "error":
logging.error(
"Failed to fetch the next pages: Notion API error: HTTP %s: %s",
data["status"],
data["message"],
)
raise RuntimeError(f"HTTP {data['status']}: {data['message']}")
logging.info(
"Fetched Notion database %s in %.1f seconds",
database_id,
t1 - t0,
)
return data
def iter_database(database_id, token):
"""Iterate over the pages of a Notion database."""
has_more = True
query = {
"sorts": [{"timestamp": "created_time", "direction": "descending"}],
"page_size": PAGE_SIZE,
}
while has_more:
t0 = time.perf_counter()
delay = DELAY
for retry in range(RETRIES):
try:
time.sleep(delay)
data = httpx.post(
f"https://api.notion.com/v1/databases/{database_id}/query",
headers={
"Authorization": f"Bearer {token}",
"Notion-Version": "2021-08-16",
},
json=query,
timeout=TIMEOUT,
).json()
except httpx.RequestError as exc:
logging.warning(
"Failed to fetch the next pages: HTTP request error: %s",
exc,
)
if retry == RETRIES - 1:
raise
else:
delay *= BACKOFF
continue
except json.JSONDecodeError as exc:
logging.warning(
"Failed to parse response: JSON decode error: %s",
exc,
)
if retry == RETRIES - 1:
raise
else:
delay *= BACKOFF
continue
if data["object"] == "error":
logging.error(
"Failed to fetch the next pages: Notion API error: HTTP %s: %s",
data["status"],
data["message"],
)
if retry == RETRIES - 1:
raise RuntimeError(f"HTTP {data['status']}: {data['message']}")
else:
delay *= BACKOFF
continue
break
t1 = time.perf_counter()
assert data["object"] == "list"
logging.info(
"Fetched %d Notion pages in %.1f seconds",
len(data["results"]),
t1 - t0,
)
has_more = data["has_more"]
query["start_cursor"] = data["next_cursor"]
yield from data["results"]
def get_value(property):
"""Convert a Notion property value to a Python value."""
type_ = property["type"]
if type_ == "title":
# Optional[str]
return "".join(t["plain_text"] for t in property["title"]) or None
# Basic properties
elif type_ == "rich_text":
# Optional[str]
return "".join(t["plain_text"] for t in property["rich_text"]) or None
elif type_ == "number":
# Optional[Number]
return property["number"]
elif type_ == "select":
# Optional[str]
if property["select"] is None:
return None
return property["select"]["name"]
elif type_ == "multi_select":
# List[str]
return [ms["name"] for ms in property["multi_select"]]
elif type_ == "date":
# Tuple[Optional[str], Optional[str]] - start and end date or datetime
if property["date"] is None:
return None, None
# "The public API will always return the time_zone field as null when
# rendering dates and time zone will be displayed as a UTC offset in
# the start and end date fields."
assert property["date"]["time_zone"] is None
return property["date"]["start"], property["date"]["end"]
elif type_ == "people":
# List[str] - UUID of person
return [p["id"] for p in property["people"]]
elif type_ == "files":
# List[str] - URL of the file
files = []
for f in property["files"]:
url = f["file"]["url"]
# Remove authentication information from files uploaded to Notion;
# it is too short lived to be worth storing in a database.
if "/secure.notion-static.com/" in url:
url = url.partition("?")[0]
files.append(url)
return files
elif type_ == "checkbox":
# bool
return property["checkbox"]
elif type_ == "url":
# Optional[str]
return property["url"]
elif type_ == "email":
# Optional[str]
return property["email"]
elif type_ == "phone_number":
# Optional[str]
return property["phone_number"]
# Advanced properties
elif type_ == "formula":
formula = property["formula"]
subtype = formula["type"]
if subtype == "string":
# str
return ("string", formula["string"])
elif subtype == "number":
# Optional[Number]
return ("number", formula["number"])
elif subtype == "date":
# Tuple[Optional[str], NoneType] - start date or datetime
if formula["date"] is None:
return ("date", (None, None))
assert formula["date"]["time_zone"] is None
assert formula["date"]["end"] is None
# Return the same format for consistency, even if end date is never set.
start_date = maybe_date(formula["date"]["start"])
return ("date", (start_date, None))
elif subtype == "boolean":
# bool
return ("boolean", formula["boolean"])
raise NotImplementedError(f"unsupported formula: {json.dumps(formula)}")
elif type_ == "relation":
# List[str] - UUID of related object
return [r["id"] for r in property["relation"]]
elif type_ == "rollup":
rollup = property["rollup"]
subtype = rollup["type"]
if subtype == "array":
# Skip rollups returning arrays
return ("array", [])
elif subtype == "number":
# Optional[Number]
return ("number", rollup["number"])
elif subtype == "date":
# Tuple[Optional[str], Optional[str]] - start and end date or datetime
if rollup["date"] is None:
return ("date", (None, None))
assert rollup["date"]["time_zone"] is None
start_date = maybe_date(rollup["date"]["start"])
end_date = maybe_date(rollup["date"]["end"])
return ("date", (start_date, end_date))
raise NotImplementedError(f"unsupported rollup: {json.dumps(rollup)}")
elif type_ == "created_time":
return property["created_time"]
elif type_ == "created_by":
return property["created_by"]["id"]
elif type_ == "last_edited_time":
return property["last_edited_time"]
elif type_ == "last_edited_by":
return property["last_edited_by"]["id"]
raise NotImplementedError(f"unsupported property: {json.dumps(property)}")
def convert(property, values):
"""Convert a Notion property to a PostgreSQL column."""
type_ = property["type"]
if type_ == "title":
return "text", values
# Basic properties
elif type_ == "rich_text":
return "text", values
elif type_ == "number":
if all(isinstance(value, int) for value in values if value is not None):
return "integer", values
else:
return "double precision", values
elif type_ == "select":
return "text", values
elif type_ == "multi_select":
return "text[]", values
elif type_ == "date":
if any(value[1] is not None for value in values):
# This is a range of dates or datetimes.
if all(
DATE_RE.fullmatch(value[0]) for value in values if value[0] is not None
) and all(
DATE_RE.fullmatch(value[1]) for value in values if value[1] is not None
):
return "daterange", values
else:
return "tstzrange", values
else:
# This is a date or datetime.
values = [value[0] for value in values]
if all(DATE_RE.fullmatch(value) for value in values if value is not None):
return "date", values
else:
return "timestamp with time zone", values
elif type_ == "people":
if all(len(value) <= 1 for value in values):
return "uuid", [value[0] if value else None for value in values]
else:
return "uuid[]", values
elif type_ == "files":
if all(len(value) <= 1 for value in values):
return "text", [value[0] if value else None for value in values]
else:
return "text[]", values
elif type_ == "checkbox":
return "boolean", values
elif type_ == "url":
return "text", values
elif type_ == "email":
return "text", values
elif type_ == "phone_number":
return "text", values
# Advanced properties
elif type_ == "formula":
(subtype,) = set(value[0] for value in values)
values = list(value[1] for value in values)
if subtype == "string":
return "text", values
elif subtype == "number":
return convert({"type": "number"}, values)
elif subtype == "date":
return convert({"type": "date"}, values)
elif subtype == "boolean":
return "boolean", values
formula = property["formula"]
raise NotImplementedError(f"unsupported formula: {json.dumps(formula)}")
elif type_ == "relation":
if all(len(value) <= 1 for value in values):
return "uuid", [value[0] if value else None for value in values]
else:
return "uuid[]", values
elif type_ == "rollup":
(subtype,) = set(value[0] for value in values)
values = list(value[1] for value in values)
if subtype == "array":
# Skip rollups returning arrays
return None, values
elif subtype == "number":
return convert({"type": "number"}, values)
elif subtype == "date":
return convert({"type": "date"}, values)
rollup = property["rollup"]
raise NotImplementedError(f"unsupported rollup: {json.dumps(rollup)}")
elif type_ == "created_time":
return "timestamp with time zone", values
elif type_ == "created_by":
return "uuid", values
elif type_ == "last_edited_time":
return "timestamp with time zone", values
elif type_ == "last_edited_by":
return "uuid", values
raise NotImplementedError(f"unsupported property: {json.dumps(property)}")
def sanitize_name(name):
"""Convert a Notion property name to a PostgreSQL column name."""
name = unicodedata.normalize("NFKD", name)
name = name.encode("ascii", "ignore").decode()
name = name.lower().strip().replace(" ", "_")
name = INVALID_IN_NAME_RE.sub("", name)
return name
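# Illustrative examples (not from the source): sanitize_name("Due Date?") -> "due_date",
# sanitize_name("Priorité") -> "priorite".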
def create_table(dsn, table_name, field_names, field_types, rows, drop, timestamp):
"""Create a PostgreSQL table."""
with psycopg.connect(dsn) as connection:
with connection.cursor() as cursor:
if timestamp is not None:
view_name, table_name = table_name, table_name + timestamp
if drop:
cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
logging.info("Dropped PostgreSQL table %s", table_name)
columns = ", ".join(
f"{name} {type}" for name, type in zip(field_names, field_types)
)
cursor.execute(f"CREATE TABLE {table_name} ({columns})")
logging.info("Created PostgreSQL table %s", table_name)
columns = ", ".join(field_names)
with cursor.copy(f"COPY {table_name} ({columns}) FROM STDIN") as copy:
for row in rows:
copy.write_row(row)
logging.info("Wrote %d rows to PostgreSQL", len(rows))
if timestamp is not None:
cursor.execute(
f"CREATE OR REPLACE VIEW {view_name} AS "
f"SELECT * from {table_name}"
)
logging.info("Created PostgreSQL view %s", view_name)
connection.commit()
def sync_database(database_id, table_name, drop_existing=False, versioned=False):
"""Sync a database from Notion to PostgreSQL."""
# Validate env vars.
try:
# Integration needs access to tables that will be synced and to every
# table referenced by a relation or a rollup.
token = os.environ["NOTION_TOKEN"]
except KeyError:
raise RuntimeError("missing environment variable NOTION_TOKEN") from None
try:
dsn = os.environ["POSTGRESQL_DSN"]
except KeyError:
raise RuntimeError("missing environment variable POSTGRESQL_DSN") from None
# Validate arguments.
DATABASE_ID_RE = re.compile(r"[0-9a-f]{32}")
if not DATABASE_ID_RE.fullmatch(database_id):
raise ValueError(
f"invalid Notion database ID: {database_id}; "
f"must match {DATABASE_ID_RE.pattern}"
)
    # We cap table names at 31 characters (PostgreSQL itself allows 63) and need 14 of them for the timestamp.
TABLE_NAME_RE = re.compile(r"[a-z_][a-z0-9_]+")
if not TABLE_NAME_RE.fullmatch(table_name):
raise ValueError(
f"invalid PostgreSQL table name: {table_name}; "
f"must match {TABLE_NAME_RE.pattern}"
)
TABLE_NAME_MAX_LENGTH = 17 if versioned else 31
if len(table_name) > TABLE_NAME_MAX_LENGTH:
raise ValueError(
f"invalid PostgreSQL table name: {table_name}; "
f"must contain no more than {TABLE_NAME_MAX_LENGTH} characters"
)
timestamp = datetime.datetime.utcnow().strftime("_%y%m%d_%H%M%S")
# Read the Notion database structure and content in memory.
database = get_database(database_id, token)
pages = list(iter_database(database_id, token))
# Convert to PostgreSQL field types and corresponding column values.
field_names = ["id"]
field_types = ["uuid"]
columns = [[page["id"] for page in pages]]
# Notion returns properties ordered by the opaque "id" attribute.
# Sort them alphabetically to get a more predictable result.
for name, property in sorted(database["properties"].items()):
assert name == property["name"] # Notion duplicates this info
values = [get_value(page["properties"][name]) for page in pages]
field_type, column = convert(property, values)
if field_type is None:
logging.info('Skipping unsupported property "%s"', name)
continue
logging.info('Converted property "%s" to %s', name, field_type)
field_names.append(sanitize_name(name))
field_types.append(field_type)
columns.append(column)
rows = list(zip(*columns))
# Write PostgreSQL table.
create_table(
dsn,
table_name,
field_names,
field_types,
rows,
drop=drop_existing,
timestamp=timestamp if versioned else None,
)
def main():
parser = argparse.ArgumentParser(
description="Import a Notion database to a PostgreSQL table"
)
parser.add_argument("database_id", help="Notion database ID")
parser.add_argument("table_name", help="PostgreSQL table name")
parser.add_argument(
"--drop-existing", action="store_true", help="Drop table if it exists"
)
parser.add_argument(
"--versioned", action="store_true", help="Import into a timestamped table"
)
sync_database(**vars(parser.parse_args()))
if __name__ == "__main__":
main()
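# A minimal invocation sketch (hypothetical IDs, DSN and script filename):
#
#   export NOTION_TOKEN=secret_xxx
#   export POSTGRESQL_DSN='dbname=notion user=postgres'
#   python notion_to_postgresql.py 0123456789abcdef0123456789abcdef tasks --versioned
#
# With --versioned, rows are written to tasks_YYMMDD_HHMMSS and a view named
# tasks is (re)created to point at the latest import.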
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .async_client import CertificateManagerAsyncClient
from .client import CertificateManagerClient
__all__ = (
"CertificateManagerClient",
"CertificateManagerAsyncClient",
)
|
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torchvision
torch.npu.set_device("npu:0")
#dummy_input = torch.randn(10, 3, 224, 224, device='npu:0')
dummy_input = torch.randn(10, 3, 224, 224)
dummy_input = dummy_input.to("npu")
model = torchvision.models.resnet50(pretrained=False)
# Providing input and output names sets the display names for values
# within the model's graph. Setting these does not change the semantics
# of the graph; it is only for readability.
#
# The inputs to the network consist of the flat list of inputs (i.e.
# the values you would pass to the forward() method) followed by the
# flat list of parameters. You can partially specify names, i.e. provide
# a list here shorter than the number of inputs to the model, and we will
# only set that subset of names, starting from the beginning.
input_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(16) ]
output_names = [ "output1" ]
model = model.to("npu")
torch.onnx.export(model, dummy_input, "resnet50.onnx", verbose=True, input_names=input_names, output_names=output_names)
# Gotcha: the pretrained-weight download may fail. Edit torchvision's resnet.py
# to use the helper below, download the weights manually, and place them in the
# D:/Pytorch/models directory. (ResNet, model_urls and load_state_dict_from_url
# are defined inside torchvision/models/resnet.py, which this helper replaces.)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], model_dir="D:/Pytorch/models",
progress=progress)
model.load_state_dict(state_dict)
return model
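# A minimal sanity check of the exported graph (assumes the `onnx` package is
# installed; illustrative only):
# import onnx
# onnx_model = onnx.load("resnet50.onnx")
# onnx.checker.check_model(onnx_model)      # raises if the graph is malformed
# print(onnx_model.graph.input[0].name)     # "actual_input_1"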
|
#Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test base class.
This class is intended to be used as the unit test base class in TensorFlow
Graphics. It implements new methods on top of the TensorFlow TestCase class
that are used to simplify the code and check for various kinds of failure.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.util import tfg_flags
FLAGS = flags.FLAGS
class TestCase(parameterized.TestCase, tf.test.TestCase):
"""Test case class implementing extra test functionalities."""
def setUp(self):
"""Sets the seed for tensorflow and numpy."""
super(TestCase, self).setUp()
try:
seed = flags.FLAGS.test_random_seed
except flags.UnparsedFlagAccessError:
seed = 301 # Default seed in case test_random_seed is not defined.
tf.compat.v1.set_random_seed(seed)
np.random.seed(seed)
FLAGS[tfg_flags.TFG_ADD_ASSERTS_TO_GRAPH].value = True
def _remove_dynamic_shapes(self, shapes):
for s in shapes:
if None in s:
return None
return shapes
def _compute_gradient_error(self, x, y, x_init_value, delta=1e-6):
"""Computes the gradient error.
Args:
x: a tensor or list of tensors.
y: a tensor.
x_init_value: a numpy array of the same shape as "x" representing the
initial value of x.
delta: (optional) the amount of perturbation.
Returns:
      A tuple (max_error, row, column), with max_error the maximum error between
the two Jacobians, and row/column the position of said maximum error.
"""
x_shape = x.shape.as_list()
y_shape = y.shape.as_list()
with tf.compat.v1.Session():
grad = tf.compat.v1.test.compute_gradient(x, x_shape, y, y_shape,
x_init_value, delta)
if isinstance(grad, tuple):
grad = [grad]
error = 0
row_max_error = 0
column_max_error = 0
for j_t, j_n in grad:
if j_t.size or j_n.size: # Handle zero size tensors correctly
diff = np.fabs(j_t - j_n)
max_error = np.maximum(error, diff.max())
row_max_error, column_max_error = np.unravel_index(
diff.argmax(), diff.shape)
return max_error, row_max_error, column_max_error
def _create_placeholders_from_shapes(self, shapes, dtypes=None,
sparse_tensors=None):
"""Creates a list of placeholders based on a list of shapes.
Args:
shapes: A tuple or list of the input shapes.
dtypes: A list of input types.
sparse_tensors: A `bool` list denoting if placeholder is a SparseTensor.
This is ignored in eager mode - in eager execution, only dense
placeholders will be created.
Returns:
A list of placeholders.
"""
if dtypes is None:
dtypes = [tf.float32] * len(shapes)
if sparse_tensors is None:
sparse_tensors = [False] * len(shapes)
if tf.executing_eagerly():
placeholders = [
tf.compat.v1.placeholder_with_default(
tf.zeros(shape=shape, dtype=dtype), shape=shape)
for shape, dtype in zip(shapes, dtypes)
]
else:
placeholders = [
tf.compat.v1.sparse.placeholder(dtype, shape=shape)
if is_sparse else tf.compat.v1.placeholder(shape=shape, dtype=dtype)
for shape, dtype, is_sparse in zip(shapes, dtypes, sparse_tensors)
]
return placeholders
def _tile_tensors(self, tiling, tensors):
"""Tiles a set of tensors using the tiling information.
Args:
tiling: A list of integers defining how to tile the tensors.
tensors: A list of tensors to tile.
Returns:
A list of tiled tensors.
"""
tensors = [
np.tile(tensor, tiling + [1] * len(np.array(tensor).shape))
for tensor in tensors
]
return tensors
def assert_exception_is_not_raised(self,
func,
shapes,
dtypes=None,
sparse_tensors=None,
**kwargs):
"""Runs the function to make sure an exception is not raised.
Args:
      func: A function to execute.
shapes: A tuple or list of the input shapes.
dtypes: A list of input types.
sparse_tensors: A list of `bool` indicating if the inputs are
SparseTensors. Defaults to all `False`. This is used for creating
SparseTensor placeholders in graph mode.
**kwargs: A dict of keyword arguments to be passed to the function.
"""
if tf.executing_eagerly() and shapes:
# If a shape is given in eager mode, the tensor will be initialized with
# zeros, which can make some range checks fail for certain functions.
# But if only kwargs are passed and shapes is empty, this function
# still should run correctly.
return
placeholders = self._create_placeholders_from_shapes(
shapes=shapes, dtypes=dtypes, sparse_tensors=sparse_tensors)
try:
func(*placeholders, **kwargs)
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
def assert_exception_is_raised(self,
func,
error_msg,
shapes,
dtypes=None,
sparse_tensors=None,
**kwargs):
"""Runs the function to make sure an exception is raised.
Args:
      func: A function to execute.
error_msg: The error message of the exception.
shapes: A tuple or list of the input shapes.
dtypes: A list of input types.
sparse_tensors: A list of `bool` indicating if the inputs are
SparseTensors. Defaults to all `False`. This is used for creating
SparseTensor placeholders in graph mode.
**kwargs: A dict of keyword arguments to be passed to the function.
"""
if tf.executing_eagerly():
# If shapes is an empty list, we can continue with the test. If shapes
      # has None values, we should return.
shapes = self._remove_dynamic_shapes(shapes)
if shapes is None:
return
placeholders = self._create_placeholders_from_shapes(
shapes=shapes, dtypes=dtypes, sparse_tensors=sparse_tensors)
with self.assertRaisesRegexp(ValueError, error_msg):
func(*placeholders, **kwargs)
def assert_jacobian_is_correct(self, x, x_init, y, atol=1e-6, delta=1e-6):
"""Tests that the gradient error of y(x) is small.
Args:
x: A tensor.
x_init: A numpy array containing the values at which to estimate the
gradients of y.
y: A tensor.
atol: Maximum absolute tolerance in gradient error.
delta: The amount of perturbation.
"""
if tf.executing_eagerly():
return
max_error, _, _ = self._compute_gradient_error(x, y, x_init, delta)
self.assertLessEqual(max_error, atol)
def assert_jacobian_is_finite(self, x, x_init, y):
"""Tests that the Jacobian only contains valid values.
The analytical gradients and numerical ones are expected to differ at points
where y is not smooth. This function can be used to check that the
analytical gradient is not NaN nor Inf.
Args:
x: A tensor.
x_init: A numpy array containing the values at which to estimate the
gradients of y.
y: A tensor.
"""
if tf.executing_eagerly():
return
x_shape = x.shape.as_list()
y_shape = y.shape.as_list()
with tf.compat.v1.Session():
gradient = tf.compat.v1.test.compute_gradient(
x, x_shape, y, y_shape, x_init_value=x_init)
theoretical_gradient = gradient[0][0]
self.assertFalse(
np.isnan(theoretical_gradient).any() or
np.isinf(theoretical_gradient).any())
def assert_output_is_correct(self,
func,
test_inputs,
test_outputs,
rtol=1e-3,
atol=1e-6,
tile=True):
"""Tests that the function gives the correct result.
Args:
      func: A function to execute.
test_inputs: A tuple or list of test inputs.
test_outputs: A tuple or list of test outputs against which the result of
calling `func` on `test_inputs` will be compared to.
rtol: The relative tolerance used during the comparison.
atol: The absolute tolerance used during the comparison.
tile: A `bool` indicating whether or not to automatically tile the test
inputs and outputs.
"""
if tile:
# Creates a rank 4 list of values between 1 and 10.
tensor_tile = np.random.randint(1, 10, size=np.random.randint(4)).tolist()
test_inputs = self._tile_tensors(tensor_tile, test_inputs)
test_outputs = self._tile_tensors(tensor_tile, test_outputs)
test_outputs = [
tf.convert_to_tensor(value=output) for output in test_outputs
]
test_outputs = test_outputs[0] if len(test_outputs) == 1 else test_outputs
self.assertAllClose(test_outputs, func(*test_inputs), rtol=rtol, atol=atol)
def assert_tf_lite_convertible(self,
func,
shapes,
dtypes=None,
test_inputs=None):
"""Runs the tf-lite converter to make sure the function can be exported.
Args:
func: A function to execute with tf-lite.
shapes: A tuple or list of input shapes.
dtypes: A list of input types.
test_inputs: A tuple or list of inputs. If not provided the test inputs
will be randomly generated.
"""
if tf.executing_eagerly():
# Currently TFLite conversion is not supported in eager mode.
return
# Generate graph with the function given as input.
in_tensors = self._create_placeholders_from_shapes(shapes, dtypes)
out_tensors = func(*in_tensors)
if not isinstance(out_tensors, (list, tuple)):
out_tensors = [out_tensors]
with tf.compat.v1.Session() as sess:
try:
sess.run(tf.compat.v1.global_variables_initializer())
# Convert to a TFLite model.
converter = tf.compat.v1.lite.TFLiteConverter.from_session(
sess, in_tensors, out_tensors)
tflite_model = converter.convert()
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# If no test inputs provided then randomly generate inputs.
if test_inputs is None:
test_inputs = [
np.array(np.random.sample(shape), dtype=np.float32)
for shape in shapes
]
else:
test_inputs = [
np.array(test, dtype=np.float32) for test in test_inputs
]
# Evaluate function using TensorFlow.
feed_dict = dict(zip(in_tensors, test_inputs))
test_outputs = sess.run(out_tensors, feed_dict)
# Set tensors for the TFLite model.
input_details = interpreter.get_input_details()
for i, test_input in enumerate(test_inputs):
index = input_details[i]["index"]
interpreter.set_tensor(index, test_input)
# Run TFLite model.
interpreter.invoke()
# Get tensors from the TFLite model and compare with TensorFlow.
output_details = interpreter.get_output_details()
for o, test_output in enumerate(test_outputs):
index = output_details[o]["index"]
self.assertAllClose(test_output, interpreter.get_tensor(index))
except Exception as e: # pylint: disable=broad-except
self.fail("Exception raised: %s" % str(e))
def main(argv=None):
"""Main function."""
tf.test.main(argv)
# The util functions or classes are not exported.
__all__ = []
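# A minimal usage sketch (illustrative; the import path is an assumption):
#
# from tensorflow_graphics.util import test_case
#
# class L2NormalizeTest(test_case.TestCase):
#
#   def test_normalize_unit_length(self):
#     vector = tf.constant([[3.0, 4.0]])
#     normalized = tf.math.l2_normalize(vector, axis=-1)
#     self.assertAllClose(normalized, [[0.6, 0.8]])
#
# if __name__ == "__main__":
#   test_case.main()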
|
from time import time
def linear(n):
""" linear O(n)"""
cont=0
for i in range (n):
cont=cont+1
print ("COUNTER",cont,end=" ")
def quadratic(n):
""" quadratic O(n**2)"""
cont=0
for i in range (n):
for j in range (n):
cont=cont+1
print ("COUNTER",cont,end=" ")
print("LINEAR TIMES (MILLISEC.)")
t1=0
t2=0
n=1000000
while t2-t1<5: # 5 seconds
t1=time()
linear(n)
t2=time()
print("n=",n,"***","time",int(1000*(t2-t1)))
n=n*2
print("QUADRATIC TIMES (MILLISEC.)")
t1=0
t2=0
n=100
while t2-t1<5: #5 seconds
t1=time()
quadratic(n)
t2=time()
print("n=",n,"***","time",int(1000*(t2-t1)))
n=n*2
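# A sketch of the same measurement with a monotonic clock, which is preferable
# to time() for intervals because it cannot jump when the system clock is
# adjusted (Python 3.3+):
#
# from time import perf_counter
# t1 = perf_counter()
# linear(1000000)
# t2 = perf_counter()
# print("n=", 1000000, "***", "time", int(1000 * (t2 - t1)))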
|
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.db import Database
def gen_sites():
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
for site_name, site_type in gridinfo.sites.items():
if site_type in ['FIFO18E1']:
yield tile_name, site_name
def write_params(params):
    pinstr = 'tile,val,site\n'
    for tile, (site, val) in sorted(params.items()):
        pinstr += '%s,%s,%s\n' % (tile, val, site)
    with open('params.csv', 'w') as f:
        f.write(pinstr)
def run():
print(
'''
module top(input clk, stb, di, output do);
localparam integer DIN_N = 8;
localparam integer DOUT_N = 8;
reg [DIN_N-1:0] din;
wire [DOUT_N-1:0] dout;
reg [DIN_N-1:0] din_shr;
reg [DOUT_N-1:0] dout_shr;
always @(posedge clk) begin
din_shr <= {din_shr, di};
dout_shr <= {dout_shr, din_shr[DIN_N-1]};
if (stb) begin
din <= din_shr;
dout_shr <= dout;
end
end
assign do = dout_shr[DOUT_N-1];
''')
params = {}
sites = list(gen_sites())
for (tile_name, site_name), isone in zip(sites,
util.gen_fuzz_states(len(sites))):
params[tile_name] = (site_name, isone)
print(
'''
(* KEEP, DONT_TOUCH, LOC = "%s" *)
RAMB18E1 #(
.DOA_REG(%u)
) bram_%s (
.CLKARDCLK(),
.CLKBWRCLK(),
.ENARDEN(),
.ENBWREN(),
.REGCEAREGCE(),
.REGCEB(),
.RSTRAMARSTRAM(),
.RSTRAMB(),
.RSTREGARSTREG(),
.RSTREGB(),
.ADDRARDADDR(),
.ADDRBWRADDR(),
.DIADI(),
.DIBDI(),
.DIPADIP(),
.DIPBDIP(),
.WEA(),
.WEBWE(),
.DOADO(),
.DOBDO(),
.DOPADOP(),
.DOPBDOP());
''' % (site_name, isone, site_name))
print("endmodule")
write_params(params)
if __name__ == '__main__':
run()
|
# encoding: utf-8
# Copyright (c) 2008, Eric Moritz <eric@themoritzfamily.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# * copyright notice, this list of conditions and the following
# * disclaimer in the documentation and/or other materials provided
# * with the distribution. Neither the name of the <ORGANIZATION>
# * nor the names of its contributors may be used to endorse or
# * promote products derived from this software without specific
# * prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
This module provides a way to use Smisk as a WSGI backend.
Conforms to :pep:`333`
Example::
def hello_app(env, start_response):
start_response("200 OK", [])
return ["Hello, World"]
from smisk.wsgi import main
main(hello_app)
:author: Eric Moritz
:author: Rasmus Andersson
'''
import os, sys, smisk.core, logging
from smisk.util.main import *
from smisk.config import LOGGING_FORMAT, LOGGING_DATEFMT
__all__ = ['__version__', 'Request', 'Gateway', 'main']
__version__ = (0,1,0)
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
'''Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header'''
return header_name.lower() in _hop_headers
class Request(smisk.core.Request):
'''WSGI request'''
def prepare(self, app):
'''Set up the environment for one request'''
self.env['wsgi.input'] = self.input
self.env['wsgi.errors'] = self.errors
self.env['wsgi.version'] = app.wsgi_version
self.env['wsgi.run_once'] = app.wsgi_run_once
self.env['wsgi.url_scheme'] = app.request.url.scheme
self.env['wsgi.multithread'] = app.wsgi_multithread
self.env['wsgi.multiprocess'] = app.wsgi_multiprocess
# Put a reference of ourselves in the environment so that the user
# might reference other parts of the framework and discover if they
# are running in Smisk or not.
self.env['smisk.app'] = app
# Rebind our send_file to the real send_file
self.send_file = app.response.send_file
def send_file(self, path):
raise NotImplementedError('unprepared request does not have a valid send_file method')
class Gateway(smisk.core.Application):
'''WSGI adapter
'''
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = False
wsgi_multiprocess = True
wsgi_run_once = False
def __init__(self, wsgi_app):
super(Gateway, self).__init__()
self.request_class = Request
self.wsgi_app = wsgi_app
def start_response(self, status, headers, exc_info=None):
'''`start_response()` callable as specified by
`PEP 333 <http://www.python.org/dev/peps/pep-0333/>`__'''
if exc_info:
try:
if self.response.has_begun:
raise exc_info[0],exc_info[1],exc_info[2]
else:
# In this case of response not being initiated yet, this will replace
# both headers and any buffered body.
self.error(exc_info[0], exc_info[1], exc_info[2])
finally:
exc_info = None # Avoid circular ref.
elif len(self.response.headers) != 0:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
# Construct the headers
# Add the status to the headers
self.response.headers = ['Status: '+status]
# Append each of the headers provided by wsgi
self.response.headers += [": ".join(header) for header in headers]
# Add the X-Powered-By header to show off this extension
self.response.headers.append("X-Powered-By: smisk+wsgi/%d.%d.%d" % __version__)
# Return the write function as required by the WSGI spec
return self.response.write
def service(self):
self.request.prepare(self)
output = self.wsgi_app(self.request.env, self.start_response)
# Discussion about Content-Length:
# Output might be an iterable in which case we can not trust len()
# but in a perfect world, we did know how many parts we got and if
# we only got _one_ we could also add a Content-length. But no.
# Instead, we rely on the host server splitting up things in nice
# chunks, using chunked transfer encoding, (If the server complies
# to HTTP/1.1 it is required to do so, so we are pretty safe) or
# simply rely on the host server setting the Content-Length header.
for data in output:
self.response.write(data)
# XXX TODO replace this main function with the stuff from smisk.util.main
def main(wsgi_app, appdir=None, bind=None, forks=None, handle_errors=True, cli=True):
'''Helper for setting up and running an application.
This is normally what you do in your top module ``__init__``::
from smisk.wsgi import main
from your.app import wsgi_app
main(wsgi_app)
Your module is now a runnable program which automatically configures and
runs your application. There is also a Command Line Interface if `cli`
evaluates to ``True``.
:Parameters:
wsgi_app : callable
A WSGI application
appdir : string
Path to the applications base directory.
bind : string
Bind to address (and port). Note that this overrides ``SMISK_BIND``.
forks : int
Number of child processes to spawn.
handle_errors : bool
Handle any errors by wrapping calls in `handle_errors_wrapper()`
cli : bool
Act as a *Command Line Interface*, parsing command line arguments and
options.
:rtype: None
'''
if cli:
appdir, bind, forks = main_cli_filter(appdir=appdir, bind=bind, forks=forks)
# Setup logging
# Calling basicConfig has no effect if logging is already configured.
logging.basicConfig(format=LOGGING_FORMAT, datefmt=LOGGING_DATEFMT)
# Bind
if bind is not None:
os.environ['SMISK_BIND'] = bind
if 'SMISK_BIND' in os.environ:
smisk.core.bind(os.environ['SMISK_BIND'])
log.info('Listening on %s', smisk.core.listening())
# Configure appdir
setup_appdir(appdir)
# Create the application
application = Gateway(wsgi_app=wsgi_app)
# Forks
if isinstance(forks, int) and forks > -1:
application.forks = forks
# Runloop
if handle_errors:
return handle_errors_wrapper(application.run)
else:
return application.run()
if __name__ == '__main__':
  from wsgiref.validate import validator # Import the wsgi validator app
  def hello_app(env, start_response):
    start_response("200 OK", [('Content-Type', 'text/plain')])
    return ["Hello, World"]
  if len(sys.argv) != 2:
    print "Usage: %s hostname:port" % (sys.argv[0])
    print "This runs a sample fastcgi server under the hostname and"
    print "port given in argv[1]"
    sys.exit(1)
  smisk.core.bind(sys.argv[1])
  app = validator(hello_app)
  Gateway(app).run()
|
# Generated by Django 3.2 on 2021-04-24 06:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ContextClass',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
options={
'verbose_name': 'Context Class',
'verbose_name_plural': 'Context Classes',
},
),
migrations.CreateModel(
name='ContextItem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.TextField(blank=True, default=None, null=True)),
('context_class', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='template_email_manager.contextclass')),
],
options={
'verbose_name': 'Context Item',
'verbose_name_plural': 'Context Items',
},
),
migrations.CreateModel(
name='EmailAddress',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('address', models.EmailField(max_length=254)),
],
options={
'verbose_name': 'E-mail Address',
'verbose_name_plural': 'E-mail Addresses',
},
),
migrations.CreateModel(
name='ImageAttachment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('image', models.ImageField(upload_to='uploads/email-images/')),
],
options={
'verbose_name': 'Image Attachment',
'verbose_name_plural': 'Image Attachments',
},
),
migrations.CreateModel(
name='HTMLTemplate',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('shortname', models.CharField(default='new_template', max_length=45, unique=True)),
('fullname', models.CharField(blank=True, max_length=255, null=True)),
('html_content', models.TextField(blank=True, null=True)),
('text_alternate', models.TextField(blank=True, null=True)),
('images', models.ManyToManyField(to='template_email_manager.ImageAttachment')),
('requested_context_classes', models.ManyToManyField(to='template_email_manager.ContextClass')),
],
options={
'verbose_name': 'HTML Template',
'verbose_name_plural': 'HTML Templates',
},
),
migrations.CreateModel(
name='EmailQueue',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('status', models.CharField(choices=[('CRE', 'Creating'), ('REA', 'Ready'), ('INP', 'In Progress'), ('SEN', 'Sent'), ('FAI', 'Send Failed'), ('USC', 'User Canceled'), ('MAC', 'Canceled for Maximum number of sending attempts')], default='CRE', max_length=255)),
('created_on', models.DateTimeField(default=django.utils.timezone.now)),
('sent_on', models.DateTimeField(blank=True, null=True)),
('error_log', models.TextField(blank=True)),
('send_attempts', models.IntegerField(default=0)),
('retry_at', models.DateTimeField(blank=True, default=None, null=True)),
('last_operation', models.DateTimeField(default=django.utils.timezone.now)),
('bcc', models.ManyToManyField(blank=True, related_name='related_queue_bcc', to='template_email_manager.EmailAddress')),
('context_items', models.ManyToManyField(to='template_email_manager.ContextItem')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='related_queue_sender', to='template_email_manager.emailaddress')),
('template_html', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='template_email_manager.htmltemplate')),
('to', models.ManyToManyField(related_name='related_queue_to', to='template_email_manager.EmailAddress')),
],
options={
'verbose_name': 'E-mail Queue',
'verbose_name_plural': 'E-mail Queues',
},
),
migrations.CreateModel(
name='EmailPrototype',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('subject', models.CharField(max_length=255)),
('bcc', models.ManyToManyField(blank=True, related_name='related_prototype_bcc', to='template_email_manager.EmailAddress')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='related_prototype_sender', to='template_email_manager.emailaddress')),
('template_html', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='template_email_manager.htmltemplate')),
('to', models.ManyToManyField(related_name='related_prototype_to', to='template_email_manager.EmailAddress')),
],
options={
'verbose_name': 'E-mail Prototype',
'verbose_name_plural': 'E-mail Prototypes',
},
),
]
|
# Read N (the number of digits), then print the digit sum of the next line,
# e.g. inputs "3" and "123" print 6.
N = int(input())
print(sum([int(i) for i in input()]))
|
#!/usr/bin/env python
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Implementation for `pmg config` CLI.
"""
import glob
import os
import shutil
import subprocess
import sys
from urllib.request import urlretrieve
from monty.serialization import dumpfn, loadfn
from pymatgen.core import SETTINGS_FILE
def setup_potcars(args):
"""
    Set up the POTCAR directory.
:param args: args from command.
"""
pspdir, targetdir = (os.path.abspath(d) for d in args.potcar_dirs)
try:
os.makedirs(targetdir)
except OSError:
r = input("Destination directory exists. Continue (y/n)? ")
if r != "y":
print("Exiting ...")
sys.exit(0)
print("Generating pymatgen resources directory...")
name_mappings = {
"potpaw_PBE": "POT_GGA_PAW_PBE",
"potpaw_PBE_52": "POT_GGA_PAW_PBE_52",
"potpaw_PBE_54": "POT_GGA_PAW_PBE_54",
"potpaw_PBE.52": "POT_GGA_PAW_PBE_52",
"potpaw_PBE.54": "POT_GGA_PAW_PBE_54",
"potpaw_LDA": "POT_LDA_PAW",
"potpaw_LDA.52": "POT_LDA_PAW_52",
"potpaw_LDA.54": "POT_LDA_PAW_54",
"potpaw_LDA_52": "POT_LDA_PAW_52",
"potpaw_LDA_54": "POT_LDA_PAW_54",
"potUSPP_LDA": "POT_LDA_US",
"potpaw_GGA": "POT_GGA_PAW_PW91",
"potUSPP_GGA": "POT_GGA_US_PW91",
}
for (parent, subdirs, files) in os.walk(pspdir):
basename = os.path.basename(parent)
basename = name_mappings.get(basename, basename)
for subdir in subdirs:
filenames = glob.glob(os.path.join(parent, subdir, "POTCAR*"))
if len(filenames) > 0:
try:
basedir = os.path.join(targetdir, basename)
if not os.path.exists(basedir):
os.makedirs(basedir)
fname = filenames[0]
dest = os.path.join(basedir, os.path.basename(fname))
shutil.copy(fname, dest)
ext = fname.split(".")[-1]
if ext.upper() in ["Z", "GZ"]:
with subprocess.Popen(["gunzip", dest]) as p:
p.communicate()
elif ext.upper() in ["BZ2"]:
with subprocess.Popen(["bunzip2", dest]) as p:
p.communicate()
if subdir == "Osmium":
subdir = "Os"
dest = os.path.join(basedir, f"POTCAR.{subdir}")
shutil.move(os.path.join(basedir, "POTCAR"), dest)
with subprocess.Popen(["gzip", "-f", dest]) as p:
p.communicate()
except Exception as ex:
print(f"An error has occurred. Message is {str(ex)}. Trying to continue... ")
print("")
print(
"PSP resources directory generated. It is recommended that you "
"run 'pmg config --add PMG_VASP_PSP_DIR %s'" % os.path.abspath(targetdir)
)
print("Start a new terminal to ensure that your environment variables are properly set.")
def build_enum(fortran_command="gfortran"):
"""
Build enum.
:param fortran_command:
"""
currdir = os.getcwd()
state = True
try:
subprocess.call(["git", "clone", "--recursive", "https://github.com/msg-byu/enumlib.git"])
os.chdir(os.path.join(currdir, "enumlib", "symlib", "src"))
os.environ["F90"] = fortran_command
subprocess.call(["make"])
enumpath = os.path.join(currdir, "enumlib", "src")
os.chdir(enumpath)
subprocess.call(["make"])
for f in ["enum.x", "makestr.x"]:
subprocess.call(["make", f])
shutil.copy(f, os.path.join("..", ".."))
except Exception as ex:
print(str(ex))
state = False
finally:
os.chdir(currdir)
shutil.rmtree("enumlib")
return state
def build_bader(fortran_command="gfortran"):
"""
Build bader package.
:param fortran_command:
"""
bader_url = "http://theory.cm.utexas.edu/henkelman/code/bader/download/bader.tar.gz"
currdir = os.getcwd()
state = True
try:
urlretrieve(bader_url, "bader.tar.gz")
subprocess.call(["tar", "-zxf", "bader.tar.gz"])
os.chdir("bader")
subprocess.call(["cp", "makefile.osx_" + fortran_command, "makefile"])
subprocess.call(["make"])
shutil.copy("bader", os.path.join("..", "bader_exe"))
os.chdir("..")
shutil.rmtree("bader")
os.remove("bader.tar.gz")
shutil.move("bader_exe", "bader")
except Exception as ex:
print(str(ex))
state = False
finally:
os.chdir(currdir)
return state
def install_software(args):
"""
Install all optional external software.
:param args:
"""
try:
subprocess.call(["ifort", "--version"])
print("Found ifort")
fortran_command = "ifort"
except Exception:
try:
subprocess.call(["gfortran", "--version"])
print("Found gfortran")
fortran_command = "gfortran"
except Exception as ex:
print(str(ex))
print("No fortran compiler found.")
sys.exit(-1)
enum = None
bader = None
if args.install == "enumlib":
print("Building enumlib")
enum = build_enum(fortran_command)
print("")
elif args.install == "bader":
print("Building bader")
bader = build_bader(fortran_command)
print("")
if bader or enum:
print(
"Please add {} to your PATH or move the executables multinum.x, "
"makestr.x and/or bader to a location in your PATH.".format(os.path.abspath("."))
)
print("")
def add_config_var(args):
"""
Add configuration args.
:param args:
"""
d = {}
if os.path.exists(SETTINGS_FILE):
shutil.copy(SETTINGS_FILE, SETTINGS_FILE + ".bak")
print(f"Existing {SETTINGS_FILE} backed up to {SETTINGS_FILE + '.bak'}")
d = loadfn(SETTINGS_FILE)
toks = args.var_spec
if len(toks) % 2 != 0:
print("Bad variable specification!")
sys.exit(-1)
for i in range(int(len(toks) / 2)):
d[toks[2 * i]] = toks[2 * i + 1]
dumpfn(d, SETTINGS_FILE)
print(f"New {SETTINGS_FILE} written!")
def configure_pmg(args):
"""
Handle configure command.
:param args:
"""
if args.potcar_dirs:
setup_potcars(args)
elif args.install:
install_software(args)
elif args.var_spec:
add_config_var(args)
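# A minimal invocation sketch (hypothetical paths; apart from --add, which the
# script itself recommends above, the flag spellings are assumptions, since the
# argument parser is wired up elsewhere in pmg):
#
#   pmg config -p /path/to/extracted_psps /path/to/pmg_psp_dir
#   pmg config --install enumlib
#   pmg config --add PMG_VASP_PSP_DIR /path/to/pmg_psp_dir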
|
"""
CLI引数を解釈するutilです。
"""
import os
import sys
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from typing import Any, List, Dict, NamedTuple, Callable, Optional
from abc import ABC, abstractmethod
import importlib.util
import pathlib
import yaml
class _Args(NamedTuple):
args: List[Any]
kwargs: Dict[str, Any]
class _Meta(NamedTuple):
parser: ArgumentParser
handler: Optional[Callable]
parents: List[str]
_METAKEY = ".meta:command"
def get_meta(args: Namespace):
return getattr(args, _METAKEY)
class Command:
"""
>>> cmd = Command("sample", "例のコマンド")
>>> foo = Command("foo", "fooコマンド") << cmd
>>> foo.option("--input")
>>> foo(lambda args: print("foo")).name
'foo'
>>> (cmd / "foo") == foo
True
>>> x = cmd.build()
"""
def __init__(self, name, doc, metakey=_METAKEY):
"""
Args:
            name(str): command name
            doc(str): command description
            metakey(str): attribute name used to attach command metadata to the parse result; use the same value across related commands.
"""
self.name = name
self.doc = doc
self.__metakey = metakey
self.__subcommands: Dict[str, Command] = OrderedDict()
self.__args: List[_Args] = []
self.__main_fn = None
def has_metakey(self, key):
"""
        Check whether this command's metakey equals the given key.
"""
return self.__metakey == key
def start(self, argv=None, *others, **kwargs):
"""
        Convenience method for the typical use case.
        Parses the command-line arguments and runs the matched command.
"""
if argv is None:
argv = sys.argv[1:]
parser = self.build()
args = parser.parse_args(argv)
meta: _Meta = getattr(args, self.__metakey)
if meta.handler is None:
meta.parser.print_help()
else:
meta.handler(args, *others, **kwargs)
def __call__(self, func):
self.__main_fn = func
return self
def __rshift__(self, command: "Command") -> "Command":
self.__add_subcommand(command)
return command
def __lshift__(self, command: "Command") -> "Command":
return command >> self
def __truediv__(self, command_name: str) -> Optional["Command"]:
return self.__get_subcommand(command_name)
def option(self, *args, **kwargs):
"""
        Declare an argument.
        Accepts the same parameters as argparse.ArgumentParser.add_argument.
"""
self.__args.append(_Args(args=args, kwargs=kwargs))
def build(self, parser: ArgumentParser = None, commands=None, **kwargs):
"""
        Build the parser.
        If an existing parser is passed in, subcommands are added to it.
"""
parents = [] if commands is None else commands
if parser is None:
parser = ArgumentParser(prog=self.name, description=self.doc, allow_abbrev=False, **kwargs)
self.__add_args(parser)
if self.__subcommands:
subparsers = parser.add_subparsers()
for cmd in self.__subcommands.values():
sub = subparsers.add_parser(cmd.name, description=cmd.doc)
cmd.build(sub, [*parents, self.name])
handler = None if self.__subcommands else self.__main_fn
meta = _Meta(parser=parser, handler=handler, parents=parents)
parser.set_defaults(**{self.__metakey: meta})
return parser
@property
def subcommands(self):
"""
        Return: list of subcommands
"""
return list(self.__subcommands.values())
def __add_args(self, parser: ArgumentParser):
for args, kwargs in self.__args:
parser.add_argument(*args, **kwargs)
def __add_subcommand(self, command: "Command"):
assert command.has_metakey(self.__metakey)
        assert command.name not in self.__subcommands, Exception(f"duplicate command: {command.name}")
self.__subcommands[command.name] = command
def __get_subcommand(self, name: str) -> Optional["Command"]:
return self.__subcommands.get(name)
class PluginManger:
    def __init__(self, command: Command) -> None:
        """
        Args:
            command(Command): root command that plugins attach subcommands to
        """
        self.__command = command
        self.__plugin_types: Dict[str, "AbstractPlugin"] = {}
        # Initialize here (not in add_plugin_type) so load_yml works even when
        # it is called before any plugin type has been registered.
        self.__plugins = []
    def add_plugin_type(self, name, type_):
        assert name not in self.__plugin_types
        self.__plugin_types[name] = type_
    def load_yml(self, path):
        with open(pathlib.Path(path).expanduser()) as f:
            plugins = yaml.safe_load(f)
if not plugins:
return
for obj in plugins:
type_name = obj.get("type")
args = obj.get("args")
kwargs = obj.get("kwargs")
if type_name not in self.__plugin_types:
raise Exception("unknown plugin_type")
plugin_type = self.get_plugin(type_name, args, kwargs)
plugin = plugin_type.load(self, self.__command)
if plugin is not None:
self.__plugins.append(plugin)
def get_plugin(self, type_name, args, kwargs):
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
return self.__plugin_types[type_name](*args, **kwargs)
def update(self):
for plugin in self.__plugin_types.values():
plugin.update()
class PluginMangerWrapper:
def __init__(self, plugin_manager: PluginManger) -> None:
self.__plugin_manager = plugin_manager
def add_plugin_type(self, name, type_):
self.__plugin_manager.add_plugin_type(name, type_)
class AbstractPlugin(ABC):
@abstractmethod
def on_load(self, plugin_manager: PluginMangerWrapper, command: Command):
pass
def update(self):
pass
class AbstractPluginType(ABC):
@abstractmethod
def load(self, plugin_manager: PluginMangerWrapper, command: Command) -> Optional[AbstractPlugin]:
pass
class LocalPluginType(AbstractPluginType):
def __init__(self, path, plugin) -> None:
path = pathlib.Path(path).expanduser()
module_spec = importlib.util.spec_from_file_location(f"LocalPluginType:{path}", path)
module = importlib.util.module_from_spec(module_spec)
if module_spec.loader is not None:
module_spec.loader.exec_module(module)
self.__plugin_class = getattr(module, plugin)
else:
self.__plugin_class = None
def load(self, plugin_manager: PluginMangerWrapper, command: Command):
"""
        Load a class implementing the AbstractPlugin interface.
"""
if self.__plugin_class is None:
return
plugin = self.__plugin_class()
plugin.on_load(plugin_manager, command)
return plugin
class GitPluginType(AbstractPluginType):
"""
    Work in progress.
"""
def __init__(
self,
path,
plugin,
repo,
tag=None,
commit=None,
local=False,
) -> None:
assert not (tag is not None and commit is not None)
self.__path = path
self.__plugin = plugin
self.__repo = repo
self.__tag = tag
self.__commit = commit
self.__checkout = tag if tag is not None else commit
self.__local = local
cache_root = pathlib.Path("~/.cache/mlbase").expanduser()
        self.__cache = cache_root / repo.split("/")[-1]
def load(self, plugin_manager: PluginMangerWrapper, command: Command):
local_plugin = LocalPluginType(path=self.__cache / self.__path, plugin=self.__plugin)
plugin = local_plugin.load(plugin_manager, command)
return plugin
    def update(self):
        """
        Clone the repository into the cache directory and check out the pinned tag or commit.
        """
        self.__cache.mkdir(parents=True, exist_ok=True)
        os.system(f"git clone {self.__repo} {self.__cache}")
        if self.__checkout is not None:
            os.system(f"git -C {self.__cache} checkout {self.__checkout}")
|
# Exercise: accessing a web API from Python
import requests
import json
base_url = "https://api.github.com"
def get_url(url):
    return base_url + url
resp = requests.get(get_url("/users?page=0&per_page=1"))
result = resp.json()
print(resp.url, "returned result:", json.dumps(result))
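# A slightly more robust sketch of the same request: let requests build the
# query string and fail loudly on HTTP errors (same endpoint as above).
resp = requests.get(get_url("/users"), params={"page": 0, "per_page": 1})
resp.raise_for_status()
print(resp.url, "returned result:", json.dumps(resp.json(), indent=2))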
|
#!/usr/bin/env python3
# coding:utf-8
def solve(strings):
d = {}
for c, n in zip(range(65, 91), range(0, 52, 2)):
d[chr(c)] = n
d[chr(c+32)] = n+1
return ''.join(sorted(strings, key=lambda x: d[x]))
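# An equivalent formulation without the lookup table (a sketch): sort
# case-insensitively, breaking ties so that uppercase sorts before lowercase.
def solve_alt(strings):
    return ''.join(sorted(strings, key=lambda ch: (ch.lower(), ch.islower())))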
if __name__ == "__main__":
strings = "easqWAwaeq" # AaaeeqqsWw
print(solve(strings))
|
#! /usr/bin/env python3
# developed by Gabi Zapodeanu, TSA, GPO, Cisco Systems
# This file contains the Spark auth token, the Google developer key, and the DNA Center credentials
SPARK_URL = 'https://api.ciscospark.com/v1'
SPARK_AUTH = 'Bearer ' + 'Paste your Spark token here'
GOOGLE_API_KEY = 'Paste your Google developer API key here'
DNAC_URL = 'https://your dna center'
DNAC_USER = 'user'
DNAC_PASS = 'password'
|
#!/usr/bin/env python
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import sys
import os
import argparse
import time
import logging
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as plt3d
from sklearn.manifold import TSNE
from hyperion.hyp_defs import config_logger
from hyperion.io import DataWriterFactory as DWF
from hyperion.helpers import VectorClassReader as VCR
from hyperion.transforms import TransformList, PCA
colors = ["b", "g", "r", "c", "m", "y", "k"]
markers = ["x", "o", "+", "*", "s", "h", "D", "^", "v", "p", "8"]
def plot_vector_tsne(
iv_file,
v_list,
preproc_file,
output_path,
save_embed,
output_dim,
perplexity,
exag,
lr,
num_iter,
init_method,
rng_seed,
verbose,
pca_dim,
max_classes,
**kwargs
):
if preproc_file is not None:
preproc = TransformList.load(preproc_file)
else:
preproc = None
vr_args = VCR.filter_args(**kwargs)
vcr = VCR(iv_file, v_list, preproc, **vr_args)
x, class_ids = vcr.read()
t1 = time.time()
if pca_dim > 0:
pca = PCA(pca_dim=pca_dim)
pca.fit(x)
x = pca.predict(x)
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    def tsne_obj(n):
        return TSNE(
            n_components=n,
            perplexity=perplexity,
            early_exaggeration=exag,
            learning_rate=lr,
            n_iter=num_iter,
            init=init_method,
            random_state=rng_seed,
            verbose=verbose,
        )
if max_classes > 0:
index = class_ids < max_classes
x = x[index]
class_ids = class_ids[index]
if output_dim > 3:
tsne = tsne_obj(output_dim)
y = tsne.fit_transform(x)
        if save_embed:
            h5_file = "%s/embed_%dd.h5" % (output_path, output_dim)
hw = DWF.create(h5_file)
hw.write(vcr.u2c.key, y)
tsne = tsne_obj(2)
y = tsne.fit_transform(x)
if save_embed:
h5_file = "%s/embed_2d.h5" % output_path
hw = DWF.create(h5_file)
hw.write(vcr.u2c.key, y)
fig_file = "%s/tsne_2d.pdf" % (output_path)
# plt.scatter(y[:,0], y[:,1], c=class_ids, marker='x')
color_marker = [(c, m) for m in markers for c in colors]
for c in np.unique(class_ids):
idx = class_ids == c
plt.scatter(
y[idx, 0],
y[idx, 1],
c=color_marker[c][0],
marker=color_marker[c][1],
label=vcr.class_names[c],
)
plt.legend()
plt.grid(True)
    plt.savefig(fig_file)
    plt.show()
plt.clf()
# if max_classes > 0:
# fig_file = '%s/tsne_2d_n%d.pdf' % (output_path, max_classes)
# index = class_ids < max_classes
# plt.scatter(y[index,0], y[index,1], c=class_ids[index], marker='x')
# plt.grid(True)
# plt.show()
# plt.savefig(fig_file)
# plt.clf()
tsne = tsne_obj(3)
y = tsne.fit_transform(x)
if save_embed:
h5_file = "%s/embed_3d.h5" % output_path
hw = DWF.create(h5_file)
hw.write(vcr.u2c.key, y)
fig_file = "%s/tsne_3d.pdf" % (output_path)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
# ax.scatter(y[:,0], y[:,1], y[:,2], c=class_ids, marker='x')
for c in np.unique(class_ids):
idx = class_ids == c
ax.scatter(
y[idx, 0],
y[idx, 1],
y[idx, 2],
c=color_marker[c][0],
marker=color_marker[c][1],
label=vcr.class_names[c],
)
plt.grid(True)
    plt.savefig(fig_file)
    plt.show()
plt.clf()
# if max_classes > 0:
# fig_file = '%s/tsne_3d_n%d.pdf' % (output_path, max_classes)
# index = class_ids < max_classes
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(y[index,0], y[index,1], y[index,2], c=class_ids[index], marker='x')
# plt.grid(True)
# plt.show()
# plt.savefig(fig_file)
# plt.clf()
logging.info("Elapsed time: %.2f s." % (time.time() - t1))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
fromfile_prefix_chars="@",
description="Plots TSNE embeddings",
)
parser.add_argument("--iv-file", dest="iv_file", required=True)
parser.add_argument("--v-list", dest="v_list", required=True)
parser.add_argument("--preproc-file", dest="preproc_file", default=None)
VCR.add_argparse_args(parser)
parser.add_argument("--output-path", dest="output_path", required=True)
parser.add_argument(
"--save-embed", dest="save_embed", default=False, action="store_true"
)
parser.add_argument("--output-dim", dest="output_dim", type=int, default=3)
parser.add_argument("--perplexity", dest="perplexity", type=float, default=30)
parser.add_argument("--exag", dest="exag", type=float, default=12)
parser.add_argument("--lr", dest="lr", type=float, default=200)
parser.add_argument("--num-iter", dest="num_iter", type=int, default=1000)
parser.add_argument(
"--init-method", dest="init_method", default="pca", choices=["random", "pca"]
)
parser.add_argument("--rng-seed", dest="rng_seed", type=int, default=1024)
parser.add_argument("--pca-dim", dest="pca_dim", type=int, default=50)
parser.add_argument("--max-classes", dest="max_classes", type=int, default=10)
parser.add_argument(
"-v", "--verbose", dest="verbose", default=1, choices=[0, 1, 2, 3], type=int
)
args = parser.parse_args()
config_logger(args.verbose)
logging.debug(args)
plot_vector_tsne(**vars(args))
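# Example invocation (hypothetical file names; flags as declared above):
#
#   python plot_vector_tsne.py --iv-file exp/xvectors.h5 --v-list lists/class2utt \
#       --output-path exp/tsne --pca-dim 50 --perplexity 30 --save-embed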
|
import json
import random
import time
import itertools
from ethereum import utils
from ethereum.utils import parse_as_bin, big_endian_to_int
from ethereum.meta import apply_block
from ethereum.common import update_block_env_variables
from ethereum.messages import apply_transaction
import rlp
from rlp.utils import encode_hex
from ethereum.exceptions import InvalidNonce, InsufficientStartGas, UnsignedTransaction, \
BlockGasLimitReached, InsufficientBalance, InvalidTransaction, VerificationFailed
from ethereum.slogging import get_logger, configure_logging
from ethereum.config import Env
from ethereum.state import State, dict_to_prev_header
from ethereum.block import Block, BlockHeader, BLANK_UNCLES_HASH
from ethereum.pow.consensus import initialize
from ethereum.genesis_helpers import mk_basic_state, state_from_genesis_declaration, \
initialize_genesis_keys
from ethereum.db import RefcountDB
log = get_logger('eth.chain')
config_string = ':info,eth.chain:debug'
#config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
configure_logging(config_string=config_string)
class Chain(object):
def __init__(self, genesis=None, env=None, \
new_head_cb=None, reset_genesis=False, localtime=None, max_history=1000, **kwargs):
self.env = env or Env()
# Initialize the state
if 'head_hash' in self.db: # new head tag
self.state = self.mk_poststate_of_blockhash(self.db.get('head_hash'))
self.state.executing_on_head = True
print('Initializing chain from saved head, #%d (%s)' % \
(self.state.prev_headers[0].number, encode_hex(self.state.prev_headers[0].hash)))
elif genesis is None:
raise Exception("Need genesis decl!")
elif isinstance(genesis, State):
assert env is None
self.state = genesis
self.env = self.state.env
print('Initializing chain from provided state')
reset_genesis = True
elif "extraData" in genesis:
self.state = state_from_genesis_declaration(
genesis, self.env, executing_on_head=True)
reset_genesis = True
print('Initializing chain from provided genesis declaration')
elif "prev_headers" in genesis:
self.state = State.from_snapshot(genesis, self.env, executing_on_head=True)
reset_genesis = True
print('Initializing chain from provided state snapshot, %d (%s)' % \
(self.state.block_number, encode_hex(self.state.prev_headers[0].hash[:8])))
elif isinstance(genesis, dict):
print('Initializing chain from new state based on alloc')
self.state = mk_basic_state(genesis, {
"number": kwargs.get('number', 0),
"gas_limit": kwargs.get('gas_limit', 4712388),
"gas_used": kwargs.get('gas_used', 0),
"timestamp": kwargs.get('timestamp', 1467446877),
"difficulty": kwargs.get('difficulty', 2**25),
"hash": kwargs.get('prevhash', '00' * 32),
"uncles_hash": kwargs.get('uncles_hash', '0x' + encode_hex(BLANK_UNCLES_HASH))
}, self.env)
reset_genesis = True
assert self.env.db == self.state.db
initialize(self.state)
self.new_head_cb = new_head_cb
self.head_hash = self.state.prev_headers[0].hash
assert self.state.block_number == self.state.prev_headers[0].number
if reset_genesis:
self.genesis = Block(self.state.prev_headers[0], [], [])
initialize_genesis_keys(self.state, self.genesis)
else:
self.genesis = self.get_block_by_number(0)
self.time_queue = []
self.parent_queue = {}
self.localtime = time.time() if localtime is None else localtime
self.max_history = max_history
# Head (tip) of the chain
@property
def head(self):
try:
block_rlp = self.db.get(self.head_hash)
if block_rlp == 'GENESIS':
return self.genesis
else:
return rlp.decode(block_rlp, Block)
except Exception as e:
log.error(e)
return None
# Returns the post-state of the block
def mk_poststate_of_blockhash(self, blockhash):
if blockhash not in self.db:
raise Exception("Block hash %s not found" % encode_hex(blockhash))
block_rlp = self.db.get(blockhash)
if block_rlp == 'GENESIS':
return State.from_snapshot(json.loads(self.db.get('GENESIS_STATE')), self.env)
block = rlp.decode(block_rlp, Block)
state = State(env=self.env)
state.trie.root_hash = block.header.state_root
update_block_env_variables(state, block)
state.gas_used = block.header.gas_used
state.txindex = len(block.transactions)
state.recent_uncles = {}
state.prev_headers = []
b = block
header_depth = state.config['PREV_HEADER_DEPTH']
for i in range(header_depth + 1):
state.prev_headers.append(b.header)
if i < 6:
state.recent_uncles[state.block_number - i] = []
for u in b.uncles:
state.recent_uncles[state.block_number - i].append(u.hash)
try:
b = rlp.decode(state.db.get(b.header.prevhash), Block)
except:
break
if i < header_depth:
if state.db.get(b.header.prevhash) == 'GENESIS':
jsondata = json.loads(state.db.get('GENESIS_STATE'))
for h in jsondata["prev_headers"][:header_depth - i]:
state.prev_headers.append(dict_to_prev_header(h))
for blknum, uncles in jsondata["recent_uncles"].items():
if int(blknum) >= state.block_number - int(state.config['MAX_UNCLE_DEPTH']):
state.recent_uncles[blknum] = [parse_as_bin(u) for u in uncles]
else:
raise Exception("Dangling prevhash")
assert len(state.journal) == 0, state.journal
return state
# Gets the parent block of a given block
def get_parent(self, block):
if block.header.number == int(self.db.get('GENESIS_NUMBER')):
return None
return self.get_block(block.header.prevhash)
# Gets the block with a given blockhash
def get_block(self, blockhash):
try:
block_rlp = self.db.get(blockhash)
if block_rlp == 'GENESIS':
if not hasattr(self, 'genesis'):
self.genesis = rlp.decode(self.db.get('GENESIS_RLP'), sedes=Block)
return self.genesis
else:
return rlp.decode(block_rlp, Block)
except Exception as e:
log.debug("Failed to get block", hash=blockhash, error=e)
return None
# Add a record allowing you to later look up the provided block's
# parent hash and see that it is one of its children
def add_child(self, child):
try:
existing = self.db.get(b'child:' + child.header.prevhash)
except:
existing = b''
existing_hashes = []
for i in range(0, len(existing), 32):
existing_hashes.append(existing[i: i+32])
if child.header.hash not in existing_hashes:
self.db.put(b'child:' + child.header.prevhash, existing + child.header.hash)
# Gets the hash of the block with the given block number
def get_blockhash_by_number(self, number):
try:
return self.db.get(b'block:%d' % number)
except:
return None
# Gets the block with the given block number
def get_block_by_number(self, number):
return self.get_block(self.get_blockhash_by_number(number))
# Get the hashes of all known children of a given block
def get_child_hashes(self, blockhash):
o = []
try:
data = self.db.get(b'child:' + blockhash)
for i in range(0, len(data), 32):
o.append(data[i:i + 32])
return o
except:
return []
# Get the children of a block
def get_children(self, block):
if isinstance(block, Block):
block = block.header.hash
if isinstance(block, BlockHeader):
block = block.hash
return [self.get_block(h) for h in self.get_child_hashes(block)]
# Get the score (AKA total difficulty in PoW) of a given block
def get_score(self, block):
if not block:
return 0
key = b'score:' + block.header.hash
fills = []
while key not in self.db:
fills.insert(0, (block.header.hash, block.difficulty))
key = b'score:' + block.header.prevhash
block = self.get_parent(block)
score = int(self.db.get(key))
for h,d in fills:
key = b'score:' + h
score = score + d + random.randrange(d // 10**6 + 1)
self.db.put(key, str(score))
return score
# This function should be called periodically so as to
# process blocks that were received but laid aside because
# they were received too early
def process_time_queue(self, new_time=None):
self.localtime = time.time() if new_time is None else new_time
i = 0
        while i < len(self.time_queue) and self.time_queue[i].timestamp <= self.localtime:
log.info('Adding scheduled block')
pre_len = len(self.time_queue)
self.add_block(self.time_queue.pop(i))
if len(self.time_queue) == pre_len:
i += 1
# Call upon receiving a block
def add_block(self, block):
now = self.localtime
# Are we receiving the block too early?
if block.header.timestamp > now:
i = 0
while i < len(self.time_queue) and block.timestamp > self.time_queue[i].timestamp:
i += 1
self.time_queue.insert(i, block)
log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
(now, block.header.timestamp, block.header.timestamp - now))
return False
# Is the block being added to the head?
if block.header.prevhash == self.head_hash:
log.info('Adding to head', head=encode_hex(block.header.prevhash))
self.state.deletes = []
self.state.changed = {}
try:
apply_block(self.state, block)
except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
log.info('Block %d (%s) with parent %s invalid, reason: %s' %
(block.number, encode_hex(block.header.hash), encode_hex(block.header.prevhash), e))
return False
self.db.put(b'block:%d' % block.header.number, block.header.hash)
block_score = self.get_score(block) # side effect: put 'score:' cache in db
self.head_hash = block.header.hash
for i, tx in enumerate(block.transactions):
self.db.put(b'txindex:' + tx.hash, rlp.encode([block.number, i]))
assert self.get_blockhash_by_number(block.header.number) == block.header.hash
deletes = self.state.deletes
changed = self.state.changed
# Or is the block being added to a chain that is not currently the head?
elif block.header.prevhash in self.env.db:
log.info('Receiving block not on head (%s), adding to secondary post state %s' %
(encode_hex(self.head_hash), encode_hex(block.header.prevhash)))
temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
try:
apply_block(temp_state, block)
except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
log.info('Block %s with parent %s invalid, reason: %s' %
(encode_hex(block.header.hash), encode_hex(block.header.prevhash), e))
return False
deletes = temp_state.deletes
block_score = self.get_score(block)
changed = temp_state.changed
# If the block should be the new head, replace the head
if block_score > self.get_score(self.head):
b = block
new_chain = {}
# Find common ancestor
while b.header.number >= int(self.db.get('GENESIS_NUMBER')):
new_chain[b.header.number] = b
key = b'block:%d' % b.header.number
orig_at_height = self.db.get(key) if key in self.db else None
if orig_at_height == b.header.hash:
break
if b.prevhash not in self.db or self.db.get(b.prevhash) == 'GENESIS':
break
b = self.get_parent(b)
# Replace block index and tx indices, and edit the state cache
changed_accts = {}
replace_from = b.header.number
for i in itertools.count(replace_from):
log.info('Rewriting height %d' % i)
key = b'block:%d' % i
orig_at_height = self.db.get(key) if key in self.db else None
if orig_at_height:
self.db.delete(key)
orig_block_at_height = self.get_block(orig_at_height)
log.info('%s no longer in main chain' % encode_hex(orig_block_at_height.header.hash))
for tx in orig_block_at_height.transactions:
if b'txindex:' + tx.hash in self.db:
self.db.delete(b'txindex:' + tx.hash)
acct_list = self.db.get(b'changed:'+orig_block_at_height.hash)
for j in range(0, len(acct_list), 20):
changed_accts[acct_list[j: j+20]] = True
if i in new_chain:
new_block_at_height = new_chain[i]
log.info('%s now in main chain' % encode_hex(new_block_at_height.header.hash))
self.db.put(key, new_block_at_height.header.hash)
                    for j, tx in enumerate(new_block_at_height.transactions):
                        self.db.put(b'txindex:' + tx.hash,
                                    rlp.encode([new_block_at_height.number, j]))
if i < b.number:
acct_list = self.db.get(b'changed:'+new_block_at_height.hash)
for j in range(0, len(acct_list), 20):
changed_accts[acct_list[j: j+20]] = True
if i not in new_chain and not orig_at_height:
break
for c in changed.keys():
changed_accts[c] = True
for addr in changed_accts.keys():
data = temp_state.trie.get(addr)
if data:
self.state.db.put(b'address:'+addr, data)
else:
try:
self.state.db.delete(b'address:'+addr)
except KeyError:
pass
self.head_hash = block.header.hash
self.state = temp_state
self.state.executing_on_head = True
# Block has no parent yet
else:
if block.header.prevhash not in self.parent_queue:
self.parent_queue[block.header.prevhash] = []
self.parent_queue[block.header.prevhash].append(block)
log.info('Got block %d (%s) with prevhash %s, parent not found. Delaying for now' %
(block.number, encode_hex(block.hash), encode_hex(block.prevhash)))
return False
self.add_child(block)
self.db.put('head_hash', self.head_hash)
self.db.put(block.hash, rlp.encode(block))
self.db.put(b'changed:'+block.hash, b''.join(list(changed.keys())))
print('Saved %d address change logs' % len(changed.keys()))
self.db.put(b'deletes:'+block.hash, b''.join(deletes))
print('Saved %d trie node deletes for block %d (%s)' % (len(deletes), block.number, utils.encode_hex(block.hash)))
# Delete old junk data
old_block_hash = self.get_blockhash_by_number(block.number - self.max_history)
if old_block_hash:
try:
deletes = self.db.get(b'deletes:'+old_block_hash)
print('Deleting up to %d trie nodes' % (len(deletes) // 32))
rdb = RefcountDB(self.db)
for i in range(0, len(deletes), 32):
rdb.delete(deletes[i: i+32])
self.db.delete(b'deletes:'+old_block_hash)
self.db.delete(b'changed:'+old_block_hash)
except KeyError as e:
print(e)
pass
self.db.commit()
assert (b'deletes:'+block.hash) in self.db
log.info('Added block %d (%s) with %d txs and %d gas' % \
(block.header.number, encode_hex(block.header.hash)[:8],
len(block.transactions), block.header.gas_used))
if self.new_head_cb and block.header.number != 0:
self.new_head_cb(block)
if block.header.hash in self.parent_queue:
for _blk in self.parent_queue[block.header.hash]:
self.add_block(_blk)
del self.parent_queue[block.header.hash]
return True
def __contains__(self, blk):
if isinstance(blk, (str, bytes)):
try:
blk = rlp.decode(self.db.get(blk), Block)
except:
return False
try:
o = self.get_block(self.get_blockhash_by_number(blk.number)).hash
assert o == blk.hash
return True
except Exception as e:
return False
def has_block(self, block):
return block in self
def has_blockhash(self, blockhash):
return blockhash in self.db
def get_chain(self, frm=None, to=2**63 - 1):
if frm is None:
frm = int(self.db.get('GENESIS_NUMBER')) + 1
chain = []
        for i in itertools.islice(itertools.count(), frm, to):
            h = self.get_blockhash_by_number(i)
            if not h:
                return chain
            chain.append(self.get_block(h))
        return chain
# Get block number and transaction index
def get_tx_position(self, tx):
if not isinstance(tx, (str, bytes)):
tx = tx.hash
if b'txindex:' + tx in self.db:
data = rlp.decode(self.db.get(b'txindex:' + tx))
return big_endian_to_int(data[0]), big_endian_to_int(data[1])
else:
return None
def get_transaction(self, tx):
print('Deprecated. Use get_tx_position')
blknum, index = self.get_tx_position(tx)
blk = self.get_block_by_number(blknum)
return blk.transactions[index], blk, index
# Get descendants of a block
def get_descendants(self, block):
output = []
blocks = [block]
while len(blocks):
b = blocks.pop()
blocks.extend(self.get_children(b))
output.append(b)
return output
@property
def db(self):
return self.env.db
# Get blockhashes starting from a hash and going backwards
def get_blockhashes_from_hash(self, hash, max):
block = self.get_block(hash)
if block is None:
return []
header = block.header
hashes = []
        for i in range(max):
hash = header.prevhash
block = self.get_block(hash)
if block is None:
break
header = block.header
hashes.append(header.hash)
if header.number == 0:
break
return hashes
@property
def config(self):
return self.env.config
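# A minimal construction sketch (assumes a pyethereum-style alloc dict keyed by
# a hypothetical hex address; the `isinstance(genesis, dict)` branch of
# Chain.__init__ above turns it into a basic genesis state):
#
#   env = Env()
#   alloc = {'1' * 40: {'balance': 10 ** 18}}
#   chain = Chain(genesis=alloc, env=env)
#   assert chain.head_hash == chain.genesis.header.hash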
|
# Tests for the Part model
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.core.exceptions import ValidationError
import os
from .models import Part, PartTestTemplate
from .models import rename_part_image, match_part_names
from .templatetags import inventree_extras
import part.settings
from common.models import InvenTreeSetting
class TemplateTagTest(TestCase):
""" Tests for the custom template tag code """
def test_multiply(self):
self.assertEqual(int(inventree_extras.multiply(3, 5)), 15)
def test_version(self):
self.assertEqual(type(inventree_extras.inventree_version()), str)
def test_hash(self):
hash = inventree_extras.inventree_commit_hash()
self.assertGreater(len(hash), 5)
def test_date(self):
d = inventree_extras.inventree_commit_date()
self.assertEqual(len(d.split('-')), 3)
def test_github(self):
self.assertIn('github.com', inventree_extras.inventree_github_url())
def test_docs(self):
self.assertIn('inventree.readthedocs.io', inventree_extras.inventree_docs_url())
class PartTest(TestCase):
""" Tests for the Part model """
fixtures = [
'category',
'part',
'location',
]
def setUp(self):
self.r1 = Part.objects.get(name='R_2K2_0805')
self.r2 = Part.objects.get(name='R_4K7_0603')
self.c1 = Part.objects.get(name='C_22N_0805')
Part.objects.rebuild()
def test_tree(self):
# Test that the part variant tree is working properly
chair = Part.objects.get(pk=10000)
self.assertEqual(chair.get_children().count(), 3)
self.assertEqual(chair.get_descendant_count(), 4)
green = Part.objects.get(pk=10004)
self.assertEqual(green.get_ancestors().count(), 2)
self.assertEqual(green.get_root(), chair)
self.assertEqual(green.get_family().count(), 3)
self.assertEqual(Part.objects.filter(tree_id=chair.tree_id).count(), 5)
def test_str(self):
p = Part.objects.get(pk=100)
self.assertEqual(str(p), "BOB | Bob | A2 - Can we build it?")
def test_metadata(self):
self.assertEqual(self.r1.name, 'R_2K2_0805')
self.assertEqual(self.r1.get_absolute_url(), '/part/3/')
def test_category(self):
self.assertEqual(str(self.c1.category), 'Electronics/Capacitors - Capacitors')
orphan = Part.objects.get(name='Orphan')
self.assertIsNone(orphan.category)
self.assertEqual(orphan.category_path, '')
def test_rename_img(self):
img = rename_part_image(self.r1, 'hello.png')
self.assertEqual(img, os.path.join('part_images', 'hello.png'))
def test_stock(self):
# No stock of any resistors
res = Part.objects.filter(description__contains='resistor')
for r in res:
self.assertEqual(r.total_stock, 0)
self.assertEqual(r.available_stock, 0)
def test_barcode(self):
barcode = self.r1.format_barcode(brief=False)
self.assertIn('InvenTree', barcode)
self.assertIn(self.r1.name, barcode)
def test_copy(self):
self.r2.deep_copy(self.r1, image=True, bom=True)
def test_match_names(self):
matches = match_part_names('M2x5 LPHS')
self.assertTrue(len(matches) > 0)
class TestTemplateTest(TestCase):
fixtures = [
'category',
'part',
'location',
'test_templates',
]
def test_template_count(self):
chair = Part.objects.get(pk=10000)
# Tests for the top-level chair object (nothing above it!)
self.assertEqual(chair.test_templates.count(), 5)
self.assertEqual(chair.getTestTemplates().count(), 5)
self.assertEqual(chair.getTestTemplates(required=True).count(), 4)
self.assertEqual(chair.getTestTemplates(required=False).count(), 1)
# Test the lowest-level part which has more associated tests
variant = Part.objects.get(pk=10004)
self.assertEqual(variant.getTestTemplates().count(), 7)
self.assertEqual(variant.getTestTemplates(include_parent=False).count(), 1)
self.assertEqual(variant.getTestTemplates(required=True).count(), 5)
def test_uniqueness(self):
# Test names must be unique for this part and also parts above
variant = Part.objects.get(pk=10004)
with self.assertRaises(ValidationError):
PartTestTemplate.objects.create(
part=variant,
test_name='Record weight'
)
with self.assertRaises(ValidationError):
PartTestTemplate.objects.create(
part=variant,
test_name='Check that chair is especially green'
)
# Also should fail if we attempt to create a test that would generate the same key
with self.assertRaises(ValidationError):
PartTestTemplate.objects.create(
part=variant,
test_name='ReCoRD weiGHT '
)
# But we should be able to create a new one!
n = variant.getTestTemplates().count()
PartTestTemplate.objects.create(part=variant, test_name='A Sample Test')
self.assertEqual(variant.getTestTemplates().count(), n + 1)
class PartSettingsTest(TestCase):
"""
Tests to ensure that the user-configurable default values work as expected.
Some fields for the Part model can have default values specified by the user.
"""
def setUp(self):
# Create a user for auth
user = get_user_model()
self.user = user.objects.create_user(
username='testuser',
email='test@testing.com',
password='password',
is_staff=True
)
def make_part(self):
"""
Helper function to create a simple part
"""
part = Part.objects.create(
name='Test Part',
description='I am but a humble test part',
IPN='IPN-123',
)
return part
def test_defaults(self):
"""
Test that the default values for the part settings are correct
"""
self.assertTrue(part.settings.part_component_default())
self.assertFalse(part.settings.part_purchaseable_default())
self.assertFalse(part.settings.part_salable_default())
self.assertFalse(part.settings.part_trackable_default())
def test_initial(self):
"""
Test the 'initial' default values (no default values have been set)
"""
part = self.make_part()
self.assertTrue(part.component)
self.assertFalse(part.purchaseable)
self.assertFalse(part.salable)
self.assertFalse(part.trackable)
def test_custom(self):
"""
Update some of the part values and re-test
"""
for val in [True, False]:
InvenTreeSetting.set_setting('PART_COMPONENT', val, self.user)
InvenTreeSetting.set_setting('PART_PURCHASEABLE', val, self.user)
InvenTreeSetting.set_setting('PART_SALABLE', val, self.user)
InvenTreeSetting.set_setting('PART_TRACKABLE', val, self.user)
InvenTreeSetting.set_setting('PART_ASSEMBLY', val, self.user)
InvenTreeSetting.set_setting('PART_TEMPLATE', val, self.user)
self.assertEqual(val, InvenTreeSetting.get_setting('PART_COMPONENT'))
self.assertEqual(val, InvenTreeSetting.get_setting('PART_PURCHASEABLE'))
self.assertEqual(val, InvenTreeSetting.get_setting('PART_SALABLE'))
self.assertEqual(val, InvenTreeSetting.get_setting('PART_TRACKABLE'))
part = self.make_part()
self.assertEqual(part.component, val)
self.assertEqual(part.purchaseable, val)
self.assertEqual(part.salable, val)
self.assertEqual(part.trackable, val)
self.assertEqual(part.assembly, val)
self.assertEqual(part.is_template, val)
Part.objects.filter(pk=part.pk).delete()
def test_duplicate_ipn(self):
"""
Test the setting which controls duplicate IPN values
"""
# Create a part
Part.objects.create(name='Hello', description='A thing', IPN='IPN123')
# Attempt to create a duplicate item (should fail)
with self.assertRaises(ValidationError):
Part.objects.create(name='Hello', description='A thing', IPN='IPN123')
# Attempt to create item with duplicate IPN (should be allowed by default)
Part.objects.create(name='Hello', description='A thing', IPN='IPN123', revision='B')
# And attempt again with the same values (should fail)
with self.assertRaises(ValidationError):
Part.objects.create(name='Hello', description='A thing', IPN='IPN123', revision='B')
# Now update the settings so duplicate IPN values are *not* allowed
InvenTreeSetting.set_setting('PART_ALLOW_DUPLICATE_IPN', False, self.user)
with self.assertRaises(ValidationError):
Part.objects.create(name='Hello', description='A thing', IPN='IPN123', revision='C')
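# A quick sketch of the settings round-trip exercised above (assumes a user
# created as in PartSettingsTest.setUp):
#
#   InvenTreeSetting.set_setting('PART_COMPONENT', False, self.user)
#   assert InvenTreeSetting.get_setting('PART_COMPONENT') == False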
|
"""
SoftLayer.tests.managers.hardware_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import copy
import mock
import SoftLayer
from SoftLayer import fixtures
from SoftLayer import managers
from SoftLayer import testing
MINIMAL_TEST_CREATE_ARGS = {
'size': 'S1270_8GB_2X1TBSATA_NORAID',
'hostname': 'unicorn',
'domain': 'giggles.woo',
'location': 'wdc01',
'os': 'UBUNTU_14_64',
'port_speed': 10,
}
class HardwareTests(testing.TestCase):
def set_up(self):
self.hardware = SoftLayer.HardwareManager(self.client)
def test_init_with_ordering_manager(self):
ordering_manager = SoftLayer.OrderingManager(self.client)
mgr = SoftLayer.HardwareManager(self.client, ordering_manager)
self.assertEqual(mgr.ordering_manager, ordering_manager)
def test_list_hardware(self):
        # list_hardware returns the raw result of SoftLayer_Account::getHardware
results = self.hardware.list_hardware()
self.assertEqual(results, fixtures.SoftLayer_Account.getHardware)
self.assert_called_with('SoftLayer_Account', 'getHardware')
def test_list_hardware_with_filters(self):
results = self.hardware.list_hardware(
tags=['tag1', 'tag2'],
cpus=2,
memory=1,
hostname='hostname',
domain='example.com',
datacenter='dal05',
nic_speed=100,
public_ip='1.2.3.4',
private_ip='4.3.2.1',
)
self.assertEqual(results, fixtures.SoftLayer_Account.getHardware)
_filter = {
'hardware': {
'datacenter': {'name': {'operation': '_= dal05'}},
'domain': {'operation': '_= example.com'},
'tagReferences': {
'tag': {'name': {
'operation': 'in',
'options': [
{'name': 'data', 'value': ['tag1', 'tag2']}]
}}
},
'memoryCapacity': {'operation': 1},
'processorPhysicalCoreAmount': {'operation': 2},
'hostname': {'operation': '_= hostname'},
'primaryIpAddress': {'operation': '_= 1.2.3.4'},
'networkComponents': {'maxSpeed': {'operation': 100}},
'primaryBackendIpAddress': {'operation': '_= 4.3.2.1'}}
}
self.assert_called_with('SoftLayer_Account', 'getHardware',
filter=_filter)
def test_resolve_ids_ip(self):
_id = self.hardware._get_ids_from_ip('172.16.1.100')
self.assertEqual(_id, [1000, 1001, 1002, 1003])
_id = self.hardware._get_ids_from_ip('nope')
self.assertEqual(_id, [])
# Now simulate a private IP test
mock = self.set_mock('SoftLayer_Account', 'getHardware')
mock.side_effect = [[], [{'id': 99}]]
_id = self.hardware._get_ids_from_ip('10.0.1.87')
self.assertEqual(_id, [99])
def test_resolve_ids_hostname(self):
_id = self.hardware._get_ids_from_hostname('hardware-test1')
self.assertEqual(_id, [1000, 1001, 1002, 1003])
def test_get_hardware(self):
result = self.hardware.get_hardware(1000)
self.assertEqual(fixtures.SoftLayer_Hardware_Server.getObject, result)
self.assert_called_with('SoftLayer_Hardware_Server', 'getObject',
identifier=1000)
def test_reload(self):
post_uri = 'http://test.sftlyr.ws/test.sh'
result = self.hardware.reload(1, post_uri=post_uri, ssh_keys=[1701])
self.assertEqual(result, 'OK')
self.assert_called_with('SoftLayer_Hardware_Server',
'reloadOperatingSystem',
args=('FORCE',
{'customProvisionScriptUri': post_uri,
'sshKeyIds': [1701]}),
identifier=1)
def test_get_create_options(self):
options = self.hardware.get_create_options()
expected = {
'extras': [{'key': '1_IPV6_ADDRESS', 'name': '1 IPv6 Address'}],
'locations': [{'key': 'wdc01', 'name': 'Washington 1'}],
'operating_systems': [{'key': 'UBUNTU_14_64',
'name': 'Ubuntu / 14.04-64'}],
'port_speeds': [{
'key': '10',
'name': '10 Mbps Public & Private Network Uplinks'
}],
'sizes': [
{
'key': 'S1270_8GB_2X1TBSATA_NORAID',
'name': 'Single Xeon 1270, 8GB Ram, 2x1TB SATA disks, Non-RAID'
},
{
'key': 'DGOLD_6140_384GB_4X960GB_SSD_SED_RAID_10',
'name': 'Dual Xeon Gold, 384GB Ram, 4x960GB SSD, RAID 10'
}
]
}
self.assertEqual(options, expected)
def test_get_create_options_package_missing(self):
packages = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
packages.return_value = []
ex = self.assertRaises(SoftLayer.SoftLayerError, self.hardware.get_create_options)
self.assertEqual("Package BARE_METAL_SERVER does not exist", str(ex))
def test_generate_create_dict_no_items(self):
packages = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
packages_copy = copy.deepcopy(
fixtures.SoftLayer_Product_Package.getAllObjects)
packages_copy[0]['items'] = []
packages.return_value = packages_copy
ex = self.assertRaises(SoftLayer.SoftLayerError,
self.hardware._generate_create_dict,
location="wdc01")
self.assertIn("Could not find valid price", str(ex))
def test_generate_create_dict_no_regions(self):
packages = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
packages_copy = copy.deepcopy(
fixtures.SoftLayer_Product_Package.getAllObjects)
packages_copy[0]['regions'] = []
packages.return_value = packages_copy
ex = self.assertRaises(SoftLayer.SoftLayerError,
self.hardware._generate_create_dict,
**MINIMAL_TEST_CREATE_ARGS)
self.assertIn("Could not find valid location for: 'wdc01'", str(ex))
def test_generate_create_dict_invalid_size(self):
args = {
'size': 'UNKNOWN_SIZE',
'hostname': 'unicorn',
'domain': 'giggles.woo',
'location': 'wdc01',
'os': 'UBUNTU_14_64',
'port_speed': 10,
}
ex = self.assertRaises(SoftLayer.SoftLayerError,
self.hardware._generate_create_dict, **args)
self.assertIn("Could not find valid size for: 'UNKNOWN_SIZE'", str(ex))
def test_generate_create_dict(self):
args = {
'size': 'S1270_8GB_2X1TBSATA_NORAID',
'hostname': 'unicorn',
'domain': 'giggles.woo',
'location': 'wdc01',
'os': 'UBUNTU_14_64',
'port_speed': 10,
'hourly': True,
'extras': ['1_IPV6_ADDRESS'],
'post_uri': 'http://example.com/script.php',
'ssh_keys': [10],
}
expected = {
'hardware': [{
'domain': 'giggles.woo',
'hostname': 'unicorn',
}],
'location': 'WASHINGTON_DC',
'packageId': 200,
'presetId': 64,
'prices': [{'id': 21},
{'id': 420},
{'id': 906},
{'id': 37650},
{'id': 1800},
{'id': 272},
{'id': 17129}],
'useHourlyPricing': True,
'provisionScripts': ['http://example.com/script.php'],
'sshKeys': [{'sshKeyIds': [10]}],
}
data = self.hardware._generate_create_dict(**args)
self.assertEqual(expected, data)
@mock.patch('SoftLayer.managers.hardware.HardwareManager'
'._generate_create_dict')
def test_verify_order(self, create_dict):
create_dict.return_value = {'test': 1, 'verify': 1}
self.hardware.verify_order(test=1, verify=1)
create_dict.assert_called_once_with(test=1, verify=1)
self.assert_called_with('SoftLayer_Product_Order', 'verifyOrder',
args=({'test': 1, 'verify': 1},))
@mock.patch('SoftLayer.managers.hardware.HardwareManager'
'._generate_create_dict')
def test_place_order(self, create_dict):
create_dict.return_value = {'test': 1, 'verify': 1}
self.hardware.place_order(test=1, verify=1)
create_dict.assert_called_once_with(test=1, verify=1)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
args=({'test': 1, 'verify': 1},))
def test_cancel_hardware_without_reason(self):
mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
mock.return_value = {'id': 987, 'billingItem': {'id': 1234},
'openCancellationTicket': {'id': 1234}}
result = self.hardware.cancel_hardware(987)
self.assertEqual(result, True)
reasons = self.hardware.get_cancellation_reasons()
args = (False, False, reasons['unneeded'], '')
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem', identifier=1234, args=args)
def test_cancel_hardware_with_reason_and_comment(self):
mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
mock.return_value = {'id': 987, 'billingItem': {'id': 1234},
'openCancellationTicket': {'id': 1234}}
result = self.hardware.cancel_hardware(6327, reason='sales', comment='Test Comment')
self.assertEqual(result, True)
reasons = self.hardware.get_cancellation_reasons()
args = (False, False, reasons['sales'], 'Test Comment')
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem', identifier=1234, args=args)
def test_cancel_hardware(self):
mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
mock.return_value = {'id': 987, 'billingItem': {'id': 6327},
'openCancellationTicket': {'id': 4567}}
result = self.hardware.cancel_hardware(6327)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem',
identifier=6327, args=(False, False, 'No longer needed', ''))
def test_cancel_hardware_no_billing_item(self):
mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
        mock.return_value = {'id': 987,
                             'openCancellationTicket': {'id': 1234}}
ex = self.assertRaises(SoftLayer.SoftLayerError,
self.hardware.cancel_hardware,
6327)
self.assertEqual("Ticket #1234 already exists for this server", str(ex))
def test_cancel_hardware_monthly_now(self):
mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
mock.return_value = {'id': 987, 'billingItem': {'id': 1234},
'openCancellationTicket': {'id': 4567},
'hourlyBillingFlag': False}
with self.assertLogs('SoftLayer.managers.hardware', level='INFO') as logs:
result = self.hardware.cancel_hardware(987, immediate=True)
        # there should be 2 info messages here
self.assertEqual(len(logs.records), 2)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem',
identifier=1234, args=(False, False, 'No longer needed', ''))
cancel_message = "Please reclaim this server ASAP, it is no longer needed. Thankyou."
self.assert_called_with('SoftLayer_Ticket', 'addUpdate',
identifier=4567, args=({'entry': cancel_message},))
def test_cancel_hardware_monthly_whenever(self):
mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
mock.return_value = {'id': 987, 'billingItem': {'id': 6327},
'openCancellationTicket': {'id': 4567}}
with self.assertLogs('SoftLayer.managers.hardware', level='INFO') as logs:
result = self.hardware.cancel_hardware(987, immediate=False)
        # there should be 1 info message here
self.assertEqual(len(logs.records), 1)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem',
identifier=6327, args=(False, False, 'No longer needed', ''))
def test_change_port_speed_public(self):
self.hardware.change_port_speed(2, True, 100)
self.assert_called_with('SoftLayer_Hardware_Server',
'setPublicNetworkInterfaceSpeed',
identifier=2,
args=(100,))
def test_change_port_speed_private(self):
self.hardware.change_port_speed(2, False, 10)
self.assert_called_with('SoftLayer_Hardware_Server',
'setPrivateNetworkInterfaceSpeed',
identifier=2,
args=(10,))
def test_edit_meta(self):
# Test editing user data
self.hardware.edit(100, userdata='my data')
self.assert_called_with('SoftLayer_Hardware_Server',
'setUserMetadata',
args=(['my data'],),
identifier=100)
def test_edit_blank(self):
# Now test a blank edit
        self.assertTrue(self.hardware.edit(100))
self.assertEqual(self.calls(), [])
def test_edit(self):
# Finally, test a full edit
self.hardware.edit(100,
hostname='new-host',
domain='new.sftlyr.ws',
notes='random notes')
self.assert_called_with('SoftLayer_Hardware_Server',
'editObject',
args=({
'hostname': 'new-host',
'domain': 'new.sftlyr.ws',
'notes': 'random notes',
},),
identifier=100)
def test_rescue(self):
result = self.hardware.rescue(1234)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Hardware_Server',
'bootToRescueLayer',
identifier=1234)
def test_update_firmware(self):
result = self.hardware.update_firmware(100)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Hardware_Server',
'createFirmwareUpdateTransaction',
identifier=100, args=(1, 1, 1, 1))
def test_update_firmware_selective(self):
result = self.hardware.update_firmware(100,
ipmi=False,
hard_drive=False)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Hardware_Server',
'createFirmwareUpdateTransaction',
identifier=100, args=(0, 1, 1, 0))
def test_reflash_firmware(self):
result = self.hardware.reflash_firmware(100)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Hardware_Server',
'createFirmwareReflashTransaction',
identifier=100, args=(1, 1, 1))
def test_reflash_firmware_selective(self):
result = self.hardware.reflash_firmware(100,
raid_controller=False,
bios=False)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Hardware_Server',
'createFirmwareReflashTransaction',
identifier=100, args=(1, 0, 0))
class HardwareHelperTests(testing.TestCase):
def test_get_extra_price_id_no_items(self):
ex = self.assertRaises(SoftLayer.SoftLayerError,
managers.hardware._get_extra_price_id,
[], 'test', True, None)
self.assertEqual("Could not find valid price for extra option, 'test'",
str(ex))
def test_get_default_price_id_item_not_first(self):
items = [{
'itemCategory': {'categoryCode': 'unknown', 'id': 325},
'keyName': 'UNKNOWN',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '10.0',
'id': 1245172,
'recurringFee': '1.0'}],
}]
ex = self.assertRaises(SoftLayer.SoftLayerError,
managers.hardware._get_default_price_id,
items, 'unknown', True, None)
self.assertEqual("Could not find valid price for 'unknown' option",
str(ex))
def test_get_default_price_id_no_items(self):
ex = self.assertRaises(SoftLayer.SoftLayerError,
managers.hardware._get_default_price_id,
[], 'test', True, None)
self.assertEqual("Could not find valid price for 'test' option",
str(ex))
def test_get_bandwidth_price_id_no_items(self):
ex = self.assertRaises(SoftLayer.SoftLayerError,
managers.hardware._get_bandwidth_price_id,
[], hourly=True, no_public=False)
self.assertEqual("Could not find valid price for bandwidth option",
str(ex))
def test_get_os_price_id_no_items(self):
ex = self.assertRaises(SoftLayer.SoftLayerError,
managers.hardware._get_os_price_id,
[], 'UBUNTU_14_64', None)
self.assertEqual("Could not find valid price for os: 'UBUNTU_14_64'",
str(ex))
def test_get_port_speed_price_id_no_items(self):
ex = self.assertRaises(SoftLayer.SoftLayerError,
managers.hardware._get_port_speed_price_id,
[], 10, True, None)
self.assertEqual("Could not find valid price for port speed: '10'",
str(ex))
|