content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
from json import load
import os
import argparse
import random
from copy import deepcopy
import torchvision
import torchvision.transforms as transforms
from torch import nn
import sys
import torch
import numpy as np
import cvxopt
torch.manual_seed(0)
from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.utils.aggregator import Aggregators
from fedlab.utils.serialization import SerializationTool
from fedlab.utils.functional import evaluate
from fedlab.utils.functional import get_best_gpu, load_dict
sys.path.append("../")
from models.cnn import CNN_MNIST
def quadprog(Q, q, G, h, A, b):
    """
    Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html
    Output: Numpy array of the solution
    """
    # NOTE: cvxopt.matrix(list_of_lists) treats each inner list as a COLUMN,
    # so building from numpy's .tolist() implicitly transposes the arrays.
    # The .T calls in the qp() invocation below undo that where it matters
    # (Q is symmetric, so its transpose is harmless).
    Q = cvxopt.matrix(Q.tolist())
    q = cvxopt.matrix(q.tolist(), tc='d')  # tc='d' forces double-precision entries
    G = cvxopt.matrix(G.tolist())
    h = cvxopt.matrix(h.tolist())
    A = cvxopt.matrix(A.tolist())
    b = cvxopt.matrix(b.tolist(), tc='d')
    # cvxopt solves: min 1/2 x^T Q x + q^T x  s.t.  G x <= h,  A x = b
    sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
    return np.array(sol['x'])
def optim_lambdas(gradients, lambda0):
    """Solve for the FedMGDA+ aggregation weights via a QP.

    Minimises ||J^T x||^2 over the weight vector x, subject to
    sum(x) == 1 and each weight staying within +/- epsilon of its value in
    ``lambda0`` (clipped to [0, 1]).

    Args:
        gradients: list of 1-D torch tensors, one (normalised) pseudo-gradient
            per sampled client.
        lambda0: sequence with the current weight of each client.

    Returns:
        Numpy array of shape (n, 1) with the optimised weights.
    """
    epsilon = 0.5
    n = len(gradients)
    # Stack the per-client gradients into the Jacobian J_t (n x d).
    jacobian = np.array([grad.numpy() for grad in gradients])
    # Quadratic objective: min 1/2 x^T Q x with Q = 2 * J J^T; linear term is zero.
    Q = 2 * np.dot(jacobian, jacobian.T)
    q = np.zeros((n, 1))
    # Equality constraint: the weights sum to one.
    A = np.ones(n).T
    b = np.array([1])
    # Box bounds around lambda0, clipped to the unit interval.
    lb = np.maximum(0, np.asarray(lambda0) - epsilon)
    ub = np.minimum(1, np.asarray(lambda0) + epsilon)
    # Encode lb <= x <= ub as G x <= h:
    #   -x <= -lb  (first n rows)  and  x <= ub  (last n rows)
    G = np.vstack((-np.eye(n), np.eye(n)))
    h = np.concatenate((-lb, ub)).reshape(2 * n, 1)
    return quadprog(Q, q, G, h, A, b)
# python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --partition iid

# configuration
parser = argparse.ArgumentParser(description="Standalone training example")
parser.add_argument("--total_client", type=int, default=10)
parser.add_argument("--com_round", type=int, default=5)
parser.add_argument("--sample_ratio", type=float)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--lr", type=float)
parser.add_argument("--epochs", type=int)
args = parser.parse_args()

# get raw dataset
root = "../datasets/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
                                      train=True,
                                      download=True,
                                      transform=transforms.ToTensor())
testset = torchvision.datasets.MNIST(root=root,
                                     train=False,
                                     download=True,
                                     transform=transforms.ToTensor())
# The whole test set is evaluated in a single batch.
test_loader = torch.utils.data.DataLoader(testset,
                                          batch_size=len(testset),
                                          drop_last=False,
                                          shuffle=False)

# setup
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
gpu = get_best_gpu()
model = CNN_MNIST().cuda(gpu)

# FL settings
num_per_round = int(args.total_client * args.sample_ratio)  # clients sampled per round
aggregator = Aggregators.fedavg_aggregate
total_client_num = args.total_client  # total number of clients
data_indices = load_dict("./mnist_noniid.pkl")  # per-client data partition (non-IID)

# fedlab setup
local_model = deepcopy(model)
trainer = SubsetSerialTrainer(model=local_model,
                              dataset=trainset,
                              data_slices=data_indices,
                              aggregator=aggregator,
                              args={
                                  "batch_size": args.batch_size,
                                  "epochs": args.epochs,
                                  "lr": args.lr
                              })
# Aggregation weights, initialised uniform over the sampled clients.
dynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round

# train procedure
to_select = [i for i in range(total_client_num)]
for round in range(args.com_round):  # NOTE(review): `round` shadows the builtin
    model_parameters = SerializationTool.serialize_model(model)
    selection = random.sample(to_select, num_per_round)
    # Local training on each selected client; aggregate=False returns the
    # individual client models instead of their average.
    parameters = trainer.train(model_parameters=model_parameters,
                               id_list=selection,
                               aggregate=False)

    # Pseudo-gradients: old global weights minus each client's new weights.
    # (The comprehension variable does not leak in Python 3, so the global
    # `model` above is untouched.)
    gradients = [model_parameters - model for model in parameters]
    for i, grad in enumerate(gradients):
        # Normalise each pseudo-gradient (FedMGDA+).
        gradients[i] = grad / grad.norm()
    print(len(gradients))
    print(gradients[0].shape)

    # calculate lambda: QP around a uniform starting point
    lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]
    dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)
    # Weighted combination of the pseudo-gradients, applied as a global step.
    dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)
    serialized_parameters = model_parameters - dt * args.lr
    SerializationTool.deserialize_model(model, serialized_parameters)

    criterion = nn.CrossEntropyLoss()
    loss, acc = evaluate(model, criterion, test_loader)
    print("loss: {:.4f}, acc: {:.2f}".format(loss, acc))
| fedlab_benchmarks/fedmgda+/standalone.py | 5,034 | Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html
Output: Numpy array of the solution
target function equality constrint boundary python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --partition iid configuration get raw dataset setup FL settings client总数 fedlab setup train procedure calculate lamda | 386 | en | 0.408107 |
""" NNAPI Systrace parser - tracking of call tree based on trace lines
See contract-between-code-and-parser.txt for the
specification (cases in the specification are referred to with SPEC).
"""
import re
import sys
from parser.naming import (subphases, translate_hidl_mark_to_nn_and_tag,
get_function_name_from_mark, make_tag)
from parser.naming import LAYER_CPU, LAYER_DRIVER, LAYER_RUNTIME, LAYER_APPLICATION
from parser.naming import MARKER_SWITCH, MARKER_SUBTRACT
from parser.naming import PHASE_EXECUTION, PHASE_OVERALL, PHASE_WARMUP, PHASE_BENCHMARK
from parser.tree import SingleThreadCallTree
class AppPhase(object):
    """Tracks the overall application phase as a stack, used to split warmup
    from benchmark.

    Kept separate from the per-thread call trees so the phase distinction can
    be propagated to driver-side statistics as well.
    """

    def __init__(self):
        self.reset()

    def current(self):
        """Return the phase on top of the stack, or PHASE_OVERALL when empty."""
        return self.stack[-1] if self.stack else PHASE_OVERALL

    def push(self, phase):
        self.stack.append(phase)

    def pop(self):
        self.stack.pop()

    def reset(self):
        self.stack = []
class Tracker(object):
    """ Class to track the stack trace of a single thread and feed it into a SingleThreadCallTree
        as well as keeping track of entry and exit times for functions.

        Exposes statistics for a single thread, transforming the call tree as needed.
        All statistics are in milliseconds.

        Layer Runtime, Phase Execution (LR_PE) is special-cased, see comment in get_stat().

        Subphases of Execution are aggregated towards the overall Execution phase as needed.
    """

    def __init__(self, tgid, is_driver, app_phase):
        # tgid: thread group id of the traced process.
        # is_driver: whether this process is the NN driver (affects layer naming).
        # app_phase: shared AppPhase instance tracking warmup/benchmark.
        self.tgid = tgid
        self.is_driver = is_driver
        self.app_phase = app_phase

        # Match the trace string
        # "[NN_LA_PP]funcE1" in "B|<thread1>|[NN_LA_PP]funcE1"
        # "[NN_LC_PCO]funcC1" in "B|<thread1>|[SW][NN_LC_PCO]funcC1"
        self.matcher = re.compile(r"B\|\d+\|.*\[([^]]+)\]\[?([^]])\]?")

        self.reset()

    def reset(self):
        """Clear all accumulated state so the tracker can be reused."""
        self.stats = {}
        self.items = {}
        self.mytree = SingleThreadCallTree()
        self.begins_and_ends_ms = {}
        self.la_pe_counts = {}
        self.debugstring = "\n"

    def handle_mark(self, time, mark):
        """ Handle a single trace item (scoped entry and exit).
            Translates:
            - Automatically generated HIDL traces into NNTRACE layers and phases
            - SPEC:Switch phase during function into dummy items
            - SPEC:Subtracting time when nesting is violated into "subtract"
              markers
            - CPU/Driver layer distinction based on whether the process is the
              driver or an application
            This function is called multiple times for a single application run,
            afterwards the statistics can be calculated.
        """
        if mark[0] == "B":
            switch = False
            subtract = False
            # Workarounds for wrong tracepoints in early versions
            # TODO(mikie): remove later
            if ("ANeuralNetworksEvent_free" in mark) or ("ANeuralNetworksExecution_free" in mark):
                mark = mark.replace("_PT", "_PE")
            # Workarounds for trace marker for getSupportedExtensions (fixed in ag/9484333)
            if ("getSupportedExtensions" in mark):
                mark = mark.replace("_PC", "_PI")
            elif ("[SW][NN_LA_PR]executeWithCompilation" in mark):
                mark = mark.replace("[SW]", "")
            if MARKER_SWITCH in mark:
                switch = True
            if MARKER_SUBTRACT in mark:
                subtract = True
            if switch:
                # End previous item
                self.handle_mark(time, "E")
                # Push a placeholder item that will get popped by the 'real' end of the
                # previous item.
                self.mytree.push_dummy(time)
            m = self.matcher.search(mark)
            if m is None:
                # Not an explicit NNTRACE mark; try to translate an
                # auto-generated HIDL trace mark instead.
                tag = translate_hidl_mark_to_nn_and_tag(mark)
                if tag is None:
                    raise Exception("Couldn't parse mark " + mark)
            else:
                tag = m.group(1)
            # Tags look like "NN_<layer>_<phase>".
            [_, layer, phase] = tag.split("_")
            if layer == LAYER_APPLICATION and phase in [PHASE_WARMUP, PHASE_BENCHMARK]:
                self.app_phase.push(phase)
            # Driver-layer marks in a non-driver process are really CPU
            # fallback work, and vice versa.
            if not self.is_driver:
                layer = layer.replace(LAYER_DRIVER, LAYER_CPU)
            else:
                layer = layer.replace(LAYER_CPU, LAYER_DRIVER)
            if layer == LAYER_APPLICATION and phase == PHASE_EXECUTION:
                # Count application-level executions per app phase (used as a
                # fallback execution count in get_execution_count()).
                self.la_pe_counts[self.app_phase.current()] = (
                    self.la_pe_counts.get(self.app_phase.current(), 0) + 1)
            self.mytree.push(time, mark, layer, phase, self.app_phase.current(), subtract)
        elif mark[0] == "E":
            try:
                node = self.mytree.pop(time)
                if node.is_dummy():  # Placeholder item
                    pass
                else:
                    if node.layer == LAYER_APPLICATION and node.phase in [PHASE_WARMUP, PHASE_BENCHMARK]:
                        self.app_phase.pop()
                    # Record [begin_ms, end_ms] per "<app_phase>::<function>".
                    function = node.app_phase + "::" + get_function_name_from_mark(node.mark)
                    self.begins_and_ends_ms[function] = (self.begins_and_ends_ms.get(function, []) +
                                                         [[float(node.start_time_s) * 1000.0,
                                                           float(node.end_time_s) * 1000.0]])
            except IndexError as e:
                raise Exception("Unable to process a trace termination mark, please check that the collected trace are including full application lifecycles.\n") from e

    def is_complete(self):
        """ Checks if we've seen all end tracepoints for the begin tracepoints.
        """
        return self.mytree.current.is_root()

    def calculate_stats(self):
        """Transform the call tree and accumulate per-tag elapsed-time stats."""
        assert self.is_complete()
        self.mytree.remove_ignored()
        self.mytree.remove_dummies()
        self.mytree.copy_subtracted_init_and_wrong_la()
        self.mytree.add_missing_la_nodes()
        # self.mytree.print()
        self.mytree.validate_nesting()

        def recurse(node, prev_layer, prev_phase, indent, in_pe_layers):
            # Walk the tree accumulating elapsed time into self.stats/self.items,
            # keyed by "<app_phase>_<layer>_<phase>". in_pe_layers tracks which
            # layers are currently inside an Execution phase so subphases are
            # only rolled up into the outermost Execution per layer.
            [begun, mark, layer, phase] = [
                node.start_time_s, node.mark, node.layer, node.phase()]
            time = node.end_time_s
            tag = None
            elapsed0 = "DETAIL"
            elapsed1 = node.elapsed_less_subtracted_ms()
            if elapsed1 is None:
                raise Exception("Elapsed for {} returned None".format(node.to_str()))
            if not node.is_added_detail() and not node.subtract:
                tag = node.app_phase + "_" + layer + "_" + phase
                elapsed0 = elapsed1
                self.stats[tag] = self.stats.get(tag, 0.0) + elapsed0
                self.items[tag] = self.items.get(tag, []) + [
                    mark + " " + str(elapsed0) + " " + str(elapsed1) + " " + tag]
            if phase in subphases[PHASE_EXECUTION]:
                if not in_pe_layers.get(layer):
                    # Aggregate Execution subphases towards the layer's overall
                    # Execution phase.
                    pe_tag = node.app_phase + "_" + make_tag(layer, PHASE_EXECUTION)
                    self.stats[pe_tag] = self.stats.get(pe_tag, 0.0) + elapsed0
                    self.items[pe_tag] = self.items.get(pe_tag, []) + [
                        mark + " " + str(elapsed0) + " " + str(elapsed1) + " " + pe_tag]
            if phase == PHASE_EXECUTION:
                in_pe_layers[layer] = in_pe_layers.get(layer, 0) + 1
            for c in node.children:
                recurse(c, layer or prev_layer, phase or prev_phase,
                        indent + " ", in_pe_layers)
            if phase == PHASE_EXECUTION:
                in_pe_layers[layer] = in_pe_layers[layer] - 1
            return

        for top in self.mytree.root.children:
            recurse(top, None, None, "", {})
        self.debugstring = self.mytree.to_str()

    # We need to special case the driver execution time because:
    # - The existing drivers don't have tracing, so we rely on HIDL traces
    # - Best we can do is to take the start of the HIDL server side call as
    #   the starting point (which includes a bit of overhead, but not much) and
    #   the start of the callback as the end point (which should be pretty
    #   accurate)
    # Note that the begin and end may be on different threads, hence the
    # calculation needs to happen in aggregation rather than here.
    def get_ld_pe_begins(self, app_phase):
        return self.get_begins(app_phase, "HIDL::IPreparedModel::execute::server")

    def get_ld_pe_ends(self, app_phase):
        # Intentionally get_begins: the *start* of the callback marks the end
        # of driver execution (see the comment block above).
        return self.get_begins(app_phase, "HIDL::IExecutionCallback::notify::client")

    def get_stat(self, tag, app_phase, special_case_pe=True):
        """Return the accumulated milliseconds for (app_phase, tag).

        Lazily computes the stats on first use. Runtime-layer Execution is
        special-cased (see inline comment) unless special_case_pe is False.
        """
        if not self.stats and not self.mytree.is_empty():
            self.calculate_stats()
        if tag == make_tag(LAYER_RUNTIME, PHASE_EXECUTION) and special_case_pe:
            # Execution is exposed as an asynchronous event from the runtime, we
            # calculate the runtime time as starting from when the async operation is
            # kicked off until wait finishes + synchronous setup and teardown calls.
            # This has two limitations:
            # - multithreaded usage will not work correctly
            # - should the application spend so much time before calling wait that
            #   execution has already finished, the time would get allocated to the
            #   runtime incorrectly
            async_starts = self.get_begins(app_phase, "ANeuralNetworksExecution_startCompute")
            async_ends = self.get_ends(app_phase, "ANeuralNetworksEvent_wait")
            elapsed = 0.0
            for i in range(0, len(async_starts)):
                elapsed = elapsed + (async_ends[i] - async_starts[i])
            for sync in ["ANeuralNetworksExecution_create", "ANeuralNetworksExecution_free",
                         "ANeuralNetworksEvent_create", "ANeuralNetworksEvent_free",
                         "ANeuralNetworksExecution_setInput", "ANeuralNetworksExecution_setOutput",
                         "ANeuralNetworksExecution_setInputFromMemory",
                         "ANeuralNetworksExecution_setOutputFromMemory"]:
                sync_starts = self.get_begins(app_phase, sync)
                sync_ends = self.get_ends(app_phase, sync)
                for i in range(0, len(sync_starts)):
                    elapsed = elapsed + (sync_ends[i] - sync_starts[i])
            return elapsed
        return self.stats.get(app_phase + "_" + tag, 0.0)

    def get_execution_count(self, app_phase):
        # ANeuralNetworksExecution_create is reliable and comes from the runtime,
        # but not available pre-P
        count = len(self.get_begins(app_phase, "ANeuralNetworksExecution_create"))
        if count > 0:
            return count
        # Application may have added tracepoints
        return self.la_pe_counts.get(app_phase, 0)

    def get_begins(self, app_phase, function):
        """Return the begin timestamps (ms) recorded for a function in app_phase."""
        name = app_phase + "::" + function
        return [begin_and_end[0] for begin_and_end in self.begins_and_ends_ms.get(name, [])]

    def get_ends(self, app_phase, function):
        """Return the end timestamps (ms) recorded for a function in app_phase."""
        name = app_phase + "::" + function
        return [begin_and_end[1] for begin_and_end in self.begins_and_ends_ms.get(name, [])]

    def print_stats(self):
        """Debug output: per-tag totals plus the individual items behind them."""
        if not self.stats:
            self.calculate_stats()
        print(self.tgid, "Driver" if self.is_driver else "App")
        for tag in self.stats:
            print(tag, self.stats[tag])
            if self.items.get(tag):
                for item in self.items[tag]:
                    print("  ", item)
            else:
                print("  ", "calculated only")

    def print(self):
        self.mytree.print()
| tools/systrace_parser/parser/tracker.py | 10,908 | Class to track the overall phase of the program. Used to split up warmup and benchmark.
Needs to be separate from the call trees to propagate the difference to driver.
Class to track the stack trace of a single thread and feed it into a SingleThreadCallTree
as well as keeping track of entry and exit times for functions.
Exposes statistics for a single thread, transforming the call tree as needed.
All statistics are in milliseconds.
Layer Runtime, Phase Execution (LR_PE) is special-cased, see comment in get_stat().
Subphases of Execution are aggregated towards the overall Execution phase as needed.
Handle a single trace item (scoped entry and exit).
Translates:
- Automatically generated HIDL traces into NNTRACE layers and phases
- SPEC:Switch phase during function into dummy items
- SPEC:Subtracting time when nesting is violated into "subtract"
markers
- CPU/Driver layer distinction based on whether the process is the
driver or an application
This function is called multiple times for a single application run,
afterwards the statistics can be calculated.
Checks if we've seen all end tracepoints for the begin tracepoints.
NNAPI Systrace parser - tracking of call tree based on trace lines
See contract-between-code-and-parser.txt for the
specification (cases in the specification are referred to with SPEC).
Match the trace string "[NN_LA_PP]funcE1" in "B|<thread1>|[NN_LA_PP]funcE1" "[NN_LC_PCO]funcC1" in "B|<thread1>|[SW][NN_LC_PCO]funcC1" Workarounds for wrong tracepoints in early versions TODO(mikie): remove later Workarounds for trace marker for getSupportedExtensions (fixed in ag/9484333) End previous item Push a placeholder item that will get popped by the 'real' end of the previous item. Placeholder item self.mytree.print() We need to special case the driver execution time because: - The existing drivers don't have tracing, so we rely on HIDL traces - Best we can do is to take the start of the HIDL server side call as the starting point (which includes a bit of overhead, but not much) and the start of the callback as the end point (which should be pretty accurate) Note that the begin and end may be on different threads, hence the calculation needs to happen in aggregation rather than here. Execution is exposed as an asynchronous event from the runtime, we calculate the runtime time as starting from when the async operation is kicked off until wait finishes + synchronous setup and teardown calls. This has two limitations: - multithreaded usage will not work correctly - should the application spend so much time before calling wait that execution has already finished, the time would get allocated to the runtime incorrectly ANeuralNetworksExecution_create is reliable and comes from the runtime, but not available pre-P Application may have added tracepoints | 2,849 | en | 0.925993 |
from __future__ import print_function
import os
import percy
from percy import errors
from percy import utils
__all__ = ['Runner']
class Runner(object):
    """Coordinates a Percy build lifecycle: create the build, upload resources
    the server reports missing, create snapshots, and finalize.

    Percy is enabled only when the PERCY_ENABLE environment variable is unset
    or '1'; when disabled, all public methods become silent no-ops.
    """

    def __init__(self, loader=None, config=None, client=None):
        self.loader = loader
        self.config = config or percy.Config()
        self.client = client or percy.Client(config=self.config)
        self._current_build = None
        self._is_enabled = os.getenv('PERCY_ENABLE', '1') == '1'

        # Sanity check environment and auth setup. If in CI and Percy is disabled, print an error.
        if self._is_enabled:
            try:
                self.client.config.access_token
            except errors.AuthError:
                if self.client.environment.current_ci:
                    utils.print_error('[percy] Warning: Percy is disabled, no PERCY_TOKEN set.')
                self._is_enabled = False

    def initialize_build(self, **kwargs):
        """Create a new Percy build and upload any build resources the server
        reports as missing. Extra kwargs are forwarded to create_build."""
        # Silently pass if Percy is disabled.
        if not self._is_enabled:
            return

        build_resources = self.loader.build_resources if self.loader else []
        sha_to_build_resource = {resource.sha: resource for resource in build_resources}

        self._current_build = self.client.create_build(resources=build_resources, **kwargs)
        try:
            missing_resources = self._current_build['data']['relationships']['missing-resources']
            missing_resources = missing_resources.get('data', [])
            for missing_resource in missing_resources:
                sha = missing_resource['id']
                resource = sha_to_build_resource.get(sha)
                # This resource should always exist, but if by chance it doesn't we make it safe here.
                # A nicer error will be raised by the finalize API when the resource is still missing.
                if resource:
                    print('Uploading new build resource: {}'.format(resource.resource_url))
                    # Optimization: we don't hold all build resources in memory. Instead we store a
                    # "local_path" variable that can be used to read the file again if it is needed.
                    if resource.local_path:
                        with open(resource.local_path, 'rb') as f:
                            content = f.read()
                    else:
                        content = resource.content
                    self.client.upload_resource(self._current_build['data']['id'], content)
        except KeyError:
            # NOTE(review): best-effort fallback — an unexpected API response
            # shape is dumped for debugging instead of raising; confirm this
            # is the intended behavior.
            print(self._current_build)

    def snapshot(self, **kwargs):
        """Create a snapshot in the current build from the loader's root
        resource, uploading it if the server reports it missing.

        Raises:
            errors.UninitializedBuildError: if initialize_build was not called.
        """
        # Silently pass if Percy is disabled.
        if not self._is_enabled:
            return
        if not self._current_build:
            raise errors.UninitializedBuildError('Cannot call snapshot before build is initialized')

        root_resource = self.loader.snapshot_resources[0]
        build_id = self._current_build['data']['id']
        snapshot_data = self.client.create_snapshot(build_id, [root_resource], **kwargs)

        missing_resources = snapshot_data['data']['relationships']['missing-resources']
        missing_resources = missing_resources.get('data', [])
        if missing_resources:
            # There can only be one missing resource in this case, the root_resource.
            self.client.upload_resource(build_id, root_resource.content)

        self.client.finalize_snapshot(snapshot_data['data']['id'])

    def finalize_build(self):
        """Finalize and clear the current build.

        Raises:
            errors.UninitializedBuildError: if initialize_build was not called.
        """
        # Silently pass if Percy is disabled.
        if not self._is_enabled:
            return
        if not self._current_build:
            raise errors.UninitializedBuildError(
                'Cannot finalize_build before build is initialized.')
        self.client.finalize_build(self._current_build['data']['id'])
        self._current_build = None
| percy/runner.py | 3,904 | Sanity check environment and auth setup. If in CI and Percy is disabled, print an error. Silently pass if Percy is disabled. This resource should always exist, but if by chance it doesn't we make it safe here. A nicer error will be raised by the finalize API when the resource is still missing. Optimization: we don't hold all build resources in memory. Instead we store a "local_path" variable that be used to read the file again if it is needed. Silently pass if Percy is disabled. There can only be one missing resource in this case, the root_resource. Silently pass if Percy is disabled. | 591 | en | 0.933328 |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from armi.materials.water import SaturatedWater, SaturatedSteam
"""
unit tests for water materials
"""
class Test_Water(unittest.TestCase):
    """
    unit tests for water materials
    """

    def _check_saturation_properties_by_ratio(
        self,
        Tk,
        ref_vapor_pressure,
        ref_dp_dT,
        ref_saturated_water_rho,
        ref_saturated_steam_rho,
        ref_alpha,
        ref_saturated_water_enthalpy,
        ref_saturated_steam_enthalpy,
        ref_phi,
        ref_saturated_water_entropy,
        ref_saturated_steam_entropy,
    ):
        """Compare saturated water/steam properties at Tk against IAPWS-IF97
        reference values.

        All quantities are checked as ratios (value / reference ~= 1, 3 decimal
        places; enthalpies to 2 places) except the densities, which are compared
        directly to 0 decimal places.
        """
        water = SaturatedWater()
        steam = SaturatedSteam()

        self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(
            ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
        )
        self.assertAlmostEqual(
            ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
        )
        self.assertAlmostEqual(
            ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2
        )
        self.assertAlmostEqual(
            ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
        )
        self.assertAlmostEqual(
            ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
        )
        self.assertAlmostEqual(
            ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
        )
        self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)

    def test_water_at_freezing(self):
        """
        Reproduce verification results from IAPWS-IF97 for water at 0C

        http://www.iapws.org/relguide/supsat.pdf

        NOTE: this case mixes absolute and ratio comparisons (several reference
        values are near zero), so it does not use the shared ratio helper.
        """
        water = SaturatedWater()
        steam = SaturatedSteam()

        Tk = 273.16
        ref_vapor_pressure = 611.657
        ref_dp_dT = 44.436693
        ref_saturated_water_rho = 999.789
        ref_saturated_steam_rho = 0.00485426
        ref_alpha = -11.529101
        ref_saturated_water_enthalpy = 0.611786
        ref_saturated_steam_enthalpy = 2500.5e3
        ref_phi = -0.04
        ref_saturated_water_entropy = 0
        ref_saturated_steam_entropy = 9.154e3

        self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)
        self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)
        self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)
        self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)
        self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(
            ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
        )
        self.assertAlmostEqual(
            ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
        )
        self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)
        self.assertAlmostEqual(
            ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
        )
        self.assertAlmostEqual(
            ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
        )
        self.assertAlmostEqual(
            ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
        )
        self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)
        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)

    def test_water_at_boiling(self):
        """
        Reproduce verification results from IAPWS-IF97 for water at 100C

        http://www.iapws.org/relguide/supsat.pdf
        """
        self._check_saturation_properties_by_ratio(
            Tk=373.1243,
            ref_vapor_pressure=0.101325e6,
            ref_dp_dT=3.616e3,
            ref_saturated_water_rho=958.365,
            ref_saturated_steam_rho=0.597586,
            ref_alpha=417.65e3,
            ref_saturated_water_enthalpy=417.05e3,
            ref_saturated_steam_enthalpy=2675.7e3,
            ref_phi=1.303e3,
            ref_saturated_water_entropy=1.307e3,
            ref_saturated_steam_entropy=7.355e3,
        )

    def test_water_at_critcalPoint(self):
        """
        Reproduce verification results from IAPWS-IF97 for water at 647.096K

        http://www.iapws.org/relguide/supsat.pdf

        NOTE: method name typo ("critcal") kept so any external test selection
        by name keeps working.
        """
        self._check_saturation_properties_by_ratio(
            Tk=647.096,
            ref_vapor_pressure=22.064e6,
            ref_dp_dT=268e3,
            ref_saturated_water_rho=322,
            ref_saturated_steam_rho=322,
            ref_alpha=1548e3,
            ref_saturated_water_enthalpy=2086.6e3,
            ref_saturated_steam_enthalpy=2086.6e3,
            ref_phi=3.578e3,
            ref_saturated_water_entropy=4.410e3,
            ref_saturated_steam_entropy=4.410e3,
        )
# Allow running this test module directly: python test_water.py
if __name__ == "__main__":
    unittest.main()
| armi/materials/tests/test_water.py | 6,934 | unit tests for water materials
Reproduce verification results from IAPWS-IF97 for water at 100C
http://www.iapws.org/relguide/supsat.pdf
Reproduce verification results from IAPWS-IF97 for water at 647.096K
http://www.iapws.org/relguide/supsat.pdf
Reproduce verification results from IAPWS-IF97 for water at 0C
http://www.iapws.org/relguide/supsat.pdf
Copyright 2019 TerraPower, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 905 | en | 0.817059 |
# Fix the code so that there's no error!
def count_evens(start, end):
    """Returns the number of even numbers between start and end (inclusive)."""
    # sum over the inclusive range; an empty range (start > end) yields 0,
    # matching the original while-loop behavior.
    return sum(1 for number in range(start, end + 1) if number % 2 == 0)
def count_multiples(start, end, divisor):
    """Returns the number of multiples of divisor between start and end (inclusive)."""
    # sum over the inclusive range; an empty range (start > end) yields 0,
    # matching the original while-loop behavior. Raises ZeroDivisionError
    # for divisor == 0, as the original did.
    return sum(1 for number in range(start, end + 1) if number % divisor == 0)
count_both = count_evens(10, 20) + count_multiples(10, 20, 3)
| exercise_brokencounts_solution.py | 615 | Returns the number of even numbers between start and end.
Returns the number of multiples of divisor between start and end.
Fix the code so that there's no error! | 164 | en | 0.906706 |
"""My nifty plot-level RGB algorithm
"""
# Importing modules. Please add any additional import statements below
import numpy as np
# Definitions
# Please replace these definitions' values with the correct ones
VERSION = '1.0'

# Information on the creator of this algorithm
ALGORITHM_AUTHOR = 'Unknown'
ALGORITHM_AUTHOR_EMAIL = ''
ALGORITHM_CONTRIBUTORS = [""]
ALGORITHM_NAME = 'my nifty one'
ALGORITHM_DESCRIPTION = 'This algorithm calculates the niftyness of RGB plot-level images'

# Citation information for publication (more information in HOW_TO.md)
CITATION_AUTHOR = 'unknown'
CITATION_TITLE = ''
CITATION_YEAR = ''

# The name of one or more variables returned by the algorithm, separated by commas (more information in HOW_TO.md)
# If only one name is specified, no commas are used.
# Note that variable names cannot have commas in them: use a different separator instead. Also,
# all white space is kept intact; don't add any extra whitespace since it may cause name comparisons
# to fail.
# !! Replace the content of this string with your variable names
VARIABLE_NAMES = 'size of image channels'

# Variable units matching the order of VARIABLE_NAMES, also comma-separated.
# For each variable name in VARIABLE_NAMES add the unit of measurement the value represents.
# !! Replace the content of this string with your variables' unit
VARIABLE_UNITS = 'pixels'

# Variable labels matching the order of VARIABLE_NAMES, also comma-separated.
# This is an optional definition and can be left empty.
VARIABLE_LABELS = ''

# Optional override for the generation of a BETYdb compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_BETYDB_CSV = True

# Optional override for the generation of a TERRA REF Geostreams compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_GEOSTREAMS_CSV = True
# Entry point for plot-level RGB algorithm
def calculate(pxarray: np.ndarray):
    """Calculates one or more values from plot-level RGB data

    Arguments:
        pxarray: Array of RGB data for a single plot

    Return:
        Returns one or more calculated values
    """
    # ALGORITHM: report the pixel count of one color channel (index 1).
    # RETURN: values must match the order declared in VARIABLE_NAMES above.
    return pxarray[:, :, 1].size
| .github/workflows/algorithm_rgb.py | 2,400 | Calculates one or more values from plot-level RGB data
Arguments:
pxarray: Array of RGB data for a single plot
Return:
Returns one or more calculated values
My nifty plot-level RGB algorithm
Importing modules. Please add any additional import statements below Definitions Please replace these definitions' values with the correct ones Information on the creator of this algorithm Citation information for publication (more information in HOW_TO.md) The name of one or more variables returned by the algorithm, separated by commas (more information in HOW_TO.md) If only one name is specified, no comma's are used. Note that variable names cannot have comma's in them: use a different separator instead. Also, all white space is kept intact; don't add any extra whitespace since it may cause name comparisons to fail. !! Replace the content of this string with your variable names Variable units matching the order of VARIABLE_NAMES, also comma-separated. For each variable name in VARIABLE_NAMES add the unit of measurement the value represents. !! Replace the content of this string with your variables' unit Variable labels matching the order of VARIABLE_NAMES, also comma-separated. This is an optional definition and can be left empty. Optional override for the generation of a BETYdb compatible csv file Set to False to suppress the creation of a compatible file Optional override for the generation of a TERRA REF Geostreams compatible csv file Set to False to suppress the creation of a compatible file Entry point for plot-level RBG algorithm ALGORITHM: replace the following lines with your algorithm RETURN: replace the following return with your calculated values. Be sure to order them as defined in VARIABLE_NAMES above | 1,742 | en | 0.759596 |
"""Implementation of Rule L042."""
from sqlfluff.core.rules.base import BaseCrawler, LintResult
from sqlfluff.core.rules.doc_decorators import document_configuration
@document_configuration
class Rule_L042(BaseCrawler):
    """Join/From clauses should not contain subqueries. Use CTEs instead.

    By default this rule is configured to allow subqueries within `FROM`
    clauses but not within `JOIN` clauses. If you prefer a stricter lint
    then this is configurable.

    NB: Some dialects don't allow CTEs, and for those dialects
    this rule makes no sense and should be disabled.

    | **Anti-pattern**

    .. code-block:: sql

        select
            a.x, a.y, b.z
        from a
        join (
            select x, z from b
        ) using(x)


    | **Best practice**

    .. code-block:: sql

        with c as (
            select x, z from b
        )
        select
            a.x, a.y, c.z
        from a
        join c using(x)

    """

    config_keywords = ["forbid_subquery_in"]

    # Clause types to inspect for each value of the `forbid_subquery_in` config.
    _config_mapping = {
        "join": ["join_clause"],
        "from": ["from_clause"],
        "both": ["join_clause", "from_clause"],
    }

    def _eval(self, segment, **kwargs):
        """Join/From clauses should not contain subqueries. Use CTEs instead.

        NB: No fix for this routine because it would be very complex to
        implement reliably.
        """
        forbidden_types = self._config_mapping[self.forbid_subquery_in]
        # Which of the configured clause types (if any) does this segment match?
        matched_type = next(
            (clause for clause in forbidden_types if segment.is_type(clause)),
            None,
        )
        if matched_type is None:
            return None
        # Drill down to the main table expression, bailing out when absent.
        outer_expression = segment.get_child("table_expression")
        if not outer_expression:
            return None
        inner_expression = outer_expression.get_child("main_table_expression")
        if not inner_expression:
            return None
        # Any of these children constitutes a subquery and raises an issue.
        for offender_type in (
            "with_compound_statement",
            "set_expression",
            "select_statement",
        ):
            offender = inner_expression.get_child(offender_type)
            if offender:
                return LintResult(
                    anchor=offender,
                    description=f"{matched_type} clauses should not contain subqueries. Use CTEs instead",
                )
        return None
| src/sqlfluff/core/rules/std/L042.py | 2,651 | Join/From clauses should not contain subqueries. Use CTEs instead.
By default this rule is configured to allow subqueries within `FROM`
clauses but not within `JOIN` clauses. If you prefer a stricter lint
then this is configurable.
NB: Some dialects don't allow CTEs, and for those dialects
this rule makes no sense and should be disabled.
| **Anti-pattern**
.. code-block:: sql
select
a.x, a.y, b.z
from a
join (
select x, z from b
) using(x)
| **Best practice**
.. code-block:: sql
with c as (
select x, z from b
)
select
a.x, a.y, c.z
from a
join c using(x)
Join/From clauses should not contain subqueries. Use CTEs instead.
NB: No fix for this routine because it would be very complex to
implement reliably.
Implementation of Rule L042.
Get the referenced table segment There isn't one. We're done. Get the main bit There isn't one. We're done. If any of the following are found, raise an issue. If not, we're fine. | 999 | en | 0.896995 |
#!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
import threading
import time
from quo.text import Text
from quo.progress import ProgressBar
def main():
    """Run several progress-bar demo tasks concurrently in daemon threads."""
    # (label, number of steps, seconds per step) for each demo task.
    task_specs = [
        ("First task", 50, 0.1),
        ("Second task", 100, 0.1),
        ("Third task", 8, 3),
        ("Fourth task", 200, 0.1),
        ("Fifth task", 40, 0.2),
        ("Sixth task", 220, 0.1),
        ("Seventh task", 85, 0.05),
        ("Eight task", 200, 0.05),
    ]
    with ProgressBar(
        title=Text("<b>Example of many parallel tasks.</b>"),
        bottom_toolbar=Text("<b>[Control-L]</b> clear <b>[Control-C]</b> abort"),
    ) as pb:

        def worker(label, total, delay):
            # Each worker drives its own counter inside the shared progress bar.
            for _ in pb(range(total), label=label):
                time.sleep(delay)

        workers = []
        for spec in task_specs:
            thread = threading.Thread(target=worker, args=spec)
            thread.daemon = True
            thread.start()
            workers.append(thread)

        # Wait for the threads to finish. We use a timeout for the join() call,
        # because on Windows, join cannot be interrupted by Control-C or any
        # other signal.
        for thread in workers:
            while thread.is_alive():
                thread.join(timeout=0.5)
# Run the demo when this file is executed directly.
if __name__ == "__main__":
    main()
| examples/progress/many-parallel-tasks.py | 1,572 | More complex demonstration of what's possible with the progress bar.
!/usr/bin/env python Wait for the threads to finish. We use a timeout for the join() call, because on Windows, join cannot be interrupted by Control-C or any other signal. | 241 | en | 0.881375 |
import math
import lavalink
import ksoftapi
import discord
from discord.ext import commands
class Music(commands.Cog):
    """Music playback commands backed by a local Lavalink node.

    Fixes in this revision:
    * lyrics pagination advanced by ``len(embeds) * 2048`` characters per page,
      silently skipping growing chunks of the lyrics; it now advances by the
      2048 characters actually consumed.
    * ``str.lstrip('00:')`` strips the *character set* {'0', ':'} rather than
      the prefix, eating leading zeros from the minutes field; replaced by
      ``_trim_time``.
    * ``volume 0`` was treated as "no argument given" (`if not volume`); it now
      correctly mutes the player.
    """

    def __init__(self, bot):
        self.bot = bot
        self.kclient = bot.kclient
        # Lazily create a single shared Lavalink client on the bot.
        if not hasattr(bot, 'lavalink'):
            bot.lavalink = lavalink.Client(bot.user.id)
            bot.lavalink.add_node('localhost', 1616, 'proto', 'in', 'default-node')  # Host, Port, Password, Region, Name
            bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
        lavalink.add_event_hook(self.track_hook)

    @staticmethod
    def _trim_time(timestamp):
        """Drop a leading '00:' hours field from an 'hh:mm:ss' string.

        Replaces the old ``lstrip('00:')`` which stripped the characters
        '0' and ':' and therefore mangled e.g. '00:04:05' into '4:05'.
        """
        return timestamp[3:] if timestamp.startswith('00:') else timestamp

    def cog_unload(self):
        """ Cog unload handler. This removes any event hooks that were registered. """
        self.bot.lavalink._event_hooks.clear()

    async def cog_command_error(self, ctx, error):
        """Relay the human-readable message raised by our own voice checks."""
        if isinstance(error, commands.CommandInvokeError):
            await ctx.send(error.original)

    async def track_hook(self, event):
        """When the queue drains, leave the voice channel and go idle."""
        if isinstance(event, lavalink.events.QueueEndEvent):
            guild_id = int(event.player.guild_id)
            await self.connect_to(guild_id, None)
            await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))

    async def cog_before_invoke(self, ctx):
        """ Command before-invoke handler. """
        guild_check = ctx.guild is not None
        if guild_check:
            await self.ensure_voice(ctx)
            # Ensure that the bot and command author share a mutual voicechannel.
        return guild_check

    async def ensure_voice(self, ctx):
        """ This check ensures that the bot and command author are in the same voicechannel. """
        player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
        # Only `play` is allowed to pull the bot into a voice channel; every
        # other command requires an existing connection.
        should_connect = ctx.command.name in ('play',)
        if not ctx.author.voice or not ctx.author.voice.channel:
            raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
        if not player.is_connected:
            if not should_connect:
                raise commands.CommandInvokeError('Not connected :mute:')
            permissions = ctx.author.voice.channel.permissions_for(ctx.me)
            if not permissions.connect or not permissions.speak:  # Check user limit too?
                raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
            player.store('channel', ctx.channel.id)
            await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
        else:
            if int(player.channel_id) != ctx.author.voice.channel.id:
                raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:')

    async def connect_to(self, guild_id: int, channel_id: str):
        """ Connects to the given voicechannel ID. A channel_id of `None` means disconnect. """
        ws = self.bot._connection._get_websocket(guild_id)
        await ws.voice_state(str(guild_id), channel_id)

    @commands.command(name='play', aliases=['p', 'sing'])
    async def play(self, ctx, *, query):
        """Resolve a URL or search query and enqueue the track(s)."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        query = query.strip('<>')
        # Anything that isn't a URL is treated as a YouTube search.
        if not query.startswith('http'):
            query = f'ytsearch:{query}'
        results = await player.node.get_tracks(query)
        if not results or not results['tracks']:
            return await ctx.send('Song not found :x: Please try again :mag_right:')
        em = discord.Embed(colour=discord.Colour(0x59FFC8))
        if results['loadType'] == 'PLAYLIST_LOADED':
            tracks = results['tracks']
            for track in tracks:
                # Add all of the tracks from the playlist to the queue.
                player.add(requester=ctx.author.id, track=track)
            em.title = 'Playlist Enqueued!'
            em.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
        else:
            track = results['tracks'][0]
            em.title = 'Track Enqueued'
            em.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
            em.set_thumbnail(url=f"http://i.ytimg.com/vi/{track['info']['identifier']}/hqdefault.jpg")
            em.add_field(name='Channel', value=track['info']['author'])
            if track['info']['isStream']:
                duration = 'Live'
            else:
                duration = self._trim_time(lavalink.format_time(track['info']['length']))
            em.add_field(name='Duration', value=duration)
            track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
            player.add(requester=ctx.author.id, track=track)
        msg = await ctx.send(embed=em)
        if not player.is_playing:
            await player.play()
            await player.reset_equalizer()
            await msg.delete(delay=1)
            await self.now(ctx)
        await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))

    @commands.command(name='seek')
    async def seek(self, ctx, seconds=None):
        """Jump forward (or backward, with a negative value) in the current track."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        if not seconds:
            return await ctx.send('You need to specify the amount of seconds to seek :fast_forward:')
        try:
            track_time = player.position + int(seconds) * 1000  # lavalink works in ms
            await player.seek(track_time)
        except ValueError:
            return await ctx.send('Specify valid amount of seconds :clock3:')
        await ctx.send(f'Moved track to **{lavalink.format_time(track_time)}**')

    @commands.command(name='skip', aliases=['forceskip', 'fs', 'next'])
    async def skip(self, ctx):
        """Skip to the next queued track."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        await ctx.send('⏭ | Skipped.')
        await player.skip()

    @commands.command(name='now', aliases=['current', 'currentsong', 'playing', 'np'])
    async def now(self, ctx):
        """Show the current track with an elapsed-time progress bar."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        song = 'Nothing'
        if player.current:
            if player.current.stream:
                dur = 'LIVE'
                pos = ''
                count = total = 1
            else:
                count = player.position
                pos = lavalink.format_time(count)
                total = player.current.duration
                dur = lavalink.format_time(total)
                if pos == dur:  # When called immediately after enqueue
                    count = 0
                    pos = '00:00:00'
                dur = self._trim_time(dur)
                # Render the position with the same number of fields as the duration.
                pos = pos[-len(dur):]
            bar_len = 30  # bar length
            filled_len = int(bar_len * count // float(total))
            bar = '═' * filled_len + '◈' + '─' * (bar_len - filled_len)
            song = f'[{player.current.title}]({player.current.uri})\n`{pos} {bar} {dur}`'
            em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
            em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
            em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
            requester = ctx.guild.get_member(player.current.requester)
            em.set_footer(text=f"Requested by: {requester}", icon_url=requester.avatar_url)
            await ctx.send(embed=em)
            await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
        else:
            await ctx.send('Not playing anything :mute:')

    @commands.command(name='save', aliases=['star'])
    async def savetodm(self, ctx):
        """DM the invoker an embed describing the current track."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if player.current:
            if player.current.stream:
                dur = 'Live'
            else:
                dur = self._trim_time(lavalink.format_time(player.current.duration))
            song = f'[{player.current.title}]({player.current.uri})'
            em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
            em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
            em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
            em.add_field(name='Channel', value=player.current.author)
            em.add_field(name='Duration', value=dur)
            user = ctx.author
            await user.send(embed=em)
            await ctx.send(f"Current song has been sent to you {ctx.author.mention} :floppy_disk:")
        else:
            await ctx.send('Not playing anything :mute:')

    @commands.command(name='queue', aliases=['q', 'playlist'])
    async def queue(self, ctx, page: int=1):
        """Show one page (10 tracks) of the current queue."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.queue:
            return await ctx.send('Queue empty! Why not queue something? :cd:')
        items_per_page = 10
        pages = math.ceil(len(player.queue) / items_per_page)
        start = (page - 1) * items_per_page
        end = start + items_per_page
        queue_list = ''
        for i, track in enumerate(player.queue[start:end], start=start):
            queue_list += f'`{i + 1}.` [**{track.title}**]({track.uri})\n'
        embed = discord.Embed(colour=ctx.guild.me.top_role.colour,
                              description=f'**{len(player.queue)} tracks**\n\n{queue_list}')
        embed.set_footer(text=f'Viewing page {page}/{pages}')
        await ctx.send(embed=embed)

    @commands.command(name='pause', aliases=['resume'])
    async def pause(self, ctx):
        """Toggle pause/resume of the current track."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        if player.paused:
            await player.set_pause(False)
            await ctx.message.add_reaction('▶')
        else:
            await player.set_pause(True)
            await ctx.message.add_reaction('⏸')

    @commands.command(name='volume', aliases=['vol'])
    async def volume(self, ctx, volume: int=None):
        """Show the current volume, or set it when a value is given (0 mutes)."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        # BUGFIX: `if not volume` also matched an explicit 0, so `volume 0`
        # displayed the current level instead of muting.
        if volume is None:
            return await ctx.send(f'🔈 | {player.volume}%')
        await player.set_volume(volume)
        await ctx.send(f'🔈 | Set to {player.volume}%')

    @commands.command(name='shuffle')
    async def shuffle(self, ctx):
        """Toggle shuffling of the queue."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        player.shuffle = not player.shuffle
        await ctx.send('🔀 | Shuffle ' + ('enabled' if player.shuffle else 'disabled'))

    @commands.command(name='repeat')
    async def repeat(self, ctx):
        """Toggle repeating of the queue."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        player.repeat = not player.repeat
        await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))

    @commands.command(name='remove', aliases=['dequeue', 'pop'])
    async def remove(self, ctx, index: int):
        """Remove the track at the given 1-based queue position."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.queue:
            return await ctx.send('Nothing queued :cd:')
        if index > len(player.queue) or index < 1:
            return await ctx.send('Index has to be >=1 and <=queue size')
        index = index - 1
        removed = player.queue.pop(index)
        await ctx.send('Removed **' + removed.title + '** from the queue.')

    @commands.command(name='disconnect', aliases=['dis', 'stop', 'leave'])
    async def disconnect(self, ctx):
        """Clear the queue, stop playback and leave the voice channel."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
            return await ctx.send('You\'re not in my voice channel :loud_sound:')
        if not player.is_connected:
            return await ctx.send('Not connected :mute:')
        player.queue.clear()
        # Stop the current track so Lavalink consumes less resources.
        await player.stop()
        # Disconnect from the voice channel.
        await self.connect_to(ctx.guild.id, None)
        await ctx.send('Disconnected :mute:')
        await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))

    @commands.command(name='lyrics', aliases=['ly'])
    async def get_lyrics(self, ctx, *, query: str=""):
        """Get lyrics of current song"""
        if not query:
            player = self.bot.lavalink.player_manager.get(ctx.guild.id)
            if not player.is_playing:
                return await ctx.send('I\'m not currently playing anything :warning:')
            query = player.current.title
        try:
            async with ctx.typing():
                results = await self.kclient.music.lyrics(query, limit=1)
        except ksoftapi.NoResults:
            await ctx.send(f'No lyrics found for `{query}`')
        else:
            lyrics = results[0].lyrics
            result = results[0]
            embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(0xCCFF00), description=lyrics[:2048])
            embed.set_thumbnail(url=result.album_art)
            embed.set_author(name="Lyrics:")
            lyrics = lyrics[2048:]
            embeds = [embed]  # create embeds' list for long lyrics
            while len(lyrics) > 0 and len(embeds) < 10:  # limiting embeds to 10
                embed = discord.Embed(color=discord.Color(0xCCFF00), description=lyrics[:2048])
                # BUGFIX: advance by exactly the 2048 characters consumed above.
                # The old `lyrics[len(embeds) * 2048:]` skipped ever-growing
                # chunks of the lyrics on each page.
                lyrics = lyrics[2048:]
                embeds.append(embed)
            embeds[-1].set_footer(text="Source: KSoft.Si")  # set footer for last embed
            for embed in embeds:
                await ctx.send(embed=embed)

    @commands.command(name='equalizer', aliases=['eq'])
    async def equalizer(self, ctx, *args):
        """Equalizer: apply a named preset, or set one `band gain` pair."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if len(args) == 0:
            await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:')
        elif len(args) == 1:
            # 15 gains, one per lavalink EQ band.
            presets ={
                'reset': 'Default',
                'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, -0.04, -0.06, -0.08, -0.10, -0.12, -0.14],
                'jazz': [-0.13, -0.11, -0.1, -0.1, 0.14, 0.2, -0.18, 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0],
                'pop': [-0.02, -0.01, 0.08, 0.1, 0.15, 0.1, 0.03, -0.02, -0.035, -0.05, -0.05, -0.05, -0.05, -0.05, -0.05],
                'treble': [-0.1, -0.12, -0.12, -0.12, -0.08, -0.04, 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]
            }
            preset = args[0].lower()
            if preset in ['reset', 'default']:
                await player.reset_equalizer()
            elif preset in presets:
                gain_list = enumerate(presets[preset])
                await player.set_gains(*gain_list)
            elif preset == '--list':
                em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(0xFF6EFF), description='\n'.join(presets.keys()))
                return await ctx.send(embed=em)
            else:
                return await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets')
        elif len(args) == 2:
            try:
                band = int(args[0])
                gain = float(args[1])
                await player.set_gain(band, gain)
            except ValueError:
                return await ctx.send('Specify valid `band gain` values :control_knobs:')
        else:
            return await ctx.send('Specify `band gain` or `preset` :control_knobs:')
        # Print final EQ settings
        eq_frequencies = [f"`{gain}`" for gain in player.equalizer]
        await ctx.send(":level_slider: Current Values:\n" + ' '.join(eq_frequencies))
def setup(bot):
    """Entry point used by discord.py's extension loader to register the cog."""
    bot.add_cog(Music(bot))
Host, Port, Password, Region, Name Ensure that the bot and command author share a mutual voice channel. Check user limit too? Add all of the tracks from the playlist to the queue. When called immediately after enqueue bar length Stop the current track so Lavalink consumes less resources. Disconnect from the voice channel. create embeds' list for long lyrics limiting embeds to 10 set footer for last embed Print final EQ settings
import math
class MinStack:
    """Stack with push/pop/top and O(1) retrieval of the minimum element.

    Each entry stores the pushed value together with the minimum of the stack
    up to and including that entry, so pop() and getMin() no longer need the
    O(n) rescan of the remaining elements that the previous version performed.
    The empty-stack behaviour is preserved: getMin() reports +inf, pop()/top()
    raise IndexError.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        # Each element is a (value, minimum-so-far) pair.
        self.stack = []

    def push(self, x: int) -> None:
        """Push x, recording the running minimum alongside it."""
        current_min = min(x, self.stack[-1][1]) if self.stack else x
        self.stack.append((x, current_min))

    def pop(self) -> None:
        """Remove the top element; the running minimum unwinds automatically."""
        self.stack.pop()

    def top(self) -> int:
        """Return the top element without removing it."""
        return self.stack[-1][0]

    def getMin(self) -> int:
        """Return the current minimum in O(1), or +inf when empty."""
        return self.stack[-1][1] if self.stack else math.inf
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/MinStack.py | 805 | initialize your data structure here.
Your MinStack object will be instantiated and called as such: obj = MinStack() obj.push(x) obj.pop() param_3 = obj.top() param_4 = obj.getMin() | 182 | en | 0.750443 |
import argparse
import json
import os
from collections import Counter, defaultdict
from helper import _is_token_alnum
THRESHOLD = 0.01
GAP = 10
def get_full_mapping(src_filename, trg_filename, align_filename,
                     mapping_filename, reverse_src2trg=False, lowercase=True):
    """Build a word-translation co-occurrence mapping from a word alignment.

    Args:
        src_filename: Tokenized source-side corpus, one sentence per line.
        trg_filename: Tokenized target-side corpus, parallel to the source.
        align_filename: Pharaoh-format alignment file ("i-j" pairs per line).
        mapping_filename: Output path; the mapping is dumped there as JSON.
        reverse_src2trg: If True, accumulate target->source counts instead.
        lowercase: If True, lowercase both sides before counting.

    Returns:
        dict mapping each source token to {target token: co-occurrence count}
        (direction swapped when ``reverse_src2trg`` is True).
    """
    print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format(
        src_filename, trg_filename, align_filename, mapping_filename,
        reverse_src2trg))
    src2trg_mapping = defaultdict(lambda: defaultdict(int))
    processed_line = 0
    with open(src_filename) as fs, open(trg_filename) as ft, open(
            align_filename) as fa:
        for ls, lt, la in zip(fs, ft, fa):
            if lowercase:
                ls = ls.lower()
                lt = lt.lower()
            processed_line += 1
            ls_words = ls.split()
            lt_words = lt.split()
            la_aligns = la.split()
            src_pos_counter = Counter()
            trg_pos_counter = Counter()
            valid_src_pos = set()
            valid_trg_pos = set()
            # First pass: count how many links touch each position.
            for align in la_aligns:
                # only consider one-to-one mapping
                src_pos, trg_pos = align.split('-')
                src_pos = int(src_pos)
                trg_pos = int(trg_pos)
                # only consider alpha number token
                if _is_token_alnum(ls_words[src_pos]):
                    src_pos_counter[src_pos] += 1
                if _is_token_alnum(lt_words[trg_pos]):
                    trg_pos_counter[trg_pos] += 1
            # ignore token that aligned twice
            for pos, c in src_pos_counter.items():
                if c == 1:
                    valid_src_pos.add(pos)
            for pos, c in trg_pos_counter.items():
                if c == 1:
                    valid_trg_pos.add(pos)
            # Second pass: keep only links that are one-to-one on both sides.
            for align in la_aligns:
                src_pos, trg_pos = align.split('-')
                src_pos = int(src_pos)
                trg_pos = int(trg_pos)
                if _is_token_alnum(ls_words[src_pos]) and _is_token_alnum(
                        lt_words[trg_pos]) and (src_pos in valid_src_pos) and (
                        trg_pos in valid_trg_pos):
                    if reverse_src2trg:
                        src2trg_mapping[lt_words[trg_pos]][
                            ls_words[src_pos]] += 1
                    else:
                        src2trg_mapping[ls_words[src_pos]][
                            lt_words[trg_pos]] += 1
            if processed_line % 1000000 == 0:
                print('{} done.'.format(processed_line))
    # Persist the raw mapping so later runs can reload it instead of recomputing.
    with open(mapping_filename, 'w') as fw:
        print('dump to {} ...'.format(mapping_filename))
        json.dump(src2trg_mapping, fw)
    return src2trg_mapping
def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap):
    """Filter a raw translation mapping by relative frequency and frequency gap.

    For example,
        {'s1': {'t1': 999, 't2': 199, 't3': 1},
         's2': {'m1': 2000, 'm2': 100}}
    =>
        {'s1': {'t1': 999, 't2': 199},
         's2': {'m1': 2000}}

    Args:
        full_mapping: dict of {source token: {target token: count}}.
        clean_dict_filename: Output path; one JSON object per line,
            {src: {trg: relative frequency}}.
        threshold: Minimum share of a source token's total count that a
            translation must reach to be kept.
        ignore_gap: Drop a translation when the previous (more frequent) one
            occurs more than this many times as often, unless the candidate
            is still clearly common.

    Returns:
        None. Results are written to ``clean_dict_filename``.
    """
    print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format(
        clean_dict_filename, threshold, ignore_gap))
    # Most frequent source tokens first.
    full_mapping = sorted(
        full_mapping.items(),
        key=lambda x: sum(x[1].values()),
        reverse=True)
    with open(clean_dict_filename, 'w') as fw:
        for idx, src2trg in enumerate(full_mapping):
            src = src2trg[0]
            # Candidate translations, most frequent first.
            trg = sorted(src2trg[1].items(), key=lambda x: x[1], reverse=True)
            total_count = sum(c[1] for c in trg)
            clean_trg = dict()
            p = trg[0][1]  # count of the previously accepted (more frequent) translation
            for w, c in trg:
                if c / total_count < threshold:
                    # too rare
                    break
                # NOTE(review): the gap test uses the module constant THRESHOLD
                # rather than the `threshold` parameter — presumably intentional
                # (a fixed "still clearly common" floor), but worth confirming.
                if (p / c > ignore_gap) and (c / total_count < THRESHOLD * 5):
                    # large gap
                    break
                p = c
                clean_trg.update({w: round(c / total_count, 3)})
            fw.write('{}\n'.format(json.dumps({src: clean_trg}, ensure_ascii=False)))
if __name__ == '__main__':
    # Command-line driver: compute (or reload cached) co-occurrence mappings in
    # both directions, then write the filtered bilingual dictionaries.
    parser = argparse.ArgumentParser(
        description='Process alignments and do filter')
    parser.add_argument('--src_filename',
                        help='Origin src file name before bsp',
                        type=str,
                        required=True)
    parser.add_argument('--trg_filename',
                        help='Origin trg file name before bsp',
                        type=str,
                        required=True)
    parser.add_argument('--align_filename',
                        help='align file name by atools',
                        type=str,
                        required=True)
    parser.add_argument('--dict_filename',
                        help='clean dict file name',
                        type=str,
                        required=True)
    parser.add_argument('--threshold',
                        help='threshold of ignore frequency',
                        type=float,
                        default=THRESHOLD)
    parser.add_argument('--ignore_gap',
                        help='gap of ignore frequency',
                        type=float,
                        default=GAP)
    parser.add_argument(
        '--overwrite', dest='overwrite',
        action='store_true', help='Overwrite existing output files')
    args = parser.parse_args()
    if args.overwrite:
        print('Overwrite existing file')
    # Intermediate mapping files live next to the alignment file and act as a
    # cache; without --overwrite an existing file is reloaded instead of rebuilt.
    src2trg_mapping_filename = '{}.{}'.format(args.align_filename,
                                              'src2trg_mapping')
    trg2src_mapping_filename = '{}.{}'.format(args.align_filename,
                                              'trg2src_mapping')
    if os.path.isfile(src2trg_mapping_filename) and (not args.overwrite):
        print('loading mapping: {}'.format(src2trg_mapping_filename))
        with open(src2trg_mapping_filename) as f:
            full_src2trg_mapping = json.load(f)
    else:
        print('creating mapping: {}'.format(src2trg_mapping_filename))
        full_src2trg_mapping = get_full_mapping(args.src_filename,
                                                args.trg_filename,
                                                args.align_filename,
                                                src2trg_mapping_filename,
                                                False)
    if os.path.isfile(trg2src_mapping_filename) and (not args.overwrite):
        print('loading mapping: {}'.format(trg2src_mapping_filename))
        with open(trg2src_mapping_filename) as f:
            full_trg2src_mapping = json.load(f)
    else:
        print('creating mapping: {}'.format(trg2src_mapping_filename))
        full_trg2src_mapping = get_full_mapping(args.src_filename,
                                                args.trg_filename,
                                                args.align_filename,
                                                trg2src_mapping_filename,
                                                True)
    # Filter each direction and write one JSON object per line.
    src2trg_clean_dict_filename = '{}.{}'.format(args.dict_filename,
                                                 'src2trg')
    refine_dict(full_src2trg_mapping, src2trg_clean_dict_filename,
                args.threshold, args.ignore_gap)
    trg2src_clean_dict_filename = '{}.{}'.format(args.dict_filename,
                                                 'trg2src')
    refine_dict(full_trg2src_mapping, trg2src_clean_dict_filename,
                args.threshold, args.ignore_gap)
| examples/wmt/tools/align/extract_bilingual_vocabulary.py | 7,887 | Get full mapping give align.
Args:
src_filename:
trg_filename:
align_filename:
mapping_filename:
reverse_src2trg:
lowercase:
Returns:
Clean dictionary based on frequency and gap of frequency.
For example,
{'s1': ['t1': 999, 't2': 199, 't3':1],
's2': ['m1': 2000, 'm2': 100]}
=>
{'s1': ['t1': 999, 't2': 199],
's2': ['m1': 2000]}
Args:
full_mapping:
clean_dict_filename:
threshold:
ignore_gap:
Returns:
only consider one-to-one mapping only consider alpha number token ignore token that aligned twice too rare large gap | 567 | en | 0.652323 |
#!/usr/bin/env python
# coding: utf-8
"""
Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform and exploiting a defined moving
platform as a sensor target.
"""
# %%
# Building a Simulated Multi-Sensor Moving Platform
# -------------------------------------------------
# The focus of this example is to show how to setup and configure a simulation environment in order to provide a
# multi-sensor moving platform, as such the application of a tracker will not be covered in detail. For more information
# about trackers and how to configure them review of the tutorials and demonstrations is recommended.
#
# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and
# :class:`~.Sensor` objects.
#
# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.
# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example
# will be imported at the relevant point in order to draw attention to the new features.
# Some general imports and set up
from datetime import datetime
from datetime import timedelta
from matplotlib import pyplot as plt
import numpy as np
# Stone Soup imports:
from stonesoup.types.state import State, GaussianState
from stonesoup.types.array import StateVector
from stonesoup.types.array import CovarianceMatrix
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.predictor.particle import ParticlePredictor
from stonesoup.resampler.particle import SystematicResampler
from stonesoup.updater.particle import ParticleUpdater
from stonesoup.measures import Mahalanobis
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.tracker.simple import SingleTargetTracker
# Define the simulation start time
start_time = datetime.now()
# %%
# Create a multi-sensor platform
# ------------------------------
# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a
# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a
# :class:`~.MultiTargetGroundTruthSimulator`.
#
# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within
# a 6 dimensional state space according to the following :math:`\mathbf{x}`.
#
# .. math::
# \mathbf{x} = \begin{bmatrix}
# x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix}
# = \begin{bmatrix}
# 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.
# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:
#
# .. math::
# F_{k} = \begin{bmatrix}
# 1 & \triangle k & 0 & 0 & 0 & 0\\
# 0 & 1 & 0 & 0 & 0 & 0\\
# 0 & 0 & 1 & \triangle k & 0 & 0\\
# 0 & 0 & 0 & 1 & 0 & 0\\
# 0 & 0 & 0 & 0 & 1 & \triangle k \\
# 0 & 0 & 0 & 0 & 0 & 1\\
# \end{bmatrix}
# First import the Moving platform
from stonesoup.platform.base import MovingPlatform
# Define the initial platform position, in this case the origin
initial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])
initial_state = State(initial_loc, start_time)
# Define transition model and position for 3D platform
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# create our fixed platform
sensor_platform = MovingPlatform(states=initial_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
transition_model=transition_model)
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.
# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate
# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
np.array([np.deg2rad(3), # Elevation
np.deg2rad(3), # Bearing
100., # Range
25.]))) # Range Rate
# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])
# Mount the radar onto the platform
radar = RadarElevationBearingRangeRate(ndim_state=6,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
noise_covar=radar_noise_covar,
mounting_offset=radar_mounting_offsets,
rotation_offset=radar_rotation_offsets,
)
sensor_platform.add_sensor(radar)
# %%
# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`)
# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.
# The imager sensor model is described by the following equations:
#
# .. math::
# \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k)
#
# where:
#
# * :math:`\mathbf{z}_k` is a measurement vector of the form:
#
# .. math::
# \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix}
#
# * :math:`h` is a non - linear model function of the form:
#
# .. math::
# h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix}
# \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\
# \arctan(\mathcal{y},\mathcal{x}) \ \
# \end{bmatrix} + \dot{\mathbf{x}}_k
#
# * :math:`\mathbf{z}_k` is Gaussian distributed with covariance :math:`R`, i.e.:
#
# .. math::
# \mathbf{z}_k \sim \mathcal{N}(0, R)
#
# .. math::
# R = \begin{bmatrix}
# \sigma_{\theta}^2 & 0 \\
# 0 & \sigma_{\phi}^2 \\
# \end{bmatrix}
# Import a passive sensor capability
from stonesoup.sensor.passive import PassiveElevationBearing

# Passive imager noise: 0.05 degrees (converted to radians) on both the
# elevation and bearing channels.
imager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05),  # Elevation
                                                        np.deg2rad(0.05)])))  # Bearing
# imager mounting offset: body-frame [0, 8, -1] -> 8 units to the right and
# 1 unit below the platform centre.
imager_mounting_offsets = StateVector([0, 8, -1])  # e.g. wing mounted imaging pod
imager_rotation_offsets = StateVector([0, 0, 0])

# Mount the imager onto the platform. Only a position mapping is supplied:
# this sensor measures angles and does not use the velocity components.
imager = PassiveElevationBearing(ndim_state=6,
                                 mapping=(0, 2, 4),
                                 noise_covar=imager_noise_covar,
                                 mounting_offset=imager_mounting_offsets,
                                 rotation_offset=imager_rotation_offsets,
                                 )
sensor_platform.add_sensor(imager)
# %%
# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.
# The values in this array are defined in the platforms local coordinate frame of reference. So in this case an offset
# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the
# platform.
#
# Now that we have mounted the two sensors we can see that the platform object has both associated with it:
sensor_platform.sensors  # inspect the sensors now mounted (radar + imager)
# %%
# Create a Target Platform
# ------------------------
# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator
# functionality within Stone Soup, which we demonstrated in the previous example, and creates a random target based on
# our selected parameters. The second method provides a means to generate a target which will perform specific
# behaviours, this is the approach we will take here.
#
# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as
# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform
# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop
# over the transition sequence provided until the simulation ends.
#
# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as
# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms
# within the simulation (sensor-target geometry dependant).
#
# For this example we will create an air target which will fly a sequence of straight and level followed by a
# coordinated turn in the :math:`x-y` plane. This is configured such that the target will perform each manoeuvre for 8
# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.
# Import a Constant Turn model to enable target to perform basic manoeuvre
from stonesoup.models.transition.linear import ConstantTurn

# Straight-and-level flight: constant velocity in x, y and z with zero process
# noise, so the target flies an exact straight line between manoeuvres.
straight_level = CombinedLinearGaussianTransitionModel(
    [ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])

# Configure the aircraft turn behaviour (noise-free, deterministic turn).
turn_noise_diff_coeffs = np.array([0., 0.])
turn_rate = np.pi/32  # specified in radians per seconds...
turn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)

# Configure turn model to maintain current altitude: the constant-turn model
# covers the x-y plane while a zero-noise ConstantVelocity handles z.
turning = CombinedLinearGaussianTransitionModel(
    [turn_model, ConstantVelocity(0.)])

# Alternate the two behaviours, 8 seconds each. With turn_rate = pi/32 rad/s,
# an 8 s turn sweeps pi/4 rad (45 degrees), matching the narrative above.
manoeuvre_list = [straight_level, turning]
manoeuvre_times = [timedelta(seconds=8),
                   timedelta(seconds=8)]
# %%
# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving
# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.
# Import a multi-transition moving platform
from stonesoup.platform.base import MultiTransitionMovingPlatform

# Initial target state in [x, dx, y, dy, z, dz] order (see the mappings below):
# position (0, 1800, 8000) with velocity -40 along x.
initial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])
initial_target_state = State(initial_target_location, start_time)
# sensors=None: this platform exists purely as a target. It cycles through
# manoeuvre_list/manoeuvre_times until the simulation ends.
target = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,
                                       transition_times=manoeuvre_times,
                                       states=initial_target_state,
                                       position_mapping=(0, 2, 4),
                                       velocity_mapping=(1, 3, 5),
                                       sensors=None)
# %%
# Creating the simulator
# ----------------------
# Now that we have built our sensor platform and a target platform we need to wrap them in a simulator. Because we do
# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a
# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then
# fed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.
# Import the required simulators
from stonesoup.simulator.simple import DummyGroundTruthSimulator
from stonesoup.simulator.platform import PlatformDetectionSimulator
# %%
# We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for
# 25 seconds.
# Build one timestamp per simulated second. np.arange excludes its stop value,
# so arange(0, 25) yields ticks 0..24 inclusive — 25 timestamps covering the
# advertised 25 seconds (the previous arange(0, 24, 1) produced only 24,
# contradicting its own comment and the narrative above).
times = np.arange(0, 25, 1)  # 25 seconds
timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]
# The dummy ground-truth simulator supplies empty truth sets with timestamps;
# the detection simulator then has each platform sense the other.
truths = DummyGroundTruthSimulator(times=timestamps)
sim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])
# %%
# Create a Tracker
# ------------------------------------
# Now that we have setup our sensor platform, target and simulation we need to create a tracker. For this example we
# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example
# we will use an inflated constant noise model to account for target motion uncertainty.
#
# Note that we don't add a measurement model to the updater, this is because each sensor adds their measurement model to
# each detection they generate. The tracker handles this internally by checking for a measurement model with each
# detection it receives and applying only the relevant measurement model.
# Process noise is inflated relative to the target's true (noise-free) motion
# to absorb the unmodelled turn manoeuvres.
target_transition_model = CombinedLinearGaussianTransitionModel(
    [ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])

# First add a Particle Predictor
predictor = ParticlePredictor(target_transition_model)
# Now create a resampler and particle updater.
resampler = SystematicResampler()
# measurement_model=None: each detection carries its own measurement model,
# which the updater applies per-detection (see the narrative above).
updater = ParticleUpdater(measurement_model=None,
                          resampler=resampler)

# Create a particle initiator seeded from a single broad Gaussian prior.
from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator
single_point_initiator = SinglePointInitiator(
    GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),
    None)
initiator = GaussianParticleInitiator(number_particles=500,
                                      initiator=single_point_initiator)

# Gate by Mahalanobis distance; missed_distance=np.inf means no detection is
# ever ruled out as a miss by the hypothesiser.
hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)
data_associator = GNNWith2DAssignment(hypothesiser)

# Drop a track after 10 time steps without an update.
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(time_steps_since_update=10)

# Create a single-target tracker (particle-filter based, per the components above).
tracker = SingleTargetTracker(
    initiator=initiator,
    deleter=deleter,
    detector=sim,
    data_associator=data_associator,
    updater=updater
)
# %%
# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing
# only sensor it does not make sense to plot out the detections without animating the resulting plot. This
# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target
# position is shown in black, radar detections are shown in yellow while the bearing only imager detections are
# coloured green.
from matplotlib import animation
import matplotlib
matplotlib.rcParams['animation.html'] = 'jshtml'
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate
from stonesoup.functions import sphere2cart

fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
frames = []
# Run the tracker; each iteration yields the current time and the live tracks.
# Every frame re-draws platform history, detections and track estimates so the
# collected artists can be animated with ArtistAnimation below.
for time, ctracks in tracker:
    artists = []
    ax.set_xlabel("$East$")
    ax.set_ylabel("$North$")
    ax.set_ylim(0, 2250)
    ax.set_xlim(-1000, 1000)
    # Sensor-platform trajectory so far (blue); x/y taken from state indices 0/2.
    X = [state.state_vector[0] for state in sensor_platform]
    Y = [state.state_vector[2] for state in sensor_platform]
    artists.extend(ax.plot(X, Y, color='b'))
    for detection in sim.detections:
        if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):
            # Radar detection: invert the measurement back to Cartesian (yellow).
            x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]
            color = 'y'
        else:
            # Bearing-only detection has no range: draw a very long ray along
            # the measured direction instead (green).
            r = 10000000
            # extract the platform rotation offsets
            _, el_offset, az_offset = sensor_platform.orientation
            # obtain measurement angles and map to cartesian
            e, a = detection.state_vector
            x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)
            color = 'g'
        # Line from the sensor platform to the detection point.
        X = [sensor_platform.state_vector[0], x]
        Y = [sensor_platform.state_vector[2], y]
        artists.extend(ax.plot(X, Y, color=color))
    # True target trajectory (red).
    X = [state.state_vector[0] for state in target]
    Y = [state.state_vector[2] for state in target]
    artists.extend(ax.plot(X, Y, color='r'))
    # Estimated track(s) (black).
    for track in ctracks:
        X = [state.state_vector[0] for state in track]
        Y = [state.state_vector[2] for state in track]
        artists.extend(ax.plot(X, Y, color='k'))
    frames.append(artists)

animation.ArtistAnimation(fig, frames)
# %%
# To increase your confidence with simulated platform targets it would be good practice to modify the target to fly
# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels
# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.
# %%
# Key points
# ----------
# 1. Platforms, static or moving, can be used as targets for sensor platforms.
# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.
# 3. A tracker can be configured to exploit all sensor data created in a simulation.
| docs/examples/Moving_Platform_Simulation.py | 17,381 | Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform and exploiting a defined moving
platform as a sensor target.
!/usr/bin/env python coding: utf-8 %% Building a Simulated Multi-Sensor Moving Platform ------------------------------------------------- The focus of this example is to show how to setup and configure a simulation environment in order to provide a multi-sensor moving platform, as such the application of a tracker will not be covered in detail. For more information about trackers and how to configure them review of the tutorials and demonstrations is recommended. This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and :class:`~.Sensor` objects. In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects. As these have been introduced in previous tutorials they are imported upfront. New functionality within this example will be imported at the relevant point in order to draw attention to the new features. Some general imports and set up Stone Soup imports: Define the simulation start time %% Create a multi-sensor platform ------------------------------ We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a :class:`~.MultiTargetGroundTruthSimulator`. In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within a 6 dimensional state space according to the following :math:`\mathbf{x}`. .. math:: \mathbf{x} = \begin{bmatrix} x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix} = \begin{bmatrix} 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix} The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise. Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by: .. 
math:: F_{k} = \begin{bmatrix} 1 & \triangle k & 0 & 0 & 0 & 0\\ 0 & 1 & 0 & 0 & 0 & 0\\ 0 & 0 & 1 & \triangle k & 0 & 0\\ 0 & 0 & 0 & 1 & 0 & 0\\ 0 & 0 & 0 & 0 & 1 & \triangle k \\ 0 & 0 & 0 & 0 & 0 & 1\\ \end{bmatrix} First import the Moving platform Define the initial platform position, in this case the origin Define transition model and position for 3D platform create our fixed platform %% With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor (e.g. an optical sensor, which has no capability to directly measure range). First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform. Import a range rate bearing elevation capable radar Create a radar sensor Elevation Bearing Range Range Rate radar mountings e.g. nose cone Mount the radar onto the platform %% Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`) of the target platform. For the purposes of this example we will assume that the passive sensor is an imager. The imager sensor model is described by the following equations: .. math:: \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k) where: * :math:`\mathbf{z}_k` is a measurement vector of the form: .. math:: \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix} * :math:`h` is a non - linear model function of the form: .. math:: h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix} \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\ \arctan(\mathcal{y},\mathcal{x}) \ \ \end{bmatrix} + \dot{\mathbf{x}}_k * :math:`\mathbf{z}_k` is Gaussian distributed with covariance :math:`R`, i.e.: .. math:: \mathbf{z}_k \sim \mathcal{N}(0, R) .. 
math:: R = \begin{bmatrix} \sigma_{\theta}^2 & 0 \\ 0 & \sigma_{\phi}^2 \\ \end{bmatrix} Import a passive sensor capability Elevation Bearing imager mounting offset e.g. wing mounted imaging pod Mount the imager onto the platform %% Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter. The values in this array are defined in the platforms local coordinate frame of reference. So in this case an offset of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the platform. Now that we have mounted the two sensors we can see that the platform object has both associated with it: %% Create a Target Platform ------------------------ There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator functionality within Stone Soup, which we demonstrated in the previous example, and creates a random target based on our selected parameters. The second method provides a means to generate a target which will perform specific behaviours, this is the approach we will take here. In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop over the transition sequence provided until the simulation ends. When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms within the simulation (sensor-target geometry dependant). For this example we will create an air target which will fly a sequence of straight and level followed by a coordinated turn in the :math:`x-y` plane. 
This is configured such that the target will perform each manoeuvre for 8 seconds, and it will turn through 45 degrees over the course of the turn manoeuvre. Import a Constant Turn model to enable target to perform basic manoeuvre Configure the aircraft turn behaviour specified in radians per seconds... Configure turn model to maintain current altitude %% Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving platform. Because we intend for this platform to be a target we do not need to attach any sensors to it. Import a multi-transition moving platform %% Creating the simulator ---------------------- Now that we have build our sensor platform and a target platform we need to wrap them in a simulator. Because we do not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then feed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built. Import the required simulators %% We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for 25 seconds. 25 seconds %% Create a Tracker ------------------------------------ Now that we have setup our sensor platform, target and simulation we need to create a tracker. For this example we will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example we will use an inflated constant noise model to account for target motion uncertainty. Note that we don't add a measurement model to the updater, this is because each sensor adds their measurement model to each detection they generate. The tracker handles this internally by checking for a measurement model with each detection it receives and applying only the relevant measurement model. 
First add a Particle Predictor Now create a resampler and particle updater Create a particle initiator Create a Kalman single-target tracker %% The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing only sensor it does not make sense to plot out the detections without animating the resulting plot. This animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target position is shown in black, radar detections are shown in yellow while the bearing only imager detections are coloured green. extract the platform rotation offsets obtain measurement angles and map to cartesian %% To increase your confidence with simulated platform targets it would be good practice to modify the target to fly pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location. %% Key points ---------- 1. Platforms, static or moving, can be used as targets for sensor platforms. 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios. 3. A tracker can be configured to exploit all sensor data created in a simulation. | 9,678 | en | 0.839142 |
#!/usr/bin/env python
import unittest
from weblogo.seq_io._nexus import Nexus
from . import data_stream
class test_nexus(unittest.TestCase):
    """Tests for the weblogo Nexus file parser."""

    def test_create(self):
        """An argument-free Nexus object can be constructed."""
        n = Nexus()
        self.assertNotEqual(n, None)

    def test_parse_f0(self):
        """Taxa labels, including quoted/punctuated names, parse intact."""
        f = data_stream("nexus/test_Nexus_input.nex")
        n = Nexus(f)
        expected = [
            "t1",
            "t2 the name",
            "isn'that [a] strange name?",
            "one should be punished, for (that)!",
            "t5",
            "t6",
            "t7",
            "t8",
            "t9",
        ]
        taxa = n.taxlabels
        self.assertEqual(taxa, expected)
        f.close()

    def test_parse_protein(self):
        """A protein alignment parses without raising."""
        f = data_stream("nexus/protein.nex")
        Nexus(f)
        f.close()

    def test_parse_dna(self):
        """A DNA alignment yields the expected taxa labels."""
        f = data_stream("nexus/dna.nex")
        n = Nexus(f)
        taxa = n.taxlabels
        taxa.sort()
        self.assertEqual(len(taxa), 10)
        self.assertEqual(taxa[0], "Carp")
        self.assertEqual(taxa[-1], "Whale")
        f.close()

    def test_TreeTest1(self):
        """Test Tree module."""
        f = data_stream("nexus/test_Nexus_input.nex")
        n = Nexus(f)
        t3 = n.trees[2]
        t3.root_with_outgroup(["t1", "t5"])
        # is_monophyletic returns the node_id of the common ancestor when
        # taxon_list is monophyletic, -1 otherwise.
        self.assertEqual(t3.is_monophyletic(["t1", "t5"]), 13)
        t3.split(parent_id=t3.search_taxon("t9"))
        f.close()


if __name__ == "__main__":
    unittest.main()
| tests/test_nexus.py | 1,619 | Test Tree module.
!/usr/bin/env python self.output_basics(n) Return node_id of common ancestor if taxon_list is monophyletic, -1 otherwise. | 140 | en | 0.297664 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import GPy
from emukit.quadrature.methods.vanilla_bq import VanillaBayesianQuadrature
from emukit.quadrature.loop.quadrature_loop import VanillaBayesianQuadratureLoop
from emukit.core.loop.user_function import UserFunctionWrapper
from emukit.model_wrappers.gpy_quadrature_wrappers import QuadratureRBF, RBFGPy, BaseGaussianProcessGPy
from numpy.testing import assert_array_equal
def func(x):
    """Constant user function: one row of 1.0 per input point in *x*."""
    n_points = x.shape[0]
    return np.full((n_points, 1), 1.0)
def test_vanilla_bq_loop():
    """The loop adds one evaluation per iteration on top of the initial design."""
    n_initial = 5
    X = np.random.rand(n_initial, 2)
    Y = np.random.rand(n_initial, 1)
    integral_bounds = [(-1, 1), (0, 1)]

    # Wrap a GPy RBF regression model in the emukit quadrature interfaces.
    rbf_kernel = GPy.kern.RBF(input_dim=X.shape[1], lengthscale=1., variance=1.)
    gpy_model = GPy.models.GPRegression(X=X, Y=Y, kernel=rbf_kernel)
    quadrature_kernel = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=integral_bounds)
    base_gp = BaseGaussianProcessGPy(kern=quadrature_kernel, gpy_model=gpy_model)
    method = VanillaBayesianQuadrature(base_gp=base_gp)
    loop = VanillaBayesianQuadratureLoop(model=method)

    n_iterations = 5
    loop.run_loop(user_function=UserFunctionWrapper(func), stopping_condition=n_iterations)

    # Each iteration appends exactly one point to the loop state.
    assert loop.loop_state.X.shape[0] == n_iterations + n_initial
    assert loop.loop_state.Y.shape[0] == n_iterations + n_initial
def test_vanilla_bq_loop_initial_state():
    """A freshly built loop starts at iteration 0 holding exactly the initial data."""
    X = np.random.rand(5, 2)
    Y = np.random.rand(5, 1)
    integral_bounds = [(-1, 1), (0, 1)]

    # Same model stack as test_vanilla_bq_loop, but the loop is never run.
    rbf_kernel = GPy.kern.RBF(input_dim=X.shape[1], lengthscale=1., variance=1.)
    gpy_model = GPy.models.GPRegression(X=X, Y=Y, kernel=rbf_kernel)
    quadrature_kernel = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=integral_bounds)
    base_gp = BaseGaussianProcessGPy(kern=quadrature_kernel, gpy_model=gpy_model)
    method = VanillaBayesianQuadrature(base_gp=base_gp)
    loop = VanillaBayesianQuadratureLoop(model=method)

    assert_array_equal(loop.loop_state.X, X)
    assert_array_equal(loop.loop_state.Y, Y)
    assert loop.loop_state.iteration == 0
| integration_tests/emukit/quadrature/test_vanilla_bq_loop.py | 2,320 | Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 | 107 | en | 0.70371 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceAntestCaselistQueryResponse(AlipayResponse):
    """Response object for the alipay.commerce.antest.caselist.query API."""

    def __init__(self):
        super(AlipayCommerceAntestCaselistQueryResponse, self).__init__()
        # Raw 'data' payload from the gateway; None until a response is parsed.
        self._data = None

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

    def parse_response_content(self, response_content):
        """Parse the gateway payload and capture its 'data' field, if present."""
        response = super(AlipayCommerceAntestCaselistQueryResponse, self).parse_response_content(response_content)
        try:
            self.data = response['data']
        except KeyError:
            # Field absent from the response; keep the default.
            pass
| alipay/aop/api/response/AlipayCommerceAntestCaselistQueryResponse.py | 694 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
from markovp import Markov
from src.forms import Markov_Form
from flask import Flask, render_template, request, redirect, url_for, Blueprint, make_response
home = Blueprint("home", __name__)
@home.route("/")
def index():
#{
form = Markov_Form()
return render_template('form.html', form = form)
#}
# The submission page.
@home.route("/submit", methods=["GET"])
def submit():
    """Generate Markov-chain text from the submitted form input.

    Redirects back to the form page when reached without a submission
    (e.g. by navigating to /submit directly).
    """
    # Only GET is routed here (methods=["GET"]), so the old method check was
    # dead code; the submit_button guard is what actually matters.
    if not request.args.get('submit_button'):
        # NOTE: this view lives on the "home" blueprint, so the endpoint is
        # "home.index". A bare url_for('index') raises BuildError; ".index"
        # resolves relative to the current blueprint.
        return redirect(url_for('.index'))
    # Get form values. http://stackoverflow.com/a/20341272/5415895
    text = request.args.get("input_text")
    # We have to cast text as a string, otherwise C++ complains.
    mark = Markov(str(text), 1)
    output = mark.generate()
    return render_template("output.html", input=str(text), output=output)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the Kronecker product of one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
def _vec(x):
    """Stacks column of matrix to form a single column."""
    # Transpose the trailing matrix dims, then collapse them into one axis,
    # preserving any leading batch dimensions.
    batch_dims = array_ops.shape(x)[:-2]
    flat_shape = array_ops.concat([batch_dims, [-1]], axis=0)
    return array_ops.reshape(array_ops.matrix_transpose(x), flat_shape)
def _unvec_by(y, num_col):
    """Unstack vector to form a matrix, with a specified amount of columns."""
    # Inverse of _vec: split the last axis into (num_col, -1), then transpose
    # the trailing matrix dims back, keeping leading batch dimensions.
    leading_dims = array_ops.shape(y)[:-1]
    matrix_shape = array_ops.concat([leading_dims, [num_col, -1]], axis=0)
    return array_ops.matrix_transpose(array_ops.reshape(y, matrix_shape))
def _rotate_last_dim(x, rotate_right=False):
    """Rotate the last dimension either left or right."""
    ndims = array_ops.rank(x)
    if rotate_right:
        # Move the trailing axis to the front.
        perm = array_ops.concat(
            [[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
    else:
        # Move the leading axis to the back.
        perm = array_ops.concat(
            [math_ops.range(1, ndims), [0]], axis=0)
    return array_ops.transpose(x, perm)
class LinearOperatorKronecker(linear_operator.LinearOperator):
"""Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
`op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is
associative).
If `opj` has shape `batch_shape_j` + [M_j, N_j`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
==> [[1., 2., 0., 0.],
[3., 4., 0., 0.],
[2., 4., 1., 2.],
[6., 8., 3., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
operator_45 = LinearOperatorFullMatrix(matrix)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
# Create a shape [2, 3, 20, 2] vector.
x = tf.random_normal(shape=[2, 3, 6, 2])
operator_large.matmul(x)
==> Shape [2, 3, 30, 2] Tensor
```
#### Performance
The performance of `LinearOperatorKronecker` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
# A Kronecker product is invertible, if and only if all factors are
# invertible.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
# The eigenvalues of a Kronecker product are equal to the products of eigen
# values of the corresponding factors.
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = operators[0].name
for operator in operators[1:]:
name += "_x_" + operator.name
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
    """The list of Kronecker factors (`LinearOperator`s) in order."""
    return self._operators
def _shape(self):
    """Static shape: broadcast batch shape + [prod range_dims, prod domain_dims]."""
    # The Kronecker product multiplies the row/column counts of all factors.
    domain_dimension = self.operators[0].domain_dimension
    range_dimension = self.operators[0].range_dimension
    for factor in self.operators[1:]:
        domain_dimension *= factor.domain_dimension
        range_dimension *= factor.range_dimension
    matrix_shape = tensor_shape.TensorShape(
        [range_dimension, domain_dimension])
    # Factor batch shapes must broadcast; broadcast_shape validates this.
    batch_shape = self.operators[0].batch_shape
    for factor in self.operators[1:]:
        batch_shape = common_shapes.broadcast_shape(
            batch_shape, factor.batch_shape)
    return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
    """Dynamic (graph-time) analogue of `_shape`, as an int Tensor."""
    domain_dimension = self.operators[0].domain_dimension_tensor()
    range_dimension = self.operators[0].range_dimension_tensor()
    for factor in self.operators[1:]:
        domain_dimension *= factor.domain_dimension_tensor()
        range_dimension *= factor.range_dimension_tensor()
    matrix_shape = [range_dimension, domain_dimension]
    # Factor batch shapes must also broadcast at runtime.
    batch_shape = self.operators[0].batch_shape_tensor()
    for factor in self.operators[1:]:
        batch_shape = array_ops.broadcast_dynamic_shape(
            batch_shape, factor.batch_shape_tensor())
    return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
    """Matrix-multiply `x` by the Kronecker product without densifying it.

    Relies on Roth's column lemma [1]: (A x B) * vec X = vec BXA^T, where
    vec stacks the columns of X under each other. Here x is treated as a
    batch of vec X (a batch of column vectors); each batch member is
    reshaped to a matrix. If B is itself a Kronecker product, the lemma
    applies again, so we iterate over the factors.

    [1] W. E. Roth, "On direct product matrices,"
        Bulletin of the American Mathematical Society, vol. 40,
        pp. 461-468, 1934.

    Efficiency: naively forming the dense Kronecker product takes cubic
    time in domain_dimension (square case) and can be prohibitively
    expensive in memory. With factors of size (n1, n1) ... (nJ, nJ),
    N = \prod n_i, and a [N, M] input, the naive approach is O(N^2 M)
    while this implementation is roughly O(M * (\sum n_i) * N) (ignoring
    reshapes/transposes, which do affect cache behavior).

    Shape comments below describe adjoint=False, adjoint_arg=False; the
    general case of different adjoints is still handled.
    """
    if adjoint_arg:
        x = linalg.adjoint(x)
    # Always add a batch dimension to enable broadcasting to work.
    batch_shape = array_ops.concat(
        [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
    x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
    # x has shape [B, R, C] (batch dims, rows, columns). To apply Roth's
    # column lemma we operate on a batch of column vectors, reshaped with
    # the vector axis moved to the front so that broadcasting between the
    # operators and the batch dimensions B still works.
    output = _rotate_last_dim(x, rotate_right=True)
    # Expand the shape to [A, C, B, R]. The leading A axis accumulates
    # dimensions from each factor matmul: A = 1 now and ends up equal to
    # self.range_dimension. V denotes the last axis: V = R at the start
    # and 1 in the end.
    output = output[array_ops.newaxis, ...]
    for operator in self.operators[:-1]:
        # Reshape output from [A, C, B, V] to
        # [A, C, B, V / op.domain_dimension, op.domain_dimension].
        if adjoint:
            operator_dimension = operator.range_dimension_tensor()
        else:
            operator_dimension = operator.domain_dimension_tensor()
        output = _unvec_by(output, operator_dimension)
        # We are computing (XA^T) = (AX^T)^T:
        # [A, C, B, V / op.domain_dimension, op.domain_dimension] becomes
        # [A, C, B, V / op.domain_dimension, op.range_dimension].
        output = array_ops.matrix_transpose(output)
        output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)
        output = array_ops.matrix_transpose(output)
        # Rearrange to [A * op.range_dimension, C, B, V / op.domain_dimension].
        output = _rotate_last_dim(output, rotate_right=False)
        output = _vec(output)
        output = _rotate_last_dim(output, rotate_right=True)
    # After the loop, A = self.range_dimension / op[-1].range_dimension and
    # V = op[-1].domain_dimension; matvec converts that to
    # [A, C, B, op[-1].range_dimension].
    output = self.operators[-1].matvec(output, adjoint=adjoint)
    # Rearrange shape to be [B1, ... Bn, self.range_dimension, C].
    output = _rotate_last_dim(output, rotate_right=False)
    output = _vec(output)
    output = _rotate_last_dim(output, rotate_right=False)
    if x.shape.is_fully_defined():
        column_dim = x.shape[-1]
        broadcast_batch_shape = common_shapes.broadcast_shape(
            x.shape[:-2], self.batch_shape)
        if adjoint:
            matrix_dimensions = [self.domain_dimension, column_dim]
        else:
            matrix_dimensions = [self.range_dimension, column_dim]
        # NOTE: removed leftover debug print statements that were here.
        output.set_shape(broadcast_batch_shape.concatenate(
            matrix_dimensions))
    return output
def _determinant(self):
    """Determinant of the Kronecker product via the factor determinants."""
    # |X1 x X2| = |X1| ** n * |X2| ** m for an m x m X1 and n x n X2.
    # Applying this inductively (with T the product of all factor domain
    # dimensions) gives prod(|X_i| ** (T / dim(X_i))).
    total = self.domain_dimension_tensor()
    result = 1.
    for factor in self.operators:
        exponent = math_ops.cast(
            total / factor.domain_dimension_tensor(),
            dtype=factor.dtype)
        result *= factor.determinant() ** exponent
    return result
def _log_abs_determinant(self):
    """log|det|, computed as sum((T / dim(X_i)) * log |X_i|)."""
    total = self.domain_dimension_tensor()
    result = 0.
    for factor in self.operators:
        weight = math_ops.cast(
            total / factor.domain_dimension_tensor(),
            dtype=factor.dtype)
        result += factor.log_abs_determinant() * weight
    return result
def _trace(self):
    """Trace via tr(A x B) = tr(A) * tr(B), extended over all factors."""
    result = 1.
    for factor in self.operators:
        result *= factor.trace()
    return result
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Solve the Kronecker system against `rhs` without densifying.

    Mirrors `_matmul`'s use of Roth's column lemma, with every `matmul`
    replaced by `solve`; this is valid because inv(A x B) = inv(A) x inv(B).
    """
    # Here we follow the same use of Roth's column lemma as in `matmul`, with
    # the key difference that we replace all `matmul` instances with `solve`.
    # This follows from the property that inv(A x B) = inv(A) x inv(B).
    # Below we document the shape manipulation for adjoint=False,
    # adjoint_arg=False, but the general case of different adjoints is still
    # handled.
    if adjoint_arg:
        rhs = linalg.adjoint(rhs)
    # Always add a batch dimension to enable broadcasting to work.
    batch_shape = array_ops.concat(
        [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
    rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)
    # rhs has shape [B, R, C], where B represent some number of batch
    # dimensions,
    # R represents the number of rows, and C represents the number of columns.
    # In order to apply Roth's column lemma, we need to operate on a batch of
    # column vectors, so we reshape into a batch of column vectors. We put it
    # at the front to ensure that broadcasting between operators to the batch
    # dimensions B still works.
    output = _rotate_last_dim(rhs, rotate_right=True)
    # Also expand the shape to be [A, C, B, R]. The first dimension will be
    # used to accumulate dimensions from each operator matmul.
    output = output[array_ops.newaxis, ...]
    # In this loop, A is going to refer to the value of the accumulated
    # dimension. A = 1 at the start, and will end up being self.range_dimension.
    # V will refer to the last dimension. V = R at the start, and will end up
    # being 1 in the end.
    for operator in self.operators[:-1]:
        # Reshape output from [A, C, B, V] to be
        # [A, C, B, V / op.domain_dimension, op.domain_dimension]
        if adjoint:
            operator_dimension = operator.range_dimension_tensor()
        else:
            operator_dimension = operator.domain_dimension_tensor()
        output = _unvec_by(output, operator_dimension)
        # We are computing (XA^-1^T) = (A^-1 X^T)^T.
        # output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
        # which is being converted to:
        # [A, C, B, V / op.domain_dimension, op.range_dimension]
        output = array_ops.matrix_transpose(output)
        output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)
        output = array_ops.matrix_transpose(output)
        # Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
        output = _rotate_last_dim(output, rotate_right=False)
        output = _vec(output)
        output = _rotate_last_dim(output, rotate_right=True)
    # After the loop, we will have
    # A = self.range_dimension / op[-1].range_dimension
    # V = op[-1].domain_dimension
    # We convert that using matvec to get:
    # [A, C, B, op[-1].range_dimension]
    output = self.operators[-1].solvevec(output, adjoint=adjoint)
    # Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
    output = _rotate_last_dim(output, rotate_right=False)
    output = _vec(output)
    output = _rotate_last_dim(output, rotate_right=False)
    if rhs.shape.is_fully_defined():
        column_dim = rhs.shape[-1]
        broadcast_batch_shape = common_shapes.broadcast_shape(
            rhs.shape[:-2], self.batch_shape)
        if adjoint:
            matrix_dimensions = [self.domain_dimension, column_dim]
        else:
            matrix_dimensions = [self.range_dimension, column_dim]
        output.set_shape(broadcast_batch_shape.concatenate(
            matrix_dimensions))
    return output
def _diag_part(self):
    """Diagonal of the Kronecker product: outer product of factor diagonals."""
    diag = self.operators[0].diag_part()
    for factor in self.operators[1:]:
        # Outer product of the running diagonal with this factor's diagonal.
        diag = diag[..., :, array_ops.newaxis] * (
            factor.diag_part()[..., array_ops.newaxis, :])
        # Collapse the trailing two axes back into one diagonal axis.
        diag = array_ops.reshape(
            diag,
            shape=array_ops.concat(
                [array_ops.shape(diag)[:-2], [-1]], axis=0))
    # A (possibly rectangular) matrix has min(R, C) diagonal entries.
    diag_dimension = (
        self.domain_dimension
        if self.range_dimension > self.domain_dimension
        else self.range_dimension)
    diag.set_shape(
        self.batch_shape.concatenate(diag_dimension))
    return diag
def _to_dense(self):
    """Materialize the full dense [batch] matrix of the Kronecker product."""
    dense = self.operators[0].to_dense()
    for factor in self.operators[1:]:
        # Running product expanded to shape [B, R1, 1, C1, 1].
        expanded = dense[
            ..., :, array_ops.newaxis, :, array_ops.newaxis]
        # Next factor expanded to shape [B, 1, R2, 1, C2].
        factor_dense = factor.to_dense()[
            ..., array_ops.newaxis, :, array_ops.newaxis, :]
        # Elementwise product broadcasts to [B, R1, R2, C1, C2].
        outer = expanded * factor_dense
        outer_shape = array_ops.shape(outer)
        # Merge paired axes to get [B, R1 * R2, C1 * C2].
        dense = array_ops.reshape(
            outer,
            shape=array_ops.concat(
                [outer_shape[:-4],
                 [outer_shape[-4] * outer_shape[-3],
                  outer_shape[-2] * outer_shape[-1]]
                 ], axis=0))
    dense.set_shape(self.shape)
    return dense
def _assert_non_singular(self):
    """Group the factors' non-singularity asserts; requires square factors."""
    if not all(factor.is_square for factor in self.operators):
        raise errors.InvalidArgumentError(
            node_def=None, op=None, message="All Kronecker factors must be "
            "square for the product to be invertible.")
    checks = [factor.assert_non_singular() for factor in self.operators]
    return control_flow_ops.group(checks)
def _assert_self_adjoint(self):
    """Group the factors' self-adjoint asserts; requires square factors."""
    if not all(factor.is_square for factor in self.operators):
        raise errors.InvalidArgumentError(
            node_def=None, op=None, message="All Kronecker factors must be "
            "square for the product to be self adjoint.")
    checks = [factor.assert_self_adjoint() for factor in self.operators]
    return control_flow_ops.group(checks)
| tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py | 22,561 | Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
`op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is
associative).
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
==> [[1., 2., 0., 0.],
[3., 4., 0., 0.],
[2., 4., 1., 2.],
[6., 8., 3., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
# Create a shape [2, 3, 30, 2] vector.
x = tf.random_normal(shape=[2, 3, 30, 2])
operator_large.matmul(x)
==> Shape [2, 3, 20, 2] Tensor
```
#### Performance
The performance of `LinearOperatorKronecker` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
Rotate the last dimension either left or right.
Unstack vector to form a matrix, with a specified amount of columns.
Stacks column of matrix to form a single column.
Construct the Kronecker product of one or more `LinearOperators`.
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Validate operators. Validate dtype. Auto-set and check hints. A Kronecker product is invertible, if and only if all factors are invertible. The eigenvalues of a Kronecker product are equal to the products of eigen values of the corresponding factors. Initialization. Get final matrix shape. Get broadcast batch shape. broadcast_shape checks for compatibility. Get broadcast batch shape. broadcast_shape checks for compatibility. Here we heavily rely on Roth's column Lemma [1]: (A x B) * vec X = vec BXA^T, where vec stacks all the columns of the matrix under each other. In our case, x represents a batch of vec X (i.e. we think of x as a batch of column vectors, rather than a matrix). Each member of the batch can be reshaped to a matrix (hence we get a batch of matrices). We can iteratively apply this lemma by noting that if B is a Kronecker product, then we can apply the lemma again. [1] W. E. Roth, "On direct product matrices," Bulletin of the American Mathematical Society, vol. 40, pp. 461-468, 1934 Efficiency Naively doing the Kronecker product, by calculating the dense matrix and applying it will can take cubic time in the size of domain_dimension (assuming a square matrix). The other issue is that calculating the dense matrix can be prohibitively expensive, in that it can take a large amount of memory. 
This implementation avoids this memory blow up by only computing matmuls with the factors. In this way, we don't have to realize the dense matrix. In terms of complexity, if we have Kronecker Factors of size: (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \prod n_i, and we have as input a [N, M] matrix, the naive approach would take O(N^2 M). With this approach (ignoring reshaping of tensors and transposes for now), the time complexity can be O(M * (\sum n_i) * N). There is also the benefit of batched multiplication (In this example, the batch size is roughly M * N) so this can be much faster. However, not factored in are the costs of the several transposing of tensors, which can affect cache behavior. Below we document the shape manipulation for adjoint=False, adjoint_arg=False, but the general case of different adjoints is still handled. Always add a batch dimension to enable broadcasting to work. x has shape [B, R, C], where B represent some number of batch dimensions, R represents the number of rows, and C represents the number of columns. In order to apply Roth's column lemma, we need to operate on a batch of column vectors, so we reshape into a batch of column vectors. We put it at the front to ensure that broadcasting between operators to the batch dimensions B still works. Also expand the shape to be [A, C, B, R]. The first dimension will be used to accumulate dimensions from each operator matmul. In this loop, A is going to refer to the value of the accumulated dimension. A = 1 at the start, and will end up being self.range_dimension. V will refer to the last dimension. V = R at the start, and will end up being 1 in the end. Reshape output from [A, C, B, V] to be [A, C, B, V / op.domain_dimension, op.domain_dimension] We are computing (XA^T) = (AX^T)^T. 
output has [A, C, B, V / op.domain_dimension, op.domain_dimension], which is being converted to: [A, C, B, V / op.domain_dimension, op.range_dimension] Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension] After the loop, we will have A = self.range_dimension / op[-1].range_dimension V = op[-1].domain_dimension We convert that using matvec to get: [A, C, B, op[-1].range_dimension] Rearrange shape to be [B1, ... Bn, self.range_dimension, C] Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m matrix, and X2 is an n x n matrix. We can iteratively apply this property to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the domain dimension of all operators, then we have: |X1 x X2 x X3 ...| = |X1| ** (T / m) * |X2 x X3 ... | ** m = |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... = |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n) And by doing induction we have product(|X_i| ** (T / dim(X_i))). This will be sum((total / dim(x_i)) * log |X_i|) tr(A x B) = tr(A) * tr(B) Here we follow the same use of Roth's column lemma as in `matmul`, with the key difference that we replace all `matmul` instances with `solve`. This follows from the property that inv(A x B) = inv(A) x inv(B). Below we document the shape manipulation for adjoint=False, adjoint_arg=False, but the general case of different adjoints is still handled. Always add a batch dimension to enable broadcasting to work. rhs has shape [B, R, C], where B represent some number of batch dimensions, R represents the number of rows, and C represents the number of columns. In order to apply Roth's column lemma, we need to operate on a batch of column vectors, so we reshape into a batch of column vectors. We put it at the front to ensure that broadcasting between operators to the batch dimensions B still works. Also expand the shape to be [A, C, B, R]. The first dimension will be used to accumulate dimensions from each operator matmul. 
In this loop, A is going to refer to the value of the accumulated dimension. A = 1 at the start, and will end up being self.range_dimension. V will refer to the last dimension. V = R at the start, and will end up being 1 in the end. Reshape output from [A, C, B, V] to be [A, C, B, V / op.domain_dimension, op.domain_dimension] We are computing (XA^-1^T) = (A^-1 X^T)^T. output has [A, C, B, V / op.domain_dimension, op.domain_dimension], which is being converted to: [A, C, B, V / op.domain_dimension, op.range_dimension] Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension] After the loop, we will have A = self.range_dimension / op[-1].range_dimension V = op[-1].domain_dimension We convert that using matvec to get: [A, C, B, op[-1].range_dimension] Rearrange shape to be [B1, ... Bn, self.range_dimension, C] Product has shape [B, R1, 1, C1]. Operator has shape [B, 1, R2, 1, C2]. This is now [B, R1, R2, C1, C2]. Now merge together dimensions to get [B, R1 * R2, C1 * C2]. | 10,399 | en | 0.848733 |
import threading
import sys
class ThreadHandler(object):
    """Run ``callable`` in a daemon thread, capturing any exception it raises.

    The spawned thread is exposed as ``self.thread`` so callers can join it.
    If the target raised, ``self.exception`` holds the ``sys.exc_info()``
    triple; otherwise it stays ``None``.
    """

    def __init__(self, name, callable, *args, **kwargs):
        # NOTE: the parameter name shadows the builtin ``callable``; kept
        # as-is to preserve the public signature for existing callers.
        # Set up exception handling
        self.exception = None

        def wrapper(*args, **kwargs):
            try:
                callable(*args, **kwargs)
            except BaseException:
                # Store the full exc_info triple so the spawning thread can
                # inspect or re-raise it after join().
                self.exception = sys.exc_info()

        # Kick off thread. `daemon = True` is the modern spelling of the
        # deprecated setDaemon(True); a daemon thread cannot block exit.
        thread = threading.Thread(None, wrapper, name, args, kwargs)
        thread.daemon = True
        thread.start()

        # Make thread available to instantiator
        self.thread = thread
| fabric/thread_handling.py | 588 | Set up exception handling Kick off thread Make thread available to instantiator | 79 | en | 0.775014 |
# -*- coding: utf-8 -*-
# ===============================================================
# Author: Rodolfo Ferro
# Email: ferro@cimat.mx
# Twitter: @FerroRodolfo
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by Rodolfo Ferro, for
# his workshop in HackSureste 2019 at Universidad Modelo
# in Mérida. Any explicit usage of this script or its
# contents is granted according to the license provided and
# its conditions.
# ===============================================================
from flask import Flask, jsonify, request, render_template
from iris import iris_classifier
from pprint import pprint
import numpy as np
import requests
import json
# Main app:
app = Flask(__name__)

# Global:
version = 'v0.0'  # API version segment used to build every route URL
classifier = iris_classifier()  # trained iris model, loaded once at startup
# Map from predicted class index (stringified) to species display name.
species = {
    '0': 'I. setosa',
    '1': 'I. versicolor',
    '2': 'I. virginica'
}
# Static website:
@app.route('/')
def index():
    """Serve the static landing page."""
    return render_template("index.html")
# API MAIN STRUCTURE:
@app.route('/api/' + version, methods=['GET'])
def test():
    """GET endpoint that verifies the API is up and responding."""
    # Fixed hello-world payload used as a health check.
    payload = {"response": [{"text": "Hello world!"}]}
    return jsonify(payload)
@app.route('/api/' + version + '/predict', methods=['POST'])
def predict():
    """
    POST method to predict with our classification model.

    Expects a JSON body with the numeric fields `sepal_length`,
    `sepal_width`, `petal_length`, `petal_width`. Returns a JSON
    response echoing the input plus the predicted class index and
    its species name.
    """
    # Get data from JSON object in POST method:
    req_data = request.get_json()
    # Parse data from JSON (missing keys raise KeyError -> 500;
    # validation/error responses are left to a future change):
    sl = req_data['sepal_length']
    sw = req_data['sepal_width']
    pl = req_data['petal_length']
    pw = req_data['petal_width']
    # Predict with model (single-sample batch):
    input_data = np.array([[sl, sw, pl, pw]])
    prediction = classifier.predict(input_data)
    # NOTE: removed a leftover debug print(prediction) here.
    # Output message:
    message = {"response": [
        {"input": {
            'sepal_length': sl,
            'sepal_width': sw,
            'petal_length': pl,
            'petal_width': pw
        }},
        {"prediction": int(prediction[0])},
        {"species": species[str(prediction[0])]}]}
    return jsonify(message)
@app.errorhandler(404)
def not_found(error=None):
    """Return a JSON 404 payload for unknown routes."""
    response = jsonify({
        'status': 404,
        'message': 'Not Found: ' + request.url,
    })
    response.status_code = 404
    return response
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True, port=5000)
| app.py | 2,433 | POST method to predict with our classification model.
GET method to test the API.
-*- coding: utf-8 -*- =============================================================== Author: Rodolfo Ferro Email: ferro@cimat.mx Twitter: @FerroRodolfo ABOUT COPYING OR USING PARTIAL INFORMATION: This script was originally created by Rodolfo Ferro, for his workshop in HackSureste 2019 at Universidad Modelo in Mérida. Any explicit usage of this script or its contents is granted according to the license provided and its conditions. =============================================================== Main app: Global: Static website: API MAIN STRUCTURE: Output message: Get data from JSON object in POST method: Parse data from JSON: Predict with model: Output message: | 752 | en | 0.767996 |
from argparse import Action, Namespace
from typing import (List)
from .switch_config import SwitchConfigCLI
from ..switch import SwitchChip
class EraseConfigCLI(SwitchConfigCLI):
    """
    The "erase" action that removes all stored items from the EEPROM memory.
    """

    def __init__(self, subparsers: Action, switch: SwitchChip) -> None:
        """Register the ``erase`` subcommand on the given subparsers."""
        super().__init__(subparsers, switch)
        self._subparser = self._subparsers.add_parser(
            "erase",
            help="Erase all configuration",
        )
        # Route `erase` invocations to this instance's apply().
        self._subparser.set_defaults(execute=self.apply)

    def apply(self, args: Namespace) -> SwitchConfigCLI:
        """Erase takes no extra arguments; return self for chaining."""
        return self

    def create_configuration(self) -> List[List[int]]:
        """Return the raw command words to send to the switch.

        NOTE(review): [101, 0, 0, 0] presumably encodes the erase
        opcode -- confirm against the device protocol documentation.
        """
        return [[101, 0, 0, 0]]
| botblox_config/data_manager/erase.py | 746 | The "erase" action that removes all stored items from the EEPROM memory. | 72 | en | 0.872639 |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from flatten_dict import flatten
from fedlearner_webconsole.proto.workflow_definition_pb2 import Slot
from fedlearner_webconsole.workflow_template.template_validaor \
import YamlTemplate
class _YamlTemplate(YamlTemplate):
    # Which placeholders in the template should be interpreted:
    # only identifiers of the form Slot_<lowercase letters/digits/underscores>.
    idpattern = r'Slot_[a-z0-9_]*'

    def substitute(self, mapping):
        """Substitute slot placeholders, leaving invalid ones untouched."""
        return super()._substitute(mapping,
                                   fixed_placeholder=None,
                                   ignore_invalid=True)
def format_yaml(yaml, **kwargs):
    """Formats a yaml template.

    Example usage:
        format_yaml('{"abc": ${x.y}}', x={'y': 123})
    output should be '{"abc": 123}'
    """
    template = _YamlTemplate(yaml)
    try:
        # Flatten nested kwargs into dotted keys so `${x.y}`-style
        # placeholders resolve against them.
        flat_mapping = flatten(kwargs or {}, reducer='dot')
        return template.substitute(flat_mapping)
    except KeyError as e:
        raise RuntimeError(
            'Unknown placeholder: {}'.format(e.args[0])) from e
def generate_yaml_template(base_yaml, slots_proto):
    """
    Args:
        base_yaml: A string representation of one type job's base yaml.
        slots_proto: A proto map object representation of modification
            template's operable smallest units.
    Returns:
        string: A yaml_template
    """
    slots = {}
    for key in slots_proto:
        slot = slots_proto[key]
        if slot.reference_type == Slot.ReferenceType.DEFAULT:
            # DEFAULT slots are substituted with their literal default value.
            slots[key] = slot.default
        else:
            # Other slots become `${reference}` placeholders for later passes.
            slots[key] = f'${{{slot.reference}}}'
    return format_yaml(base_yaml, **slots)
| web_console_v2/api/fedlearner_webconsole/workflow_template/slots_formatter.py | 2,207 | Formats a yaml template.
Example usage:
format_yaml('{"abc": ${x.y}}', x={'y': 123})
output should be '{"abc": 123}'
Args:
base_yaml: A string representation of one type job's base yaml.
slots_proto: A proto map object representation of modification
template's operable smallest units.
Returns:
string: A yaml_template
Copyright 2020 The FedLearner Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. coding: utf-8 Which placeholders in the template should be interpreted | 995 | en | 0.765837 |
from dataclasses import dataclass, field
from enum import Enum
from typing import (
Callable,
Dict,
List,
Optional,
Union
)
import weakref
import threading
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import distributed_c10d
from torch.distributed._sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
ShardingSpec,
)
from torch.distributed._sharding_spec._internals import (
check_tensor,
get_split_size,
get_chunked_dim_size,
validate_non_overlapping_shards_metadata,
)
from torch.types import Number
from .metadata import TensorProperties, ShardedTensorMetadata
from .shard import Shard
from .utils import (
get_current_process_group,
_flatten_tensor_size,
_parse_and_validate_remote_device,
_validate_output_tensor_for_gather,
build_metadata_from_local_shards,
build_global_metadata
)
# Tracking for sharded tensor objects.
_sharded_tensor_lock = threading.Lock()  # guards the id counter and map below
_sharded_tensor_current_id = 0  # next id to hand out to a new ShardedTensor
# Weak references so this registry does not keep ShardedTensors alive.
_sharded_tensor_map: Dict[int, 'weakref.ReferenceType[ShardedTensor]'] = {}

# Custom sharded ops
_SHARDED_OPS: Dict[str, Callable] = {}  # op name -> sharded implementation
def _register_sharded_op(op, func):
from inspect import signature
if len(signature(func).parameters) != 4:
raise TypeError(
f'Custom sharded op function expects signature: '
f'(types, args, kwargs, process_group), but received '
f'signature: {signature(func)}')
global _SHARDED_OPS
_SHARDED_OPS[op] = func
def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int):
    """Attach remote shard RRefs from ``rpc_rank`` to a registered ShardedTensor."""
    with _sharded_tensor_lock:
        if sharded_tensor_id not in _sharded_tensor_map:
            raise RuntimeError(
                f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}')
        # The registry holds weakrefs, so the tensor may already be gone.
        tensor = _sharded_tensor_map[sharded_tensor_id]()
        if tensor is None:
            raise RuntimeError('ShardedTensor weakref has been deallocated')
        tensor._register_remote_shards(rrefs, rpc_rank)
# CreateOp enumerates which torch factory materializes each local shard
# (see _create_tensor_from_params). Built with the Enum functional API;
# member names and values are identical to the original class form.
CreateOp = Enum(
    'CreateOp',
    [
        ('EMPTY', 0),
        ('FULL', 1),
        ('ONES', 2),
        ('RAND', 3),
        ('ZEROS', 4),
    ],
)
@dataclass
class TensorInitParams(object):
    """ Container for list of common params to create new local tensor. """

    # Which factory op creates the local shards (EMPTY/FULL/ONES/RAND/ZEROS).
    create_op: CreateOp

    # needed when create_op is FULL
    # default set to False (not None) since None is incompatible with Number.
    fill_value: Number = field(default=False)

    # Use default_factory so every instance gets its OWN TensorProperties.
    # The previous field(default=TensorProperties(...)) shared one mutable
    # instance across all TensorInitParams, and ShardedTensor.__init__ mutates
    # tensor_properties in place (e.g. assigns .dtype) — a latent cross-instance
    # aliasing bug.
    tensor_properties: TensorProperties = field(
        default_factory=lambda: TensorProperties(dtype=torch.get_default_dtype(),
                                                 layout=torch.strided,
                                                 requires_grad=False,
                                                 memory_format=torch.contiguous_format,
                                                 pin_memory=False))
class ShardedTensor(object):
    """
    ShardedTensor is an abstraction to represent Tensors that are sharded
    across multiple devices and multiple processes.

    ShardedTensor is initialized in an SPMD like fashion where each rank
    initializes the ShardedTensor. The ShardedTensor object on each rank
    then only stores the local shard for the Tensor and provides global
    metadata for all the shards.

    ShardedTensor doesn't provide any Tensor like operations but is a wrapper
    providing the Tensor representing the local shard and the global metadata.
    Using these, users can build their custom distributed sharded computations
    on top of this primitive. The local shards are all initialized using the
    create_op specified by tensor_init_params.create_op, e.g., torch.ones, or
    torch.empty

    Args:
        sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification
            describing how to shard the Tensor.
        size (int...): a sequence of integers defining the shape of the output
            tensor. Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        tensor_init_params (:class: `TensorInitParams`): common params to create tensor.
        init_rrefs (bool, optional): Whether or not to initialize
            :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
            Need to initialize the RPC Framework if specified as ``True``.
            Default: ``False``.

    .. note:: ShardedTensor uses collectives to do various operations, i.e. it
        uses all_gather to do cross rank validations. For NCCL-based process
        groups, internal tensor representations of objects must be moved to the
        GPU device before communication takes place. In this case, the device
        used is given by ``torch.cuda.current_device()`` and it is the user's
        responsibility to ensure that this is set so that each rank has an
        individual GPU, via ``torch.cuda.set_device()``
    """

    def __new__(cls, *args, **kwargs):
        # Use __new__ for logging purposes.
        torch._C._log_api_usage_once("torch.distributed.sharded_tensor")
        return super(ShardedTensor, cls).__new__(cls)

    def __init__(
        self,
        sharding_spec: ShardingSpec,
        *size,
        tensor_init_params: TensorInitParams,
        process_group=None,
        init_rrefs=False,
    ):
        # prepare initialization, initialize fields like
        # _process_group, _local_shards, etc.
        self._prepare_init(process_group=process_group, init_rrefs=init_rrefs)

        if tensor_init_params.tensor_properties is None:
            raise ValueError('tensor_properties must not be None.')

        if tensor_init_params.tensor_properties.dtype is None:
            tensor_init_params.tensor_properties.dtype = torch.get_default_dtype()

        if tensor_init_params.tensor_properties.layout != torch.strided:
            raise ValueError('Only torch.strided layout is currently supported')

        if tensor_init_params.tensor_properties.memory_format != torch.contiguous_format:
            raise ValueError('Only torch.contiguous_format memory_format is currently supported')

        dims = _flatten_tensor_size(size)

        self._sharding_spec = sharding_spec

        if isinstance(self._sharding_spec, ChunkShardingSpec):
            self._init_chunked(dims, tensor_init_params)
        elif isinstance(self._sharding_spec, EnumerableShardingSpec):
            self._init_enumerable(dims, tensor_init_params)
        else:
            raise ValueError(f'Unsupported sharding_spec: {self._sharding_spec}')

        # do post initialization (i.e. register sharded_tensor_id, initialize_rpc)
        self._post_init()

    def _prepare_init(self, process_group=None, init_rrefs=False):
        # Set up the fields every construction path needs before any shards
        # are created (also used by the _init_from_local_shards* classmethods).
        self._init_rrefs = init_rrefs
        self._sharded_tensor_id = None

        self._process_group = (
            process_group
            if process_group is not None
            else distributed_c10d._get_default_group()
        )

        self._local_shards: List[Shard] = []
        self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {}

    def _post_init(self):
        # Initialize RPC if available.
        if self._init_rrefs:
            with _sharded_tensor_lock:
                global _sharded_tensor_current_id, _sharded_tensor_map
                self._sharded_tensor_id = _sharded_tensor_current_id
                _sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self)
                _sharded_tensor_current_id += 1

            if not rpc._is_current_rpc_agent_set():
                raise RuntimeError(
                    'RPC Framework needs to be initialized using'
                    ' torch.distributed.rpc.init_rpc if init_rrefs is set to True')
            self._init_rpc()

    def __del__(self):
        # Clean up the global map.
        # NOTE(review): if construction failed before _prepare_init ran,
        # _sharded_tensor_id would not exist here — consider getattr guard.
        with _sharded_tensor_lock:
            global _sharded_tensor_current_id, _sharded_tensor_map
            if self._sharded_tensor_id in _sharded_tensor_map:
                _sharded_tensor_map.pop(self._sharded_tensor_id)  # type: ignore[call-overload]

    def _init_rpc(self):
        # Validate PG and RPC ranks match.
        pg_rank = dist.get_rank()
        rpc_rank = rpc.get_worker_info().id
        if pg_rank != rpc_rank:
            raise ValueError(
                f'Default ProcessGroup and RPC ranks must be '
                f'the same for ShardedTensor, found process group rank: '
                f'{pg_rank} and RPC rank: {rpc_rank}'
            )

        self._remote_shards = {}

        # Gather all the sharded tensor ids.
        worker_infos = rpc._get_current_rpc_agent().get_worker_infos()
        rank_to_name = {}
        name_to_rank = {}

        for worker_info in worker_infos:
            rank_to_name[worker_info.id] = worker_info.name
            name_to_rank[worker_info.name] = worker_info.id

        all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id)

        # Share the local shards to the entire world.
        futs = []
        rpc_rank = rpc.get_worker_info().id
        for rank in range(dist.get_world_size()):
            # Skip self.
            if rank == dist.get_rank():
                continue

            if len(self.local_shards()) != 0:
                rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()]
                fut = rpc.rpc_async(
                    rank,
                    _register_remote_shards,
                    args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank))
                futs.append(fut)

        torch.futures.wait_all(futs)

        # Barrier for all RPCs to finish on all ranks.
        rpc.api._all_gather(None)

    def gather(
        self,
        dst: int = 0,
        out: Optional[torch.Tensor] = None,
    ) -> None:
        """
        Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
        sharded tensor.

        The API needs to be called on all ranks in SPMD fashion. All ranks should have
        the same ``dst``. ``out`` should be a tensor of the same size as the overall
        size of the sharded tensor on ``dst`` and ``None`` on all other ranks.

        Args:
            dst(int): The rank where full tensor is constructed.
                Default: 0
            out (:class `torch.Tensor`, optional): The output full tensor.
                Must be provided ONLY on ``dst`` rank.
                Default: ``None``
        """
        rank = dist.get_rank(self._process_group)
        full_size = self.metadata().size
        _validate_output_tensor_for_gather(rank, dst, full_size, out)

        local_shards = self.local_shards()

        world_size = dist.get_world_size(self._process_group)

        gathered_shards = [None] * world_size
        # will revise this part with CPU support and use dist.gather()
        # once NCCL support for gather() is ready
        # https://github.com/pytorch/pytorch/issues/66187
        dist.all_gather_object(
            obj=local_shards,
            object_list=gathered_shards,
            group=self._process_group,
        )

        if rank == dst:
            dims = len(full_size)
            for shards in gathered_shards:
                if shards is None:
                    raise RuntimeError(
                        # fix: message was missing the f-prefix, so the
                        # literal text "{dst}" was emitted.
                        f'Gathered shards cannot be None on dst rank {dst}'
                    )
                for shard in shards:
                    metadata = shard.metadata
                    tensor = shard.tensor

                    # Narrow into the region of `out` this shard covers,
                    # one dimension at a time, then copy the shard in.
                    out_narrow_view = out
                    for dim in range(dims):
                        out_narrow_view = out_narrow_view.narrow(
                            dim,
                            metadata.shard_offsets[dim],
                            metadata.shard_sizes[dim],
                        )

                    out_narrow_view.copy_(tensor)

    @classmethod
    def _init_from_local_shards(
        cls,
        local_shards: List[Shard],
        *global_size,
        process_group=None,
        init_rrefs=False,
    ):
        # STEP 1: Validate the Shardmetadatas locally
        process_group = (
            process_group
            if process_group is not None
            else distributed_c10d._get_default_group()
        )
        current_rank = dist.get_rank(process_group)
        world_size = dist.get_world_size(process_group)

        local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None
        global_tensor_size = _flatten_tensor_size(global_size)

        if len(local_shards) > 0:
            local_sharded_tensor_metadata = \
                build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group)

        # STEP 2. Validate metadata across ranks, and build a global sharded tensor
        # metadata by gathering local ShardedTensorMetadata
        gathered_metadatas: List[Optional[ShardedTensorMetadata]] = []
        if world_size > 1:
            gathered_metadatas = [None for _ in range(world_size)]

            dist.all_gather_object(
                gathered_metadatas,
                local_sharded_tensor_metadata,
                group=process_group
            )
        else:
            gathered_metadatas = [local_sharded_tensor_metadata]

        global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas)

        # STEP 3: Validation done, create the actual ShardedTensor and populate fields
        # prepare initialization
        sharded_tensor = cls.__new__(cls)
        sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)

        # add to metadata and local_shards
        sharded_tensor._metadata = global_sharded_tensor_metadata
        sharded_tensor._local_shards = local_shards
        # make a EnumerableShardingSpec for sharded tensors that initialized from this API.
        # TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
        #       see issue https://github.com/pytorch/pytorch/issues/67244
        sharded_tensor._sharding_spec = EnumerableShardingSpec(global_sharded_tensor_metadata.shards_metadata)

        # run post initialization, i.e. map registration, rpc initialization
        sharded_tensor._post_init()
        return sharded_tensor

    @classmethod
    def _init_from_local_shards_and_global_metadata(
        cls,
        local_shards: List[Shard],
        sharded_tensor_metadata: ShardedTensorMetadata,
        process_group=None,
        init_rrefs=False,
    ) -> "ShardedTensor":
        """
        Initialize a ShardedTensor with local shards and a global
        ShardedTensorMetadata built on each rank.

        Warning: This API is experimental and subject to change. It does
        not do cross rank validations, and fully rely on the user
        for the correctness of sharded_tensor_metadata on each rank
        """
        process_group = (
            process_group
            if process_group is not None
            else distributed_c10d._get_default_group()
        )
        current_rank = dist.get_rank(process_group)

        shards_metadata = sharded_tensor_metadata.shards_metadata
        tensor_properties = sharded_tensor_metadata.tensor_properties

        if len(shards_metadata) == 0:
            raise ValueError("shards_metadata must not be empty!")

        if tensor_properties.layout != torch.strided:
            raise ValueError('Only torch.strided layout is currently supported')

        sharded_tensor = cls.__new__(cls)
        sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)

        sharded_tensor._metadata = sharded_tensor_metadata

        local_shard_metadatas = []

        def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
            # Uniform error text for mismatches between a local shard tensor
            # and either the tensor-wide properties or its ShardMetadata.
            tensor_property_or_metadata = "tensor property" if is_property else "local ShardMetadata"
            if expected != actual:
                raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with "
                                 f"{tensor_property_or_metadata} on rank {rank}: "
                                 f"{tensor_property_or_metadata} {prop_name}={expected}, "
                                 f"local shard tensor {prop_name}={actual}.")

        # collect local shard metadatas from the global sharded_tensor_metadata
        for shard_metadata in shards_metadata:  # type: ignore[attr-defined]
            rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement)

            if current_rank == rank:
                local_shard_metadatas.append(shard_metadata)

        if len(local_shards) != len(local_shard_metadatas):
            raise RuntimeError(
                f'Number of local shards ({len(local_shards)}) does not match number of local '
                f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
                f'on rank ({current_rank}) '
            )

        for shard in local_shards:
            shard_meta = shard.metadata
            local_shard_tensor = shard.tensor
            rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement)

            # validate if shard_meta in the metadatas collected from sharded_tensor_metadata
            assert shard_meta in local_shard_metadatas, \
                "local shard metadata not in sharded_tensor_metadata!"

            _raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, "layout", current_rank, True)
            if not local_shard_tensor.is_contiguous():
                raise ValueError('Only torch.contiguous_format memory_format is currently supported')

            _raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
            _raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), "pin_memory", current_rank, True)
            _raise_if_mismatch(local_device, local_shard_tensor.device, "device", current_rank)
            _raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, "dtype", current_rank, True)
            _raise_if_mismatch(
                tensor_properties.requires_grad, local_shard_tensor.requires_grad, "requires_grad", current_rank, True)

        # check if shards_metadata have overlap shards
        validate_non_overlapping_shards_metadata(shards_metadata)

        # check if the shards_metadata is compatible with overall size of the sharded tensor.
        check_tensor(shards_metadata, list(sharded_tensor_metadata.size))

        # done validation, add local_shards
        sharded_tensor._local_shards = local_shards
        # make a EnumerableShardingSpec for sharded tensors that initialized from this API.
        # TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
        #       see issue https://github.com/pytorch/pytorch/issues/67244
        sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata)

        # run post initialization, i.e. map registration, rpc initialization
        sharded_tensor._post_init()
        return sharded_tensor

    def _init_chunked(self, dims, tensor_init_params: TensorInitParams, ):
        # Create the local shards for a ChunkShardingSpec: split `dims` along
        # `sharding_dim` across the spec's placements, 'torch.chunk'-style.
        current_rank = dist.get_rank(self._process_group)
        sharding_dim = self._sharding_spec.dim  # type: ignore[attr-defined]

        # Validate the sharding spec.
        if not isinstance(sharding_dim, int):
            raise ValueError(
                f"Sharding dim needs to be an integer, found: {sharding_dim}"
            )
        if sharding_dim >= len(dims) or sharding_dim < -len(dims):
            raise ValueError(f"Invalid sharding dim: {sharding_dim}")

        dim_size = dims[sharding_dim]
        remote_devices = self._sharding_spec.placements  # type: ignore[attr-defined]
        chunks = len(remote_devices)
        # split_size computed similar to 'torch.chunk'
        split_size = get_split_size(dim_size, chunks)

        shards_metadata = []
        for idx, remote_device in enumerate(remote_devices):
            rank, local_device = _parse_and_validate_remote_device(self._process_group, remote_device)

            # Adjust the sharding dim for this rank.
            sharded_dim_size = get_chunked_dim_size(dim_size, split_size, idx)

            if sharded_dim_size > 0:
                # Build sharding_metadata.

                # deepcopy for modification.
                rank_dims = dims.copy()

                rank_offsets = [0] * len(dims)
                rank_offsets[sharding_dim] = split_size * idx
                rank_dims[sharding_dim] = sharded_dim_size

                shard_metadata = ShardMetadata(rank_offsets, rank_dims, remote_device)
                shards_metadata.append(shard_metadata)

                # Build the local shard for the current rank if it is involved in the sharding spec.
                if current_rank == rank:
                    # Initialize the local shard.
                    local_shard = _create_tensor_from_params(
                        *rank_dims, local_device=local_device, tensor_init_params=tensor_init_params)
                    self._local_shards.append(Shard(local_shard, shard_metadata))

        # Build overall metadata
        self._metadata = ShardedTensorMetadata(
            shards_metadata, dims, tensor_init_params.tensor_properties, )

    def _init_enumerable(self, dims, tensor_init_params: TensorInitParams):
        # Create the local shards for an EnumerableShardingSpec: one shard per
        # explicitly listed ShardMetadata placed on this rank.
        # Validate the sharding spec is compatible with the tensor.
        check_tensor(self._sharding_spec.shards, dims)  # type: ignore[attr-defined]

        current_rank = dist.get_rank(self._process_group)

        shards_metadata = []
        for shard_metadata in self._sharding_spec.shards:  # type: ignore[attr-defined]
            rank, local_device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
            shards_metadata.append(shard_metadata)

            if current_rank == rank:
                # Initialize the local shard.
                local_shard = _create_tensor_from_params(
                    *shard_metadata.shard_sizes, local_device=local_device,
                    tensor_init_params=tensor_init_params)
                self._local_shards.append(Shard(local_shard, shard_metadata))

        # Build overall metadata
        self._metadata = ShardedTensorMetadata(
            shards_metadata, dims, tensor_init_params.tensor_properties, )

    def sharding_spec(self) -> ShardingSpec:
        """
        Returns the ShardingSpec for the tensor.
        """
        return self._sharding_spec

    def __torch_function__(self, func, types, args=(), kwargs=None):
        # Dispatch to a registered sharded implementation if one exists.
        # NOTE(review): lookup key is the torch function object, although
        # _SHARDED_OPS is annotated Dict[str, Callable] — confirm key type.
        if func in _SHARDED_OPS:
            return _SHARDED_OPS[func](types, args, kwargs, self._process_group)
        raise RuntimeError(
            f"torch function '{func.__name__}', with args: {args} and "
            f"kwargs: {kwargs} not supported for ShardedTensor!")

    def metadata(self) -> ShardedTensorMetadata:
        """
        Returns a :class:`ShardedTensorMetadata` object corresponding to the
        metadata for the entire tensor.
        """
        return self._metadata

    def local_shards(self) -> List[Shard]:
        """
        Returns a list of :class:`Shard' corresponding to the
        local shards for this rank. Returns an empty list if the current rank
        does not host any shards for this Tensor.
        """
        return self._local_shards

    def size(self, dim: Optional[int] = None) -> Union[torch.Size, int]:
        """
        Returns a :Union:`[torch.Size, int]` which represents the size of the tensor.
        The dimension can be specified.

        Args:
            dim (int, optional): the dimension over which the size represents.
                If specified, it returns the size of the given dimension.
                If not, it returns a subclass of tuple.
                Default: ``None``

        Returns:
            A :Union:`[torch.Size, int]` represents the size of the tensor.
        """
        size = self._metadata.size
        if dim is None:
            return size
        if dim < 0 or dim >= len(size):
            raise ValueError(
                f"Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})"
            )
        return size[dim]

    def is_pinned(self) -> bool:
        """
        Returns True if the sharded tensor (each local shard) resides in pinned memory.
        """
        return self._metadata.tensor_properties.pin_memory

    def is_contiguous(self) -> bool:
        """
        Returns True if the sharded tensor (each local shard) is contiguous in memory
        in the order specified by memory format.
        """
        return self._metadata.tensor_properties.memory_format == torch.contiguous_format

    @property
    def shape(self):
        # Overall (global) size of the sharded tensor.
        return self._metadata.size

    @property
    def requires_grad(self):
        return self._metadata.tensor_properties.requires_grad

    @property
    def dtype(self):
        return self._metadata.tensor_properties.dtype

    @property
    def layout(self):
        return self._metadata.tensor_properties.layout

    def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int):
        # Called over RPC (see module-level _register_remote_shards).
        self._remote_shards[rpc_rank] = remote_shards

    def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
        """
        Returns a Dict[int, RRef] with keys being the RPC rank and values
        being RRefs to shards on that rank. Need to initialize the
        RPC framework for this functionality.

        Raises an exception if ShardedTensor was created with ``init_rrefs=False``
        """
        if not self._init_rrefs:
            raise RuntimeError(
                'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
            )
        return self._remote_shards

    def __hash__(self):
        # Identity hash: two ShardedTensors are never interchangeable dict keys.
        return id(self)

    def __repr__(self):
        return f'ShardedTensor({self._metadata})'

    @dataclass
    class ProcessGroupState:
        """
        State for ser-de of process group
        """
        local_rank: int
        global_rank: int
        local_world_size: int
        global_world_size: int

    def __getstate__(self):
        # Capture the process-group topology so __setstate__ can verify the
        # deserializing world matches the serializing one.
        pg_state = ShardedTensor.ProcessGroupState(
            distributed_c10d.get_rank(self._process_group),
            distributed_c10d.get_rank(),
            distributed_c10d.get_world_size(self._process_group),
            distributed_c10d.get_world_size(),
        )

        return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs

    def __setstate__(self, state):
        self._sharded_tensor_id = None
        if not distributed_c10d.is_initialized():
            raise RuntimeError(
                'Need to initialize default process group using '
                '"init_process_group" before loading ShardedTensor')

        self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state

        # Setup process group
        self._process_group = get_current_process_group()

        # Validate process group.
        local_rank = distributed_c10d.get_rank(self._process_group)
        if pg_state.local_rank != local_rank:
            raise RuntimeError(
                f'Local rank at save time was {pg_state.local_rank}, but at '
                f'load time was {local_rank}')

        global_rank = distributed_c10d.get_rank()
        if pg_state.global_rank != global_rank:
            raise RuntimeError(
                f'Global rank at save time was {pg_state.global_rank}, but at '
                f'load time was {global_rank}')

        local_world_size = distributed_c10d.get_world_size(self._process_group)
        if pg_state.local_world_size != local_world_size:
            raise RuntimeError(
                f'Local world size at save time was {pg_state.local_world_size}, '
                f'but at load time was {local_world_size}')

        global_world_size = distributed_c10d.get_world_size()
        if pg_state.global_world_size != global_world_size:
            raise RuntimeError(
                f'Global world size at save time was {pg_state.global_world_size}, '
                f'but at load time was {global_world_size}')

        self._post_init()
def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams):
    """ Helper to construct tensor from size, device and common params.

    Dispatches on ``tensor_init_params.create_op`` to the matching torch
    factory (ones/empty/zeros/rand/full) using the properties carried in
    ``tensor_init_params.tensor_properties``.
    """
    create_op = tensor_init_params.create_op
    dtype = tensor_init_params.tensor_properties.dtype
    layout = tensor_init_params.tensor_properties.layout
    requires_grad = tensor_init_params.tensor_properties.requires_grad
    memory_format = tensor_init_params.tensor_properties.memory_format
    pin_memory = tensor_init_params.tensor_properties.pin_memory

    # Consistency fix: every branch now tests the local `create_op`; the
    # original mixed `create_op` and `tensor_init_params.create_op`.
    if create_op == CreateOp.ONES:
        return torch.ones(*size, dtype=dtype, layout=layout,
                          device=local_device, pin_memory=pin_memory,
                          requires_grad=requires_grad,)
    elif create_op == CreateOp.EMPTY:
        return torch.empty(*size, dtype=dtype, layout=layout,
                           device=local_device, requires_grad=requires_grad,
                           # NB: memory_format param is not accepted by torch.ones
                           memory_format=memory_format, pin_memory=pin_memory,)
    elif create_op == CreateOp.ZEROS:
        return torch.zeros(*size,
                           dtype=dtype,
                           layout=layout,
                           device=local_device,
                           pin_memory=pin_memory,
                           requires_grad=requires_grad,)
    elif create_op == CreateOp.RAND:
        return torch.rand(*size,
                          dtype=dtype,
                          layout=layout,
                          device=local_device,
                          pin_memory=pin_memory,
                          requires_grad=requires_grad,)
    elif create_op == CreateOp.FULL:
        # NOTE(review): FULL does not forward pin_memory — confirm intended.
        return torch.full(size=size,
                          fill_value=tensor_init_params.fill_value,
                          layout=layout,
                          dtype=dtype,
                          requires_grad=requires_grad,
                          device=local_device, )
    else:
        raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}')
| torch/distributed/_sharded_tensor/api.py | 30,641 | State for ser-de of process group
ShardedTensor is an abstraction to represent Tensors that are sharded
across multiple devices and multiple processes.
ShardedTensor is initialized in an SPMD like fashion where each rank
initializes the ShardedTensor. The ShardedTensor object on each rank
then only stores the local shard for the Tensor and provides global
metadata for all the shards.
ShardedTensor doesn't provide any Tensor like operations but is a wrapper
providing the Tensor representing the local shard and the global metadata.
Using these, users can build their custom distributed sharded computations
on top of this primitive. The local shards are all initialized using the
create_op specified by tensor_init_params.create_op, e.g., torch.ones, or
torch.empty
Args:
sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
tensor_init_params (:class: `TensorInitParams`): common params to create tensor.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
.. note:: ShardedTensor uses collectives to do various operations, i.e. it
uses all_gather to do cross rank validations. For NCCL-based process
groups, internal tensor representations of objects must be moved to the
GPU device before communication takes place. In this case, the device
used is given by ``torch.cuda.current_device()`` and it is the user's
responsibility to ensure that this is set so that each rank has an
individual GPU, via ``torch.cuda.set_device()``
Container for list of common params to create new local tensor.
Helper to construct tensor from size, device and common params.
Initialize a ShardedTensor with local shards and a global
ShardedTensorMetadata built on each rank.
Warning: This API is experimental and subject to change. It does
not do cross rank validations, and fully rely on the user
for the correctness of sharded_tensor_metadata on each rank
Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
sharded tensor.
The API needs to be called on all ranks in SPMD fashion. All ranks should have
the same ``dst``. ``out`` should be a tensor of the same size as the overall
size of the sharded tensor on ``dst`` and ``None`` on all other ranks.
Args:
dst(int): The rank where full tensor is constructed.
Default: 0
out (:class `torch.Tensor`, optional): The output full tensor.
Must be provided ONLY on ``dst`` rank.
Default: ``None``
Returns True if the sharded tensor (each local shard) is contiguous in memory
in the order specified by memory format.
Returns True if the sharded tensor (each local shard) resides in pinned memory.
Returns a list of :class:`Shard' corresponding to the
local shards for this rank. Returns an empty list if the current rank
does not host any shards for this Tensor.
Returns a :class:`ShardedTensorMetadata` object corresponding to the
metadata for the entire tensor.
Returns a Dict[int, RRef] with keys being the RPC rank and values
being RRefs to shards on that rank. Need to initialize the
RPC framework for this functionality.
Raises an exception if ShardedTensor was created with ``init_rrefs=False``
Returns the ShardingSpec for the tensor.
Returns a :Union:`[torch.Size, int]` which represents the size of the tensor.
The dimension can be specified.
Args:
dim (int, optional): the dimension over which the size represents.
If specified, it returns the size of the given dimension.
If not, it returns a subclass of tuple.
Default: ``None``
Returns:
A :Union:`[torch.Size, int]` represents the size of the tensor.
Tracking for sharded tensor objects. Custom sharded ops needed when create_op is FULL default set to False (not None) since None is incompatible with Number. Use __new__ for logging purposes. prepare initialization, initialize fields like _process_group, _local_shards, etc. do post initialization (i.e. register sharded_tensor_id, initialize_rpc) Initialize RPC if available. Clean up the global map. type: ignore[call-overload] Validate PG and RPC ranks match. Gather all the sharded tensor ids. Share the local shards to the entire world. Skip self. Barrier for all RPCs to finish on all ranks. will revise this part with CPU support and use dist.gather() once NCCL support for gather() is ready https://github.com/pytorch/pytorch/issues/66187 STEP 1: Validate the Shardmetadatas locally STEP 2. Validate metadata across ranks, and build a global sharded tensor metadata by gathering local ShardedTensorMetadata STEP 3: Validation done, create the actual ShardedTensor and populate fields prepare initialization add to metadata and local_shards make a EnumerableShardingSpec for sharded tensors that initialized from this API. TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list. see issue https://github.com/pytorch/pytorch/issues/67244 run post initialization, i.e. map registration, rpc initialization collect local shard metadatas from the global sharded_tensor_metadata type: ignore[attr-defined] validate if shard_meta in the metadatas collected from sharded_tensor_metadata check if shards_metadata have overlap shards check if the shards_metadata is compatible with overall size of the sharded tensor. done validation, add local_shards make a EnumerableShardingSpec for sharded tensors that initialized from this API. TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list. see issue https://github.com/pytorch/pytorch/issues/67244 run post initialization, i.e. 
map registration, rpc initialization type: ignore[attr-defined] Validate the sharding spec. type: ignore[attr-defined] split_size computed similar to 'torch.chunk' Adjust the sharding dim for this rank. Build sharding_metadata. deepcopy for modification. Build the local shard for the current rank if it is involved in the sharding spec. Initialize the local shard. Build overall metadata Validate the sharding spec is compatible with the tensor. type: ignore[attr-defined] type: ignore[attr-defined] Initialize the local shard. Build overall metadata Setup process group Validate process group. NB: memory_format param is not accepted by torch.ones | 6,641 | en | 0.761097 |
# -*- coding: utf-8 -*-
from CuAsm.CuInsAssemblerRepos import CuInsAssemblerRepos
from CuAsm.CuInsFeeder import CuInsFeeder
def constructReposFromFile(sassname, savname=None, arch='sm_75'):
    """Build a CuInsAssemblerRepos from a SASS dump file.

    Args:
        sassname: path of the input SASS file.
        savname: optional path to save the constructed repos to.
        arch: target GPU architecture string.

    Returns:
        The constructed CuInsAssemblerRepos.
    """
    # Feed instructions from the SASS dump into a fresh, empty repository.
    sass_feeder = CuInsFeeder(sassname, arch=arch)
    repos = CuInsAssemblerRepos(arch=arch)

    # The repos is verified implicitly while it is being updated, so no
    # separate feeder.restart() + repos.verify(feeder) pass is needed.
    repos.update(sass_feeder)

    if savname is not None:
        repos.save2file(savname)

    return repos
def verifyReposFromFile(sassname, reposfile, arch='sm_75'):
    """Verify a previously saved instruction-assembler repos against a SASS dump.

    Args:
        sassname: path of the input SASS file.
        reposfile: path of a saved repos file to load.
        arch: target GPU architecture string.

    Returns:
        The loaded (and verified) CuInsAssemblerRepos.
    """
    # initialize a feeder with sass
    feeder = CuInsFeeder(sassname, arch=arch)

    # Load the repos from file (the original comment claimed "empty", but
    # passing reposfile makes CuInsAssemblerRepos load its contents).
    repos = CuInsAssemblerRepos(reposfile, arch=arch)

    # verify the repos against the feeder's instructions
    repos.verify(feeder)
    return repos
def _main():
    """Script entry point: build (and optionally verify) an sm_50 repos."""
    sass_path = r"G:\\Temp\\NVSASS\\cudnn64_7.sm_50.sass"
    # sass_path = r'G:\\Temp\\Program.45.sm_50.sass'
    repos_path = r'InsAsmRepos.sm_50.txt'
    target_arch = 'sm_50'

    constructReposFromFile(sass_path, repos_path, arch=target_arch)
    print('### Construction done!')

    # verifyReposFromFile(sass_path, repos_path, arch=target_arch)
    # print('### Verification done!')


if __name__ == '__main__':
    _main()
| Tests/test_CuInsAsmRepos_sm50.py | 1,420 | -*- coding: utf-8 -*- initialize a feeder with sass initialize an empty repos Update the repos with instructions from feeder reset the feeder back to start feeder.restart() verify the repos actually the codes is already verifed during repos construction repos.verify(feeder) initialize a feeder with sass initialize an empty repos verify the repos sassname = r'G:\\Temp\\Program.45.sm_50.sass' verifyReposFromFile(sassname, reposfile, arch=arch) print(' Verification done!') | 474 | en | 0.708215 |
import re, multiprocessing
from tqdm import tqdm
import numpy as np
class Cleaner():
    """Cleans noisy (mostly Romanian) text lines.

    ``process`` rejoins words broken by "- " / "/ " artifacts, normalizes
    unicode dashes and Romanian diacritics, strips URLs, emails and table
    separators, and drops low-quality lines (too short, too numeric, too
    many non-ascii characters, forbidden characters, ascii-art tables).

    Statistics are kept per rejection reason as numpy arrays of
    ``[line_count, char_count]`` (dtype uint64).
    """

    def __init__(self, num_threads=1):  # right now, it's single threaded
        # Cap at half the machine's cores; kept for future parallelism.
        self.num_threads = min(num_threads, int(multiprocessing.cpu_count() / 2))

        # Rejoin a word broken by "- ", e.g.:
        #   "S- ar putea să fie necesar să- l recitiţi."
        self.r1 = re.compile(r"([\w]+-)[\s]([\w]+)", re.IGNORECASE)

        # Rejoin a word broken after "/", e.g.:
        #   "{LL/ AAAA}"
        #   "Humalog Mix50 100 U/ ml"
        self.r2 = re.compile(r"([\w]+/)\s([\w]+)", re.IGNORECASE)

        # All unicode dashes to a plain '-',
        # see https://www.fileformat.info/info/unicode/category/Pd/list.htm
        # also includes bull: • \u2022
        self.r3 = re.compile(r"([■\u2022\u007E\u00AD\u058A\u05BE\u1400\u1806\u2010\u2011\u2012\u2013\u2014\u2015\u2053\u207B\u208B\u2212\u2E17\u2E3A\u2E3B\u301C\u3030\u30A0\uFE31\uFE32\uFE63\uFF0D]+)", re.UNICODE)

        # Drop the space after a comma inside numbers: "1, 4%" -> "1,4%"
        self.r4 = re.compile(r"([\d]+,)\s([\d]+)", re.IGNORECASE)

        # Soft hyphens \u00AD
        self.r5 = re.compile(r"[\u00AD]")

        # URLs
        self.r6 = re.compile(r'(?:www|http)\S+|<\S+|\w+\/*>')

        # Emails
        self.r7 = re.compile(r'([^@]+@[^@]+\.[^@]+)')

        # Table separators
        self.r8 = re.compile(r'[\─\─]+')
        self.r9 = re.compile(r'[\-\-]+')

        # Runs of multiple spaces
        self.space = re.compile(' +')

        # Forbidden chars that cause a lot of bad sentences
        self.forbidden_chars = "ºþÈ™ÓÑÄÈîƒ"

    def process(self, lines, percent_max_numeric=0.7, percent_max_non_ascii=0.40, min_line_length=20, verbose=False, disable_pbar=True):
        """Clean an iterable of text lines.

        :param lines: iterable of strings to clean
        :param percent_max_numeric: reject a line when digits/letters reaches
            this ratio (and the line has more than 6 digits)
        :param percent_max_non_ascii: reject a line when ascii/letters falls
            below this ratio (for lines longer than 15 chars)
        :param min_line_length: reject lines shorter than this, both before
            and after cleaning
        :param verbose: print each rejected line with the offending ratio
        :param disable_pbar: disable the tqdm progress bar
        :return: (output, stats) — cleaned newline-terminated lines, and a
            stats dict where every "skipped_*" entry is a numpy array of
            [line_count, char_count]
        """
        skipped_because_min_length = np.array([0, 0], dtype=np.uint64)
        skipped_alpha_count = np.array([0, 0], dtype=np.uint64)
        skipped_because_max_numeric = np.array([0, 0], dtype=np.uint64)
        skipped_because_max_non_ascii = np.array([0, 0], dtype=np.uint64)
        skipped_because_forbidden_chars = np.array([0, 0], dtype=np.uint64)
        total_original_length = 0
        total_clean_length = 0
        output = []
        for line in tqdm(lines, disable=disable_pbar):
            line = line.strip()

            # get stats about line
            length = len(line)
            total_original_length += length
            if length < min_line_length:
                skipped_because_min_length += np.array([1, length], dtype=np.uint64)
                continue

            line = bytes(line, 'utf-8').decode('utf-8', 'ignore')  # strip not utf-8 chars

            digit_count = 0
            alpha_count = 0
            ascii_count = 0
            forbidden_char = False
            for char in line:
                if char in self.forbidden_chars:
                    forbidden_char = True
                    break
                if char.isnumeric():
                    digit_count += 1
                if char.isalpha():
                    alpha_count += 1
                if char.isascii():
                    ascii_count += 1

            # reject if forbidden char
            if forbidden_char:
                skipped_because_forbidden_chars += np.array([1, length], dtype=np.uint64)
                continue

            # reject if number of letters is too small
            if alpha_count == 0 or alpha_count / length < 0.5:
                skipped_alpha_count += np.array([1, length], dtype=np.uint64)
                if verbose:
                    print("Skipping alpha={:.3f}: [{}]".format(alpha_count / length, line))
                continue

            # reject if too many numbers
            if digit_count / alpha_count >= percent_max_numeric and digit_count > 6:
                skipped_because_max_numeric += np.array([1, length], dtype=np.uint64)
                if verbose:
                    print("Skipping digit={:.3f}: [{}]".format(digit_count / alpha_count, line))
                continue

            # reject if too many non-ascii
            if ascii_count / alpha_count < percent_max_non_ascii and length > 15:
                skipped_because_max_non_ascii += np.array([1, length], dtype=np.uint64)
                if verbose:
                    # BUGFIX: report the ascii ratio that actually triggered
                    # this rejection (previously printed the digit ratio).
                    print("Skipping ascii={:.3f}: [{}]".format(ascii_count / alpha_count, line))
                continue

            # skip lines that appear to be ascii tables │
            if (line.strip()[0] == '|' and line.count('|') > 2) or (line.strip()[0] == '│' and line.count('│') > 2):
                skipped_because_forbidden_chars += np.array([1, length], dtype=np.uint64)
                if verbose:
                    print("Skipping table line: [{}]".format(line))
                continue

            # clean line
            line = self.r1.sub(r"\1\2", line)   # "să- l"  -> "să-l"
            line = self.r2.sub(r"\1\2", line)   # "U/ ml"  -> "U/ml"
            line = self.r3.sub("-", line)       # unicode dashes/bullets -> '-'
            line = self.r4.sub(r"\1\2", line)   # "1, 4%"  -> "1,4%"
            line = self.r5.sub("", line)        # soft hyphens
            line = self.r6.sub("", line)        # URLs
            line = self.r7.sub("", line)        # emails
            # table separators
            line = self.r8.sub("", line)
            line = self.r9.sub("", line)
            # Romanian diacritics: cedilla forms -> comma-below forms
            line = line.replace("( ă)", "(ă)")
            line = line.replace("ţ", "ț")
            line = line.replace("ş", "ș")
            line = line.replace("Ţ", "Ț")
            line = line.replace("Ş", "Ș")
            # presumably recomposes a + combining circumflex into the
            # precomposed "â" — TODO confirm codepoints
            line = line.replace("â", "â")
            line = self.space.sub(' ', line).strip()

            # check that after processing the line is not too short
            if len(line) < min_line_length:
                skipped_because_min_length += np.array([1, length], dtype=np.uint64)
                continue

            total_clean_length += len(line)
            output.append(line + "\n")

        # pack stats
        stats = {
            "skipped_because_min_length": skipped_because_min_length,
            "skipped_alpha_count": skipped_alpha_count,
            "skipped_because_max_numeric": skipped_because_max_numeric,
            "skipped_because_max_non_ascii": skipped_because_max_non_ascii,
            "skipped_because_forbidden_chars": skipped_because_forbidden_chars,
            "total_original_length": total_original_length,
            "total_clean_length": total_clean_length,
        }
        return output, stats

    def add_stats(self, a, b):
        """
        Add two stats dict that are returned by the process function.
        This is used for multiple files
        :param a: stats dict
        :param b: stats dict
        :return: stats dict
        """
        # Every value is either a numpy [lines, chars] pair or an int, so
        # plain '+' is the correct merge for all keys.
        return {key: a[key] + b[key] for key in a}

    def print_stats(self, stats):
        """Pretty-print a stats dict produced by process()/add_stats()."""
        print("\nCleaning statistics:")
        print("Total original length (chars) = {}".format(stats["total_original_length"]))
        print("Total length after cleaning (chars) = {}".format(stats["total_clean_length"]))
        print("Percent data kept = {:.3f} %".format(100.*stats["total_clean_length"]/stats["total_original_length"]))
        print("Skipped because line length was below minimum (lines/chars): {} ".format(stats["skipped_because_min_length"]))
        print("Skipped because line had forbidden characters (lines/chars): {} ".format(stats["skipped_because_forbidden_chars"]))
        print("Skipped because alpha count was below minimum (lines/chars): {} ".format(stats["skipped_alpha_count"]))
        print("Skipped because digit count was above maximum (lines/chars): {} ".format(stats["skipped_because_max_numeric"]))
        print("Skipped because too many non-ascii characters (lines/chars): {} ".format(stats["skipped_because_max_non_ascii"]))
# Sample noisy pharmaceutical-leaflet lines used for a quick manual test of
# Cleaner: broken-word artifacts ("S- ar", "U/ ml"), bullets, URLs, phone
# numbers and mixed-script (Greek/Romanian) content.
text = [" - ~~~~~Păstraţi acest prospect. S- ar putea să fie necesar să- l recitiţi.",
        "- Dacă aveţi orice întrebări suplimentare, adresaţi- vă medicului dumneavoastră sau farmacistului.\n",
        "{LL/ AAAA}\n",
        "MANUALUL UTILIZATORULUI\n",
        "Vezi textul manualului mai jos.\n",
        "303 Informaţii detaliate privind acest medicament sunt disponibile pe website- ul Agenţiei Europene a Medicamentului (EMEA): http: // www. emea. europa. eu /.\n",
        "304 PROSPECT: \n",
        "INFORMAŢII PENTRU UTILIZATOR",
        "Humalog Mix50 100 U/ ml • • • ~~~~",
        "Τηλ: +30 210 629 4600 España Lilly S. A.",
        "Tel: + 34- 91 663 50 00 France Lilly France S. A. S.",
        "Tél: +33 - (0) 1 55 49 34 34 Ireland Eli Lilly and Company (Ireland) Limited Tel: + 353 - (0) 1 661 4377 Ísland Icepharma hf.",
        "Sími + 354 540 8000 Italia Eli Lilly Italia S. p. A.",
        "Tel: + 39 - 055 42571 Κύπρος Phadisco Ltd Τηλ: +357 22 715000 ",
        "Luxembourg/ Luxemburg Eli Lilly Benelux S. A.",
        "Tél/ Tel: + 32 - (0) 2 548 84 84 Magyarország Lilly Hungária Kft.",
        "Tel: + 36 1 328 5100 Malta Charles de Giorgio Ltd.",
        "Κύπρος Βαρνάβας Χατζηπαναγής Λτδ 7 Ανδροκλέους CY- 1060 Λευκωσία Tηλ"]
# Commented-out stress-test: replicate the sample 100k times.
#tt = []
#for i in range(100000):
#    tt.extend(text)
#print(len(tt))
# Commented-out demo (kept as a no-op string literal): runs the cleaner
# twice over the sample and prints per-run plus merged statistics.
"""
c = Cleaner(1)
lines, s1 = c.process(text)
lines, s2 = c.process(text)
stats = c.add_stats(s1, s2)
c.print_stats(s1)
c.print_stats(s2)
c.print_stats(stats)
print("DONE")
"""
| corpus/text_cleaner.py | 10,124 | Add two stats dict that are returned by the process function.
This is used for multiple files
:param a: stats dict
:param b: stats dict
:return: stats dict
right now, it's single threaded get stats about line strip not utf-8 chars reject if forbidden char reject if number of letters is too small reject if too many numbers reject if too many non-asciiskip lines that appear to be ascii tables │ clean lineprint("\nbef: {}".format(line)) separatorsprint("aft: {}".format(line)) check that after processing the line is not too short pack statstt = []for i in range(100000): tt.extend(text)print(len(tt)) | 607 | en | 0.758733 |
#!/usr/bin/env python
# import general use modules
import os
from pprint import pprint as pp
# import nornir specifics
from nornir import InitNornir
from nornir.plugins.functions.text import print_result
from nornir.core.filter import F
# Demonstrate the different ways of narrowing down a nornir inventory.
nr = InitNornir()
hosts = nr.inventory.hosts

# Simple attribute filter: a single host by name.
arista1 = nr.filter(name="arista1").inventory.hosts
#print(hosts)
print(arista1)

# Hosts carrying the WAN role.
wan_hosts = nr.filter(role="WAN").inventory.hosts
print(wan_hosts)

# Chained filters: WAN hosts reachable on port 22.
wan_ssh_hosts = nr.filter(role="WAN").filter(port=22).inventory.hosts
print(wan_ssh_hosts)

# F-object filter: hosts that belong to the "sfo" group.
sfo_hosts = nr.filter(F(groups__contains="sfo")).inventory.hosts
print(sfo_hosts)
| class3/exercise2/exercise2.py | 735 | !/usr/bin/env python import general use modules import nornir specificsprint(hosts) | 83 | en | 0.125651 |
# -*- coding: utf-8 -*-
#
# Political Dynamics documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules documented with autodoc) live in another
# directory, add it to sys.path here; relative paths should be made
# absolute with os.path.abspath.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Sphinx extension modules ('sphinx.ext.*' or custom); none are needed.
extensions = []

# Paths, relative to this directory, that contain templates.
templates_path = ['_templates']

# Suffix of source filenames and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = u'Political Dynamics'

# Short X.Y version and the full release string (including alpha/beta/rc).
version = '0.1'
release = '0.1'

# Patterns, relative to the source directory, ignored when collecting
# source files.
exclude_patterns = ['_build']

# Pygments style used for syntax highlighting.
pygments_style = 'sphinx'

# Everything else ('needs_sphinx', 'source_encoding', 'language', 'today',
# 'today_fmt', 'default_role', 'add_function_parentheses',
# 'add_module_names', 'show_authors', 'modindex_common_prefix') is left at
# its Sphinx default.
# -- Options for HTML output ---------------------------------------------------

# Theme for HTML and HTML Help pages; see the Sphinx documentation for the
# builtin themes and their theme-specific 'html_theme_options'.
html_theme = 'default'

# Paths, relative to this directory, holding custom static files. They are
# copied after the builtin static files, so a "default.css" placed here
# would override the builtin one.
html_static_path = ['_static']

# The remaining HTML options (html_title, html_short_title, html_logo,
# html_favicon, html_last_updated_fmt e.g. '%b %d, %Y',
# html_use_smartypants, html_sidebars, html_additional_pages,
# html_domain_indices, html_use_index, html_split_index,
# html_show_sourcelink, html_show_sphinx, html_show_copyright,
# html_use_opensearch, html_file_suffix) keep their defaults.

# Output file base name for the HTML help builder.
htmlhelp_basename = 'political-dynamicsdoc'
# -- Options for LaTeX output --------------------------------------------------

# Overrides for paper size ('papersize'), font size ('pointsize') and the
# LaTeX preamble ('preamble'); the defaults are fine here.
latex_elements = {
}

# Grouping of the document tree into LaTeX files: one tuple per file of
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index',
     'political-dynamics.tex',
     u'Political Dynamics Documentation',
     u"Arya D. McCarthy", 'manual'),
]

# latex_logo, latex_use_parts, latex_show_pagerefs, latex_show_urls,
# latex_appendices and latex_domain_indices keep their defaults.
# -- Options for manual page output --------------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'political-dynamics', u'Political Dynamics Documentation',
     [u"Arya D. McCarthy"], 1)
]

# man_show_urls (print URLs after external links) keeps its default.
# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. One tuple per file of
# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    ('index', 'political-dynamics', u'Political Dynamics Documentation',
     u"Arya D. McCarthy", 'Political Dynamics',
     # BUGFIX: removed stray terminal escape residue ("[D[D[D", echoed
     # left-arrow keypresses) that had been fused into the description.
     'A differential equations perspective on American National Election Studies (ANES) over time.',
     'Miscellaneous'),
]

# texinfo_appendices, texinfo_domain_indices and texinfo_show_urls keep
# their defaults.
| docs/conf.py | 7,943 | -*- coding: utf-8 -*- Political Dynamics documentation build configuration file, created by sphinx-quickstart. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('.')) -- General configuration ----------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix of source filenames. The encoding of source files. source_encoding = 'utf-8-sig' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. language = None There are two options for replacing |today|: either, you set today to some non-false value, then it is used: today = '' Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. The reST default role (used for this markup: `text`) to use for all documents. 
default_role = None If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True If true, the current module name will be prepended to all description unit titles (such as .. function::). add_module_names = True If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default. show_authors = False The name of the Pygments (syntax highlighting) style to use. A list of ignored prefixes for module index sorting. modindex_common_prefix = [] -- Options for HTML output --------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom themes here, relative to this directory. html_theme_path = [] The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation". html_title = None A shorter title for the navigation bar. Default is the same as html_title. html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar. html_logo = None The name of an image file (within the static path) to use as favicon of the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large. html_favicon = None Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format. html_last_updated_fmt = '%b %d, %Y' If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities. 
html_use_smartypants = True Custom sidebar templates, maps document names to template names. html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names. html_additional_pages = {} If false, no module index is generated. html_domain_indices = True If false, no index is generated. html_use_index = True If true, the index is split into individual pages for each letter. html_split_index = False If true, links to the reST sources are added to the pages. html_show_sourcelink = True If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = True If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served. html_use_opensearch = '' This is the file name suffix for HTML files (e.g. ".xhtml"). html_file_suffix = None Output file base name for HTML help builder. -- Options for LaTeX output -------------------------------------------------- The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto/manual]). The name of an image file (relative to this directory) to place at the top of the title page. latex_logo = None For "manual" documents, if this is true, then toplevel headings are parts, not chapters. latex_use_parts = False If true, show page references after internal links. latex_show_pagerefs = False If true, show URL addresses after external links. latex_show_urls = False Documents to append as an appendix to all manuals. latex_appendices = [] If false, no module index is generated. 
latex_domain_indices = True -- Options for manual page output -------------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). If true, show URL addresses after external links. man_show_urls = False -- Options for Texinfo output ------------------------------------------------ Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) Documents to append as an appendix to all manuals. texinfo_appendices = [] If false, no module index is generated. texinfo_domain_indices = True How to display URL addresses: 'footnote', 'no', or 'inline'. texinfo_show_urls = 'footnote' | 6,666 | en | 0.676276 |
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
import numpy as np
from loguru import logger
from platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure
from platipy.imaging.registration.linear import (
linear_registration,
)
from platipy.imaging.registration.deformable import (
fast_symmetric_forces_demons_registration,
)
from platipy.imaging.label.fusion import (
process_probability_image,
compute_weight_map,
combine_labels,
)
from platipy.imaging.label.iar import run_iar
from platipy.imaging.utils.vessel import vessel_spline_generation
from platipy.imaging.utils.valve import (
generate_valve_from_great_vessel,
generate_valve_using_cylinder,
)
from platipy.imaging.utils.conduction import (
geometric_sinoatrialnode,
geometric_atrioventricularnode,
)
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
from platipy.imaging.generation.mask import extend_mask
from platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap
# Root directory of the atlas images/labels; defaults to the container
# mount point /atlas but can be overridden via the ATLAS_PATH env var.
ATLAS_PATH = os.environ.get("ATLAS_PATH", "/atlas")
# Default configuration for the atlas-based cardiac auto-segmentation
# pipeline (consumed by run_cardiac_segmentation below); callers may pass a
# modified copy to override any section.
CARDIAC_SETTINGS_DEFAULTS = {
    # Which atlas cases/structures to load and how to locate them on disk.
    "atlas_settings": {
        "atlas_id_list": [
            "03",
            "05",
            "08",
            "10",
            "11",
            "12",
            "13",
            "16",
            "24",
            "35",
        ],
        "atlas_structure_list": [
            "AORTICVALVE",
            "ASCENDINGAORTA",
            "LANTDESCARTERY",
            "LCIRCUMFLEXARTERY",
            "LCORONARYARTERY",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "MITRALVALVE",
            "PULMONARYARTERY",
            "PULMONICVALVE",
            "RCORONARYARTERY",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "SVC",
            "TRICUSPIDVALVE",
            "WHOLEHEART",
        ],
        "atlas_path": ATLAS_PATH,
        # Format strings: {0} = atlas case id, {1} = structure name.
        "atlas_image_format": "Case_{0}/Images/Case_{0}_CROP.nii.gz",
        "atlas_label_format": "Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz",
        "crop_atlas_to_structures": False,
        # Expansion (mm) applied when cropping atlases to their structures.
        "crop_atlas_expansion_mm": (20, 20, 40),
        "guide_structure_name": "WHOLEHEART",
        "superior_extension": 30,
    },
    # Margin (mm) used when auto-cropping the target image.
    "auto_crop_target_image_settings": {
        "expansion_mm": [20, 20, 40],
    },
    # Initial linear (affine) registration of each atlas to the target.
    "linear_registration_settings": {
        "reg_method": "affine",
        "shrink_factors": [16, 8, 4],
        "smooth_sigmas": [0, 0, 0],
        "sampling_rate": 0.75,
        "default_value": -1000,
        "number_of_iterations": 50,
        "metric": "mean_squares",
        "optimiser": "gradient_descent_line_search",
        "verbose": False,
    },
    # Deformable registration driven by the guide structure (demons).
    "structure_guided_registration_settings": {
        "isotropic_resample": True,
        "resolution_staging": [
            16,
            8,
            2,
        ],  # specify voxel size (mm) since isotropic_resample is set
        "iteration_staging": [50, 50, 50],
        "smoothing_sigmas": [0, 0, 0],
        "ncores": 8,
        "default_value": 0,
        "verbose": False,
    },
    # Final image-driven deformable registration.
    "deformable_registration_settings": {
        "isotropic_resample": True,
        "resolution_staging": [
            6,
            3,
            1.5,
        ],  # specify voxel size (mm) since isotropic_resample is set
        "iteration_staging": [200, 150, 100],
        "smoothing_sigmas": [0, 0, 0],
        "ncores": 8,
        "default_value": 0,
        "verbose": False,
    },
    # Iterative atlas removal (outlier-atlas rejection) settings.
    "iar_settings": {
        "reference_structure": False,
        "smooth_distance_maps": True,
        "smooth_sigma": 1,
        "z_score_statistic": "mad",
        "outlier_method": "iqr",
        "outlier_factor": 1.5,
        "min_best_atlases": 5,
        "project_on_sphere": False,
    },
    # Label fusion: voting scheme plus per-structure probability thresholds.
    "label_fusion_settings": {
        "vote_type": "unweighted",
        "vote_params": None,
        "optimal_threshold": {
            "AORTICVALVE": 0.5,
            "ASCENDINGAORTA": 0.44,
            "LEFTATRIUM": 0.40,
            "LEFTVENTRICLE": 0.45,
            "MITRALVALVE": 0.5,
            "PULMONARYARTERY": 0.46,
            "PULMONICVALVE": 0.5,
            "RIGHTATRIUM": 0.38,
            "RIGHTVENTRICLE": 0.42,
            "SVC": 0.44,
            "TRICUSPIDVALVE": 0.5,
            "WHOLEHEART": 0.5,
        },
    },
    # Coronary-artery spline generation (per-vessel parameters).
    "vessel_spline_settings": {
        "vessel_name_list": [
            "LANTDESCARTERY",
            "LCIRCUMFLEXARTERY",
            "LCORONARYARTERY",
            "RCORONARYARTERY",
        ],
        "vessel_radius_mm_dict": {
            "LANTDESCARTERY": 2,
            "LCIRCUMFLEXARTERY": 2,
            "LCORONARYARTERY": 2,
            "RCORONARYARTERY": 2,
        },
        "scan_direction_dict": {
            "LANTDESCARTERY": "z",
            "LCIRCUMFLEXARTERY": "z",
            "LCORONARYARTERY": "x",
            "RCORONARYARTERY": "z",
        },
        "stop_condition_type_dict": {
            "LANTDESCARTERY": "count",
            "LCIRCUMFLEXARTERY": "count",
            "LCORONARYARTERY": "count",
            "RCORONARYARTERY": "count",
        },
        "stop_condition_value_dict": {
            "LANTDESCARTERY": 2,
            "LCIRCUMFLEXARTERY": 2,
            "LCORONARYARTERY": 2,
            "RCORONARYARTERY": 2,
        },
    },
    # Geometric generation of valves and conduction-system nodes.
    "geometric_segmentation_settings": {
        "run_geometric_algorithms": True,
        "geometric_name_suffix": "_GEOMETRIC",
        # Maps geometric-algorithm inputs to atlas structure names.
        "atlas_structure_names": {
            "atlas_left_ventricle": "LEFTVENTRICLE",
            "atlas_right_ventricle": "RIGHTVENTRICLE",
            "atlas_left_atrium": "LEFTATRIUM",
            "atlas_right_atrium": "RIGHTATRIUM",
            "atlas_ascending_aorta": "ASCENDINGAORTA",
            "atlas_pulmonary_artery": "PULMONARYARTERY",
            "atlas_superior_vena_cava": "SVC",
            "atlas_whole_heart": "WHOLEHEART",
        },
        "valve_definitions": {
            "mitral_valve_thickness_mm": 10,
            "mitral_valve_radius_mm": 15,
            "tricuspid_valve_thickness_mm": 10,
            "tricuspid_valve_radius_mm": 15,
            "pulmonic_valve_thickness_mm": 10,
            "aortic_valve_thickness_mm": 10,
        },
        "conduction_system_definitions": {
            "sinoatrial_node_radius_mm": 10,
            "atrioventricular_node_radius_mm": 10,
        },
    },
    # Post-processing: hole filling and overlap correction.
    "postprocessing_settings": {
        "run_postprocessing": True,
        "binaryfillhole_mm": 3,
        "structures_for_binaryfillhole": [
            "ASCENDINGAORTA",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "SVC",
            "AORTICVALVE",
            "MITRALVALVE",
            "PULMONICVALVE",
            "TRICUSPIDVALVE",
            "WHOLEHEART",
        ],
        "structures_for_overlap_correction": [
            "ASCENDINGAORTA",
            "LEFTATRIUM",
            "LEFTVENTRICLE",
            "RIGHTATRIUM",
            "RIGHTVENTRICLE",
            "PULMONARYARTERY",
            "SVC",
        ],
    },
    # Output options.
    "return_atlas_guide_structure": False,
    "return_as_cropped": False,
    "return_proba_as_contours": False,
}
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
    """Runs the atlas-based cardiac segmentation

    Args:
        img (sitk.Image): The target image to segment.
        guide_structure (sitk.Image, optional): A binary mask used to guide cropping and
            registration (e.g. a whole-heart contour). Defaults to None.
        settings (dict, optional): Dictionary containing settings for algorithm.
                                   Defaults to default_settings.

    Returns:
        tuple: (results, results_prob) - two dictionaries mapping structure names to
        binary segmentations and to probability/encoded-contour images, respectively.
    """

    results = {}
    results_prob = {}

    return_as_cropped = settings["return_as_cropped"]

    """
    Initialisation - Read in atlases
    - image files
    - structure files
    Atlas structure:
    'ID': 'Original': 'CT Image' : sitk.Image
                      'Struct A' : sitk.Image
                      'Struct B' : sitk.Image
          'RIR'     : 'CT Image' : sitk.Image
                      'Transform' : transform parameter map
                      'Struct A' : sitk.Image
                      'Struct B' : sitk.Image
          'DIR'     : 'CT Image' : sitk.Image
                      'Transform' : displacement field transform
                      'Weight Map' : sitk.Image
                      'Struct A' : sitk.Image
                      'Struct B' : sitk.Image
    """

    logger.info("")
    # Settings
    atlas_path = settings["atlas_settings"]["atlas_path"]
    atlas_id_list = settings["atlas_settings"]["atlas_id_list"]
    atlas_structure_list = settings["atlas_settings"]["atlas_structure_list"]

    atlas_image_format = settings["atlas_settings"]["atlas_image_format"]
    atlas_label_format = settings["atlas_settings"]["atlas_label_format"]

    crop_atlas_to_structures = settings["atlas_settings"]["crop_atlas_to_structures"]
    crop_atlas_expansion_mm = settings["atlas_settings"]["crop_atlas_expansion_mm"]

    atlas_set = {}
    for atlas_id in atlas_id_list:
        atlas_set[atlas_id] = {}
        atlas_set[atlas_id]["Original"] = {}

        image = sitk.ReadImage(f"{atlas_path}/{atlas_image_format.format(atlas_id)}")

        structures = {
            struct: sitk.ReadImage(f"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}")
            for struct in atlas_structure_list
        }

        if crop_atlas_to_structures:
            logger.info(f"Automatically cropping atlas: {atlas_id}")

            # np.prod replaces the deprecated np.product (removed in NumPy 2.0)
            original_volume = np.prod(image.GetSize())

            crop_box_size, crop_box_index = label_to_roi(
                structures.values(), expansion_mm=crop_atlas_expansion_mm
            )

            image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)

            final_volume = np.prod(image.GetSize())

            logger.info(f" > Volume reduced by factor {original_volume/final_volume:.2f}")

            for struct in atlas_structure_list:
                structures[struct] = crop_to_roi(
                    structures[struct], size=crop_box_size, index=crop_box_index
                )

        atlas_set[atlas_id]["Original"]["CT Image"] = image

        for struct in atlas_structure_list:
            atlas_set[atlas_id]["Original"][struct] = structures[struct]

    """
    Step 1 - Automatic cropping
    If we have a guide structure:
        - use structure to crop target image
    Otherwise:
        - using a quick registration to register each atlas
        - expansion of the bounding box to ensure entire volume of interest is enclosed
        - target image is cropped
    """

    expansion_mm = settings["auto_crop_target_image_settings"]["expansion_mm"]

    if guide_structure:
        crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)
        img_crop = crop_to_roi(img, crop_box_size, crop_box_index)

        guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
        target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)

    else:
        # Coarse similarity registration of (up to) 8 atlases; the union of the
        # aligned images defines the crop region.
        quick_reg_settings = {
            "reg_method": "similarity",
            "shrink_factors": [8],
            "smooth_sigmas": [0],
            "sampling_rate": 0.75,
            "default_value": -1000,
            "number_of_iterations": 25,
            "final_interp": sitk.sitkLinear,
            "metric": "mean_squares",
            "optimiser": "gradient_descent_line_search",
        }

        registered_crop_images = []

        logger.info("Running initial Translation tranform to crop image volume")

        for atlas_id in atlas_id_list[: min([8, len(atlas_id_list)])]:

            logger.info(f" > atlas {atlas_id}")

            # Register the atlases
            atlas_set[atlas_id]["RIR"] = {}
            atlas_image = atlas_set[atlas_id]["Original"]["CT Image"]

            reg_image, _ = linear_registration(
                img,
                atlas_image,
                **quick_reg_settings,
            )

            registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))

            del reg_image

        combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000

        crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)

        img_crop = crop_to_roi(img, crop_box_size, crop_box_index)

    logger.info("Calculated crop box:")
    logger.info(f" > {crop_box_index}")
    logger.info(f" > {crop_box_size}")
    # np.prod replaces the deprecated np.product (removed in NumPy 2.0)
    logger.info(f" > Vol reduction = {np.prod(img.GetSize())/np.prod(crop_box_size):.2f}")

    """
    Step 2 - Rigid registration of target images
    - Individual atlas images are registered to the target
    - The transformation is used to propagate the labels onto the target
    """
    linear_registration_settings = settings["linear_registration_settings"]

    logger.info(
        f"Running {linear_registration_settings['reg_method']} tranform to align atlas images"
    )

    for atlas_id in atlas_id_list:
        # Register the atlases

        logger.info(f" > atlas {atlas_id}")

        atlas_set[atlas_id]["RIR"] = {}

        if guide_structure:
            guide_structure_name = settings["atlas_settings"]["guide_structure_name"]
            target_reg_image = target_reg_structure
            atlas_reg_image = convert_mask_to_reg_structure(
                atlas_set[atlas_id]["Original"][guide_structure_name], expansion=2
            )

        else:
            target_reg_image = img_crop
            atlas_reg_image = atlas_set[atlas_id]["Original"]["CT Image"]

        _, initial_tfm = linear_registration(
            target_reg_image,
            atlas_reg_image,
            **linear_registration_settings,
        )

        # Save in the atlas dict
        atlas_set[atlas_id]["RIR"]["Transform"] = initial_tfm

        if guide_structure:
            atlas_set[atlas_id]["RIR"]["Reg Mask"] = apply_transform(
                input_image=atlas_reg_image,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkLinear,
            )

            expanded_atlas_guide_structure = extend_mask(
                atlas_set[atlas_id]["Original"][guide_structure_name],
                direction=("ax", "sup"),
                extension_mm=settings["atlas_settings"]["superior_extension"],
                interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
            )

            atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"] = apply_transform(
                input_image=expanded_atlas_guide_structure,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

        atlas_set[atlas_id]["RIR"]["CT Image"] = apply_transform(
            input_image=atlas_set[atlas_id]["Original"]["CT Image"],
            reference_image=img_crop,
            transform=initial_tfm,
            default_value=-1000,
            interpolator=sitk.sitkLinear,
        )

        # sitk.WriteImage(rigid_image, f"./RR_{atlas_id}.nii.gz")

        for struct in atlas_structure_list:
            input_struct = atlas_set[atlas_id]["Original"][struct]
            atlas_set[atlas_id]["RIR"][struct] = apply_transform(
                input_image=input_struct,
                reference_image=img_crop,
                transform=initial_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

        # Free the original (pre-registration) images to limit memory use
        atlas_set[atlas_id]["Original"] = None

    """
    Step 3 - Deformable image registration
    - Using Fast Symmetric Diffeomorphic Demons
    """
    if guide_structure:
        structure_guided_registration_settings = settings["structure_guided_registration_settings"]

        logger.info("Running structure-guided deformable registration on atlas labels")

        for atlas_id in atlas_id_list:

            logger.info(f" > atlas {atlas_id}")

            # Register the atlases
            atlas_set[atlas_id]["DIR_STRUCT"] = {}

            deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(
                target_reg_structure,
                atlas_set[atlas_id]["RIR"]["Reg Mask"],
                **structure_guided_registration_settings,
            )

            # Save in the atlas dict
            atlas_set[atlas_id]["DIR_STRUCT"]["Reg Mask"] = deform_image
            atlas_set[atlas_id]["DIR_STRUCT"]["Transform"] = struct_guided_tfm

            atlas_set[atlas_id]["DIR_STRUCT"]["CT Image"] = apply_transform(
                input_image=atlas_set[atlas_id]["RIR"]["CT Image"],
                transform=struct_guided_tfm,
                default_value=-1000,
                interpolator=sitk.sitkLinear,
            )

            atlas_set[atlas_id]["DIR_STRUCT"][guide_structure_name + "EXPANDED"] = apply_transform(
                input_image=atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"],
                reference_image=img_crop,
                transform=struct_guided_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

            # sitk.WriteImage(deform_image, f"./DIR_STRUCT_{atlas_id}.nii.gz")

            for struct in atlas_structure_list:
                input_struct = atlas_set[atlas_id]["RIR"][struct]
                atlas_set[atlas_id]["DIR_STRUCT"][struct] = apply_transform(
                    input_image=input_struct,
                    transform=struct_guided_tfm,
                    default_value=0,
                    interpolator=sitk.sitkNearestNeighbor,
                )

            # Free the rigid-stage images once the structure-guided stage is saved
            atlas_set[atlas_id]["RIR"] = None

    # Settings
    deformable_registration_settings = settings["deformable_registration_settings"]

    logger.info("Running DIR to refine atlas image registration")

    for atlas_id in atlas_id_list:

        logger.info(f" > atlas {atlas_id}")

        # Register the atlases
        atlas_set[atlas_id]["DIR"] = {}

        if guide_structure:
            label = "DIR_STRUCT"
        else:
            label = "RIR"

        atlas_reg_image = atlas_set[atlas_id][label]["CT Image"]
        target_reg_image = img_crop

        if guide_structure:
            expanded_atlas_mask = atlas_set[atlas_id]["DIR_STRUCT"][
                guide_structure_name + "EXPANDED"
            ]
            expanded_target_mask = extend_mask(
                guide_structure,
                direction=("ax", "sup"),
                extension_mm=settings["atlas_settings"]["superior_extension"],
                interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
            )

            combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)

            atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)
            atlas_reg_image = sitk.Mask(
                atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000
            )

            target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)
            # NOTE(review): the target is thresholded with the *atlas* image
            # (atlas_reg_image > -400), not with its own intensities -- confirm
            # this is intentional (it masks the target where the atlas is air).
            target_reg_image = sitk.Mask(
                target_reg_image, atlas_reg_image > -400, outsideValue=-1000
            )

        deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(
            target_reg_image,
            atlas_reg_image,
            **deformable_registration_settings,
        )

        # Save in the atlas dict
        atlas_set[atlas_id]["DIR"]["Transform"] = dir_tfm

        atlas_set[atlas_id]["DIR"]["CT Image"] = apply_transform(
            input_image=atlas_set[atlas_id][label]["CT Image"],
            transform=dir_tfm,
            default_value=-1000,
            interpolator=sitk.sitkLinear,
        )

        for struct in atlas_structure_list:
            input_struct = atlas_set[atlas_id][label][struct]
            atlas_set[atlas_id]["DIR"][struct] = apply_transform(
                input_image=input_struct,
                transform=dir_tfm,
                default_value=0,
                interpolator=sitk.sitkNearestNeighbor,
            )

        # Free the previous registration stage
        atlas_set[atlas_id][label] = None

    """
    Step 4 - Iterative atlas removal
    - This is an automatic process that will attempt to remove inconsistent atlases from the entire set
    """
    # Compute weight maps
    # Here we use simple GWV as this minimises the potentially negative influence of mis-registered
    # atlases
    iar_settings = settings["iar_settings"]

    if iar_settings["reference_structure"]:

        for atlas_id in atlas_id_list:
            atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
            weight_map = compute_weight_map(img_crop, atlas_image, vote_type="global")
            atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map

        atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)

    else:
        logger.info("IAR: No reference structure, skipping iterative atlas removal.")

    """
    Step 4 - Vessel Splining
    """
    vessel_spline_settings = settings["vessel_spline_settings"]

    if len(vessel_spline_settings["vessel_name_list"]) > 0:

        segmented_vessel_dict = vessel_spline_generation(
            img_crop, atlas_set, **vessel_spline_settings
        )
    else:
        logger.info("No vessel splining required, continue.")

    """
    Step 5 - Label Fusion
    """
    # Compute weight maps
    vote_type = settings["label_fusion_settings"]["vote_type"]
    vote_params = settings["label_fusion_settings"]["vote_params"]

    # Compute weight maps (atlas_set keys may have changed after IAR)
    for atlas_id in list(atlas_set.keys()):
        atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
        weight_map = compute_weight_map(
            img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params
        )
        atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map

    combined_label_dict = combine_labels(atlas_set, atlas_structure_list)

    """
    Step 6 - Paste the cropped structure into the original image space
    """
    logger.info("Generating binary segmentations.")
    template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
    template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)

    vote_structures = settings["label_fusion_settings"]["optimal_threshold"].keys()
    vote_structures = [i for i in vote_structures if i in atlas_structure_list]

    for structure_name in vote_structures:

        probability_map = combined_label_dict[structure_name]

        optimal_threshold = settings["label_fusion_settings"]["optimal_threshold"][structure_name]

        binary_struct = process_probability_image(probability_map, optimal_threshold)

        if return_as_cropped:
            results[structure_name] = binary_struct

            if settings["return_proba_as_contours"]:
                atlas_contours = [
                    atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
                ]
                results_prob[structure_name] = binary_encode_structure_list(atlas_contours)

            else:
                results_prob[structure_name] = probability_map

            # We also generate another version of the guide_structure using the atlas contours
            # We *can* return this, but probably don't want to
            # Here this check is performed
            if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
                results[guide_structure_name] = guide_structure
                results_prob[guide_structure_name] = guide_structure

        else:
            if settings["return_proba_as_contours"]:
                atlas_contours = [
                    atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
                ]
                probability_img = binary_encode_structure_list(atlas_contours)
                template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)

            else:
                probability_img = probability_map

            # Un-crop binary structure
            paste_img_binary = sitk.Paste(
                template_img_binary,
                binary_struct,
                binary_struct.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results[structure_name] = paste_img_binary

            # Un-crop probability map
            paste_prob_img = sitk.Paste(
                template_img_prob,
                probability_img,
                probability_img.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results_prob[structure_name] = paste_prob_img

            # Un-crop the guide structure
            if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
                new_guide_structure = sitk.Paste(
                    template_img_binary,
                    guide_structure,
                    guide_structure.GetSize(),
                    (0, 0, 0),
                    crop_box_index,
                )
                results[guide_structure_name] = new_guide_structure
                results_prob[guide_structure_name] = new_guide_structure

    for structure_name in vessel_spline_settings["vessel_name_list"]:
        binary_struct = segmented_vessel_dict[structure_name]

        if return_as_cropped:
            results[structure_name] = binary_struct

            vessel_list = [
                atlas_set[atlas_id]["DIR"][structure_name] for atlas_id in list(atlas_set.keys())
            ]

        else:
            # Un-crop binary vessel
            paste_img_binary = sitk.Paste(
                template_img_binary,
                binary_struct,
                binary_struct.GetSize(),
                (0, 0, 0),
                crop_box_index,
            )
            results[structure_name] = paste_img_binary

            vessel_list = []
            for atlas_id in list(atlas_set.keys()):
                paste_img_binary = sitk.Paste(
                    template_img_binary,
                    atlas_set[atlas_id]["DIR"][structure_name],
                    atlas_set[atlas_id]["DIR"][structure_name].GetSize(),
                    (0, 0, 0),
                    crop_box_index,
                )
                vessel_list.append(paste_img_binary)

        # Encode list of vessels
        encoded_vessels = binary_encode_structure_list(vessel_list)
        results_prob[structure_name] = encoded_vessels

    """
    Step 7 - Geometric definitions of cardiac valves and conduction system nodes
    """
    geometric_segmentation_settings = settings["geometric_segmentation_settings"]

    if geometric_segmentation_settings["run_geometric_algorithms"]:

        logger.info("Computing geometric definitions for valves and conduction system.")

        geom_atlas_names = geometric_segmentation_settings["atlas_structure_names"]
        geom_valve_defs = geometric_segmentation_settings["valve_definitions"]
        geom_conduction_defs = geometric_segmentation_settings["conduction_system_definitions"]

        # 1 - MITRAL VALVE
        mv_name = "MITRALVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[mv_name] = generate_valve_using_cylinder(
            label_atrium=results[geom_atlas_names["atlas_left_atrium"]],
            label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
            radius_mm=geom_valve_defs["mitral_valve_radius_mm"],
            height_mm=geom_valve_defs["mitral_valve_thickness_mm"],
        )

        # 2 - TRICUSPID VALVE
        tv_name = "TRICUSPIDVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[tv_name] = generate_valve_using_cylinder(
            label_atrium=results[geom_atlas_names["atlas_right_atrium"]],
            label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
            radius_mm=geom_valve_defs["tricuspid_valve_radius_mm"],
            height_mm=geom_valve_defs["tricuspid_valve_thickness_mm"],
        )

        # 3 - AORTIC VALVE
        av_name = "AORTICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[av_name] = generate_valve_from_great_vessel(
            label_great_vessel=results[geom_atlas_names["atlas_ascending_aorta"]],
            label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
            valve_thickness_mm=geom_valve_defs["aortic_valve_thickness_mm"],
        )

        # 4 - PULMONIC VALVE
        pv_name = "PULMONICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
        results[pv_name] = generate_valve_from_great_vessel(
            label_great_vessel=results[geom_atlas_names["atlas_pulmonary_artery"]],
            label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
            valve_thickness_mm=geom_valve_defs["pulmonic_valve_thickness_mm"],
        )

        # 5 - SINOATRIAL NODE
        san_name = "SAN" + geometric_segmentation_settings["geometric_name_suffix"]
        results[san_name] = geometric_sinoatrialnode(
            label_svc=results[geom_atlas_names["atlas_superior_vena_cava"]],
            label_ra=results[geom_atlas_names["atlas_right_atrium"]],
            label_wholeheart=results[geom_atlas_names["atlas_whole_heart"]],
            radius_mm=geom_conduction_defs["sinoatrial_node_radius_mm"],
        )

        # 6 - ATRIOVENTRICULAR NODE
        avn_name = "AVN" + geometric_segmentation_settings["geometric_name_suffix"]
        results[avn_name] = geometric_atrioventricularnode(
            label_la=results[geom_atlas_names["atlas_left_atrium"]],
            label_lv=results[geom_atlas_names["atlas_left_ventricle"]],
            label_ra=results[geom_atlas_names["atlas_right_atrium"]],
            label_rv=results[geom_atlas_names["atlas_right_ventricle"]],
            radius_mm=geom_conduction_defs["atrioventricular_node_radius_mm"],
        )

    """
    Step 8 - Post-processing
    """
    postprocessing_settings = settings["postprocessing_settings"]

    if postprocessing_settings["run_postprocessing"]:
        logger.info("Running post-processing.")

        # Remove any smaller components and perform morphological closing (hole filling)
        binaryfillhole_img = [
            int(postprocessing_settings["binaryfillhole_mm"] / sp) for sp in img.GetSpacing()
        ]

        for structure_name in postprocessing_settings["structures_for_binaryfillhole"]:

            if structure_name not in results.keys():
                continue

            contour_s = results[structure_name]
            # Keep only the largest connected component, then close holes
            contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1
            contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
            results[structure_name] = contour_s

        # Remove any overlaps
        input_overlap = {
            s: results[s] for s in postprocessing_settings["structures_for_overlap_correction"]
        }
        output_overlap = correct_volume_overlap(input_overlap)

        for s in postprocessing_settings["structures_for_overlap_correction"]:
            results[s] = output_overlap[s]

    if return_as_cropped:
        results["CROP_IMAGE"] = img_crop

    logger.info("Done!")

    return results, results_prob
| platipy/imaging/projects/cardiac/run.py | 32,147 | Runs the atlas-based cardiac segmentation
Args:
img (sitk.Image):
settings (dict, optional): Dictionary containing settings for algorithm.
Defaults to default_settings.
Returns:
dict: Dictionary containing output of segmentation
Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. specify voxel size (mm) since isotropic_resample is set specify voxel size (mm) since isotropic_resample is set Settings Register the atlases Register the atlases Save in the atlas dict sitk.WriteImage(rigid_image, f"./RR_{atlas_id}.nii.gz") Register the atlases Save in the atlas dict sitk.WriteImage(deform_image, f"./DIR_STRUCT_{atlas_id}.nii.gz") Settings Register the atlases Save in the atlas dict Compute weight maps Here we use simple GWV as this minises the potentially negative influence of mis-registered atlases Compute weight maps Compute weight maps We also generate another version of the guide_structure using the atlas contours We *can* return this, but probably don't want to Here this check is performed Un-crop binary structure Un-crop probability map Un-crop the guide structure Un-crop binary vessel Encode list of vessels 1 - MITRAL VALVE 2 - TRICUSPID VALVE 3 - AORTIC VALVE 4 - PULMONIC VALVE 5 - SINOATRIAL NODE 6 - ATRIOVENTRICULAR NODE Remove any smaller components and perform morphological closing (hole filling) Remove any overlaps | 1,945 | en | 0.743822 |
from behave import *
import requests
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from host.models import Event
use_step_matcher("re")
# @given("that I am a registered host of privilege walk events and want to create questions and answer choices for the event")
# def step_impl(context):
# context.username = "12thMan"
# context.password = "SomePassword123"
# context.first_name = "12th"
# context.last_name = "Man"
# context.email = "twelve@testtamu.edu"
# usr = User.objects.create_user(
# context.username,
# context.email,
# context.password
# )
# usr.first_name = context.first_name
# usr.last_name = context.last_name
# usr.save()
# registered_user = User.objects.filter(username="12thMan")
# assert len(registered_user) == 1
# user_auth_token, _ = Token.objects.get_or_create(user=usr)
# context.key = user_auth_token.key
# data = {
# "name": "New year event"
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
# context.event_api_response_data = resp.json()
# context.eventId = context.event_api_response_data["id"]
# @when("I make an API call to create questions API with my correct username, questions, answer choices and correct eventid")
# def step_impl(context):
# data = {
# "event_id": context.eventId,
# "title": "The question's title goes here",
# "choices": [
# {
# "description": "Pizza",
# "value": 1
# },
# {
# "description": "Ice Cream",
# "value": 2
# },
# {
# "description": "Salt Water",
# "value": -1
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 200 and resp.status_code < 300
# context.api_response_data = resp.json()
# @then("I expect the response that gives the status and id of the created question")
# def step_impl(context):
# assert context.api_response_data["status"] == "created"
# assert context.api_response_data["id"] != ""
# @given("that I am a registered host of privilege walk and wants to create questions but with wrong eventid")
# def step_impl(context):
# context.username = "12thMan"
# context.password = "SomePassword123"
# context.first_name = "12th"
# context.last_name = "Man"
# context.email = "twelve@testtamu.edu"
# usr = User.objects.create_user(
# context.username,
# context.email,
# context.password
# )
# usr.first_name = context.first_name
# usr.last_name = context.last_name
# usr.save()
# registered_user = User.objects.filter(username="12thMan")
# assert len(registered_user) == 1
# user_auth_token, _ = Token.objects.get_or_create(user=usr)
# context.key = user_auth_token.key
# data = {
# "name": "New year event"
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
# context.event_api_response_data = resp.json()
# context.eventId = context.event_api_response_data["id"]
# @when("I make an API call to create questions API with my username, questions, answer choices and wrong event id")
# def step_impl(context):
# data = {
# "event_id": 12,
# "title": "Are you under 20?",
# "choices": [
# {
# "description": "Yes",
# "value": "1"
# },
# {
# "description": "No",
# "value": "-1"
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 500
# context.api_response_data = resp.json()
# @then("I expect the response that says questions cannot be created as event id doesn't exist")
# def step_impl(context):
# pass
# @given("that I am a registered host of privilege walk and wants to create questions but without giving eventid")
# def step_impl(context):
# context.username = "12thMan"
# @when("I make an API call to create questions API with my username, questions, answer choices and without event id")
# def step_impl(context):
# data = {
# "title": "Are you under 20?",
# "choices": [
# {
# "description": "Yes",
# "value": "1"
# },
# {
# "description": "No",
# "value": "-1"
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 500
# context.api_response_data = resp.json()
# @then("I expect the response that says questions cannot be created as event id is missing")
# def step_impl(context):
# pass
@given("that I am a registered host of privilege walk events and want to create questions but forgets to give username")
def step_impl(context):
    """Set up a host identity that is deliberately never authenticated."""
    # NOTE(review): the username is stored on the context but never used by
    # the subsequent @when step, which posts without credentials on purpose.
    context.username = "11thMan"
@when("I make an API call to create questions API with missing username in request")
def step_impl(context):
    """POST a question payload with no Authorization header and record the response."""
    data = {
        "title": "Are you under 20?",
        "choices": [
            {
                "description": "Yes",
                "value": "1"
            },
            {
                "description": "No",
                "value": "-1"
            }
        ]
    }
    # NOTE(review): the scenario text says "questions API", but this posts to
    # /host/events/create/ (the commented-out steps above use /host/qa/create/
    # for questions) -- confirm the intended endpoint.
    # No headers are passed, so the request carries no auth token.
    resp = requests.post(context.test.live_server_url + "/host/events/create/", data)
    # Expect a client error (4xx) because credentials are missing.
    assert resp.status_code >= 400 and resp.status_code < 500
    context.api_response_data = resp.json()
@then("I expect the response that says questions cannot be created and username is required in request")
def step_impl(context):
    """Verify the API reported that authentication credentials were missing."""
    expected_detail = "Authentication credentials were not provided."
    actual_detail = context.api_response_data["detail"]
    assert actual_detail == expected_detail
| behave_tests/steps/create_question.py | 6,464 | @given("that I am a registered host of privilege walk events and want to create questions and answer choices for the event") def step_impl(context): context.username = "12thMan" context.password = "SomePassword123" context.first_name = "12th" context.last_name = "Man" context.email = "twelve@testtamu.edu" usr = User.objects.create_user( context.username, context.email, context.password ) usr.first_name = context.first_name usr.last_name = context.last_name usr.save() registered_user = User.objects.filter(username="12thMan") assert len(registered_user) == 1 user_auth_token, _ = Token.objects.get_or_create(user=usr) context.key = user_auth_token.key data = { "name": "New year event" } headers = { 'Authorization':'Token '+ context.key } resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers) context.event_api_response_data = resp.json() context.eventId = context.event_api_response_data["id"] @when("I make an API call to create questions API with my correct username, questions, answer choices and correct eventid") def step_impl(context): data = { "event_id": context.eventId, "title": "The question's title goes here", "choices": [ { "description": "Pizza", "value": 1 }, { "description": "Ice Cream", "value": 2 }, { "description": "Salt Water", "value": -1 } ] } headers = { 'Authorization':'Token '+ context.key } resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers) assert resp.status_code >= 200 and resp.status_code < 300 context.api_response_data = resp.json() @then("I expect the response that gives the status and id of the created question") def step_impl(context): assert context.api_response_data["status"] == "created" assert context.api_response_data["id"] != "" @given("that I am a registered host of privilege walk and wants to create questions but with wrong eventid") def step_impl(context): context.username = "12thMan" 
context.password = "SomePassword123" context.first_name = "12th" context.last_name = "Man" context.email = "twelve@testtamu.edu" usr = User.objects.create_user( context.username, context.email, context.password ) usr.first_name = context.first_name usr.last_name = context.last_name usr.save() registered_user = User.objects.filter(username="12thMan") assert len(registered_user) == 1 user_auth_token, _ = Token.objects.get_or_create(user=usr) context.key = user_auth_token.key data = { "name": "New year event" } headers = { 'Authorization':'Token '+ context.key } resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers) context.event_api_response_data = resp.json() context.eventId = context.event_api_response_data["id"] @when("I make an API call to create questions API with my username, questions, answer choices and wrong event id") def step_impl(context): data = { "event_id": 12, "title": "Are you under 20?", "choices": [ { "description": "Yes", "value": "1" }, { "description": "No", "value": "-1" } ] } headers = { 'Authorization':'Token '+ context.key } resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers) assert resp.status_code >= 500 context.api_response_data = resp.json() @then("I expect the response that says questions cannot be created as event id doesn't exist") def step_impl(context): pass @given("that I am a registered host of privilege walk and wants to create questions but without giving eventid") def step_impl(context): context.username = "12thMan" @when("I make an API call to create questions API with my username, questions, answer choices and without event id") def step_impl(context): data = { "title": "Are you under 20?", "choices": [ { "description": "Yes", "value": "1" }, { "description": "No", "value": "-1" } ] } headers = { 'Authorization':'Token '+ context.key } resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers) assert 
resp.status_code >= 500 context.api_response_data = resp.json() @then("I expect the response that says questions cannot be created as event id is missing") def step_impl(context): pass | 4,971 | en | 0.479169 |
import logging
import unittest
import random
from math import sqrt
from scipy.stats import chisquare
from type_system import Type, PolymorphicType, PrimitiveType, Arrow, List, UnknownType, INT, BOOL, STRING
from program import Program, Function, Variable, BasicPrimitive, New
from program_as_list import evaluation_from_compressed, reconstruct_from_compressed
from dsl import DSL
from DSL.deepcoder import semantics,primitive_types
from Algorithms.a_star import a_star
class TestSum(unittest.TestCase):
    def test_programs(self):
        """
        Checks the evaluation of programs
        """
        p1 = BasicPrimitive("MAP")
        p2 = BasicPrimitive("MAP", type_=PolymorphicType(name="test"))
        # p1 and p2 print identically and are equal modulo types, but are
        # distinct objects and unequal once types are taken into account.
        # Idiomatic unittest assertions (assertEqual/assertNotEqual/assertIsNot)
        # replace assertTrue(a == b)-style checks for better failure messages.
        self.assertEqual(repr(p1), repr(p2))
        self.assertTrue(p1.typeless_eq(p2))
        self.assertNotEqual(p1, p2)
        self.assertIsNot(p1, p2)

        t0 = PolymorphicType("t0")
        t1 = PolymorphicType("t1")
        # Local names renamed to avoid shadowing the deepcoder semantics /
        # primitive_types imported at module level.
        toy_semantics = {
            "+1": lambda x: x + 1,
            "MAP": lambda f: lambda l: list(map(f, l)),
        }
        toy_primitive_types = {
            "+1": Arrow(INT, INT),
            "MAP": Arrow(Arrow(t0, t1), Arrow(List(t0), List(t1))),
        }
        toy_DSL = DSL(toy_semantics, toy_primitive_types)

        p0 = Function(BasicPrimitive("+1"), [Variable(0)])
        env = (2, None)
        self.assertEqual(p0.eval(toy_DSL, env, 0), 3)

        p1 = Function(BasicPrimitive("MAP"), [BasicPrimitive("+1"), Variable(0)])
        env = ([2, 4], None)
        self.assertEqual(p1.eval(toy_DSL, env, 0), [3, 5])

    def test_evaluation_from_compressed(self):
        """
        Check if evaluation_from_compressed evaluates correctly the programs
        """
        N = 20_000  # we test against the first N programs
        deepcoder = DSL(semantics, primitive_types)
        type_request = Arrow(List(INT), List(INT))

        deepcoder_CFG = deepcoder.DSL_to_CFG(type_request)
        deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
        gen_a_star = a_star(deepcoder_PCFG)

        environment = ([2, 3, 1], None)
        r = type_request.returns()

        for i in range(N):
            program_compressed = next(gen_a_star)
            program = reconstruct_from_compressed(program_compressed, r)
            program_as_list = []
            # The compressed evaluation must agree with naive evaluation of
            # the reconstructed program on the same environment.
            eval_from_compressed = evaluation_from_compressed(
                program_compressed, deepcoder, environment, r
            )
            eval_from_program = program.eval_naive(deepcoder, environment)
            self.assertEqual(eval_from_compressed, eval_from_program)
# Run the test suite with per-test verbose output when executed directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| unit_tests_programs.py | 2,732 | Check if evaluation_from_compressed evaluates correctly the programs
Checks the evaluation of programs
checking whether they represent the same programs and same types we test against the first N programs | 206 | en | 0.788612 |
import os
from setuptools import setup, find_packages
# Use the README as the long description shown on PyPI.
with open('README.rst') as readme_file:
    readme = readme_file.read()
def prerelease_local_scheme(version):
    """
    Return local scheme version unless building on master in CircleCI.

    This function returns the local scheme version number
    (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
    pre-release in which case it ignores the hash and produces a
    PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
    """
    # Pre-release builds on master drop the local segment entirely. The
    # setuptools_scm import is deferred to the non-master branch so that the
    # master fast path does not require setuptools_scm to be importable.
    if os.getenv('CIRCLE_BRANCH') == 'master':
        return ''
    from setuptools_scm.version import get_local_node_and_date
    return get_local_node_and_date(version)
# Package metadata; the version is derived from git via setuptools-scm,
# using prerelease_local_scheme to control the local version segment.
setup(
    name='histomicsui',
    use_scm_version={'local_scheme': prerelease_local_scheme},
    setup_requires=['setuptools-scm'],
    description='Organize, visualize, and analyze histology images.',
    author='Kitware, Inc.',
    author_email='kitware@kitware.com',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    install_requires=[
        'girder-large-image-annotation>=1.4.2',
        'girder-slicer-cli-web[girder]>=1.2.0',
        'girder-worker[girder]>=0.6.0',
        'celery>=4.4.0rc5',
    ],
    license='Apache Software License 2.0',
    long_description=readme,
    long_description_content_type='text/x-rst',
    include_package_data=True,
    keywords='girder-plugin, histomicsui',
    packages=find_packages(exclude=['test', 'test.*']),
    url='https://github.com/DigitalSlideArchive/histomicsui',
    zip_safe=False,
    python_requires='>=3.6',
    # Registers this package with Girder's plugin discovery mechanism.
    entry_points={
        'girder.plugin': [
            'histomicsui = histomicsui:GirderPlugin'
        ]
    },
)
| setup.py | 2,074 | Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>). | 309 | en | 0.604492 |
import copy
import functools
import warnings
from types import MethodType
from typing import Dict, List, Optional, Type, Union
import dill
import pandas as pd
from feast.base_feature_view import BaseFeatureView
from feast.data_source import RequestSource
from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError
from feast.feature import Feature
from feast.feature_view import FeatureView
from feast.feature_view_projection import FeatureViewProjection
from feast.field import Field, from_value_type
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureView as OnDemandFeatureViewProto,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureViewMeta,
OnDemandFeatureViewSpec,
OnDemandSource,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
UserDefinedFunction as UserDefinedFunctionProto,
)
from feast.type_map import (
feast_value_type_to_pandas_type,
python_type_to_feast_value_type,
)
from feast.usage import log_exceptions
from feast.value_type import ValueType
warnings.simplefilter("once", DeprecationWarning)
class OnDemandFeatureView(BaseFeatureView):
    """
    [Experimental] An OnDemandFeatureView defines a logical group of features that are
    generated by applying a transformation on a set of input sources, such as feature
    views and request data sources.
    Attributes:
        name: The unique name of the on demand feature view.
        features: The list of features in the output of the on demand feature view.
        source_feature_view_projections: A map from input source names to actual input
            sources with type FeatureViewProjection.
        source_request_sources: A map from input source names to the actual input
            sources with type RequestSource.
        udf: The user defined transformation function, which must take pandas dataframes
            as inputs.
        description: A human-readable description.
        tags: A dictionary of key-value pairs to store arbitrary metadata.
        owner: The owner of the on demand feature view, typically the email of the primary
            maintainer.
    """
    # TODO(adchia): remove inputs from proto and declaration
    name: str
    features: List[Field]
    source_feature_view_projections: Dict[str, FeatureViewProjection]
    source_request_sources: Dict[str, RequestSource]
    udf: MethodType
    description: str
    tags: Dict[str, str]
    owner: str
    @log_exceptions
    def __init__(
        self,
        *args,
        name: Optional[str] = None,
        features: Optional[List[Feature]] = None,
        sources: Optional[
            Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
        ] = None,
        udf: Optional[MethodType] = None,
        inputs: Optional[
            Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
        ] = None,
        schema: Optional[List[Field]] = None,
        description: str = "",
        tags: Optional[Dict[str, str]] = None,
        owner: str = "",
    ):
        """
        Creates an OnDemandFeatureView object.
        Args:
            name: The unique name of the on demand feature view.
            features (deprecated): The list of features in the output of the on demand
                feature view, after the transformation has been applied.
            sources (optional): A map from input source names to the actual input sources,
                which may be feature views, feature view projections, or request data sources.
                These sources serve as inputs to the udf, which will refer to them by name.
            udf (optional): The user defined transformation function, which must take pandas
                dataframes as inputs.
            inputs (optional): A map from input source names to the actual input sources,
                which may be feature views, feature view projections, or request data sources.
                These sources serve as inputs to the udf, which will refer to them by name.
            schema (optional): The list of features in the output of the on demand feature
                view, after the transformation has been applied.
            description (optional): A human-readable description.
            tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
            owner (optional): The owner of the on demand feature view, typically the email
                of the primary maintainer.
        """
        # Order in which deprecated positional arguments are interpreted below.
        positional_attributes = ["name", "features", "inputs", "udf"]
        _name = name
        # `schema` supersedes the deprecated `features` parameter; if only
        # `features` was given, convert each Feature into a Field.
        _schema = schema or []
        if len(_schema) == 0 and features is not None:
            _schema = [Field.from_feature(feature) for feature in features]
        if features is not None:
            warnings.warn(
                (
                    "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                    "Please switch from using `features` to `schema`. This will also requiring switching "
                    "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                    "support the `features` parameter."
                ),
                DeprecationWarning,
            )
        # `sources` supersedes the deprecated `inputs` parameter; specifying
        # both is ambiguous and therefore an error.
        _sources = sources or inputs
        if inputs and sources:
            raise ValueError("At most one of `sources` or `inputs` can be specified.")
        elif inputs:
            warnings.warn(
                (
                    "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                    "Feast 0.21 and onwards will not support the `inputs` parameter."
                ),
                DeprecationWarning,
            )
        _udf = udf
        # Backwards compatibility: accept (name, features, inputs, udf) as
        # positional arguments, overriding the keyword values gathered above.
        if args:
            warnings.warn(
                (
                    "On demand feature view parameters should be specified as keyword arguments "
                    "instead of positional arguments. Feast 0.23 and onwards will not support "
                    "positional arguments in on demand feature view definitions."
                ),
                DeprecationWarning,
            )
            if len(args) > len(positional_attributes):
                raise ValueError(
                    f"Only {', '.join(positional_attributes)} are allowed as positional args "
                    f"when defining feature views, for backwards compatibility."
                )
            if len(args) >= 1:
                _name = args[0]
            if len(args) >= 2:
                _schema = args[1]
                # Convert Features to Fields.
                if len(_schema) > 0 and isinstance(_schema[0], Feature):
                    _schema = [Field.from_feature(feature) for feature in _schema]
                    warnings.warn(
                        (
                            "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                            "Please switch from using `features` to `schema`. This will also requiring switching "
                            "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                            "support the `features` parameter."
                        ),
                        DeprecationWarning,
                    )
            if len(args) >= 3:
                _sources = args[2]
                warnings.warn(
                    (
                        "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                        "Feast 0.21 and onwards will not support the `inputs` parameter."
                    ),
                    DeprecationWarning,
                )
            if len(args) >= 4:
                _udf = args[3]
        if not _name:
            raise ValueError(
                "The name of the on demand feature view must be specified."
            )
        if not _sources:
            raise ValueError("The `sources` parameter must be specified.")
        super().__init__(
            name=_name,
            features=_schema,
            description=description,
            tags=tags,
            owner=owner,
        )
        assert _sources is not None
        # Partition the sources by kind: request sources are kept as-is,
        # while feature views are stored via their projections.
        self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {}
        self.source_request_sources: Dict[str, RequestSource] = {}
        for source_name, odfv_source in _sources.items():
            if isinstance(odfv_source, RequestSource):
                self.source_request_sources[source_name] = odfv_source
            elif isinstance(odfv_source, FeatureViewProjection):
                self.source_feature_view_projections[source_name] = odfv_source
            else:
                self.source_feature_view_projections[
                    source_name
                ] = odfv_source.projection
        if _udf is None:
            raise ValueError("The `udf` parameter must be specified.")
        assert _udf
        self.udf = _udf
    @property
    def proto_class(self) -> Type[OnDemandFeatureViewProto]:
        """The protobuf message class used to serialize this view."""
        return OnDemandFeatureViewProto
    def __copy__(self):
        """Returns a copy of this view; the projection is copied, while the
        sources and udf are shared with the original."""
        fv = OnDemandFeatureView(
            name=self.name,
            schema=self.features,
            sources=dict(
                **self.source_feature_view_projections, **self.source_request_sources,
            ),
            udf=self.udf,
            description=self.description,
            tags=self.tags,
            owner=self.owner,
        )
        fv.projection = copy.copy(self.projection)
        return fv
    def __eq__(self, other):
        if not super().__eq__(other):
            return False
        # udfs are compared by their compiled bytecode rather than by
        # function object identity.
        if (
            not self.source_feature_view_projections
            == other.source_feature_view_projections
            or not self.source_request_sources == other.source_request_sources
            or not self.udf.__code__.co_code == other.udf.__code__.co_code
        ):
            return False
        return True
    def __hash__(self):
        return super().__hash__()
    def to_proto(self) -> OnDemandFeatureViewProto:
        """
        Converts an on demand feature view object to its protobuf representation.
        Returns:
            A OnDemandFeatureViewProto protobuf.
        """
        meta = OnDemandFeatureViewMeta()
        if self.created_timestamp:
            meta.created_timestamp.FromDatetime(self.created_timestamp)
        if self.last_updated_timestamp:
            meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
        # Each source is wrapped in an OnDemandSource message.
        sources = {}
        for source_name, fv_projection in self.source_feature_view_projections.items():
            sources[source_name] = OnDemandSource(
                feature_view_projection=fv_projection.to_proto()
            )
        for (source_name, request_sources,) in self.source_request_sources.items():
            sources[source_name] = OnDemandSource(
                request_data_source=request_sources.to_proto()
            )
        # The udf is serialized with dill so it can be restored in from_proto.
        spec = OnDemandFeatureViewSpec(
            name=self.name,
            features=[feature.to_proto() for feature in self.features],
            sources=sources,
            user_defined_function=UserDefinedFunctionProto(
                name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True),
            ),
            description=self.description,
            tags=self.tags,
            owner=self.owner,
        )
        return OnDemandFeatureViewProto(spec=spec, meta=meta)
    @classmethod
    def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
        """
        Creates an on demand feature view from a protobuf representation.
        Args:
            on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.
        Returns:
            A OnDemandFeatureView object based on the on-demand feature view protobuf.
        """
        # Rebuild the sources from the OnDemandSource "source" oneof.
        sources = {}
        for (
            source_name,
            on_demand_source,
        ) in on_demand_feature_view_proto.spec.sources.items():
            if on_demand_source.WhichOneof("source") == "feature_view":
                sources[source_name] = FeatureView.from_proto(
                    on_demand_source.feature_view
                ).projection
            elif on_demand_source.WhichOneof("source") == "feature_view_projection":
                sources[source_name] = FeatureViewProjection.from_proto(
                    on_demand_source.feature_view_projection
                )
            else:
                sources[source_name] = RequestSource.from_proto(
                    on_demand_source.request_data_source
                )
        on_demand_feature_view_obj = cls(
            name=on_demand_feature_view_proto.spec.name,
            schema=[
                Field(
                    name=feature.name,
                    dtype=from_value_type(ValueType(feature.value_type)),
                )
                for feature in on_demand_feature_view_proto.spec.features
            ],
            sources=sources,
            udf=dill.loads(
                on_demand_feature_view_proto.spec.user_defined_function.body
            ),
            description=on_demand_feature_view_proto.spec.description,
            tags=dict(on_demand_feature_view_proto.spec.tags),
            owner=on_demand_feature_view_proto.spec.owner,
        )
        # FeatureViewProjections are not saved in the OnDemandFeatureView proto.
        # Create the default projection.
        on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(
            on_demand_feature_view_obj
        )
        if on_demand_feature_view_proto.meta.HasField("created_timestamp"):
            on_demand_feature_view_obj.created_timestamp = (
                on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
            )
        if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"):
            on_demand_feature_view_obj.last_updated_timestamp = (
                on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
            )
        return on_demand_feature_view_obj
    def get_request_data_schema(self) -> Dict[str, ValueType]:
        """Returns a mapping from request-data field name to its ValueType,
        aggregated over all request sources of this view."""
        schema: Dict[str, ValueType] = {}
        for request_source in self.source_request_sources.values():
            # A RequestSource schema may be either a list of Fields or a
            # name -> ValueType dict; normalize both into `schema`.
            if isinstance(request_source.schema, List):
                new_schema = {}
                for field in request_source.schema:
                    new_schema[field.name] = field.dtype.to_value_type()
                schema.update(new_schema)
            elif isinstance(request_source.schema, Dict):
                schema.update(request_source.schema)
            else:
                raise Exception(
                    f"Request source schema is not correct type: ${str(type(request_source.schema))}"
                )
        return schema
    def get_transformed_features_df(
        self, df_with_features: pd.DataFrame, full_feature_names: bool = False,
    ) -> pd.DataFrame:
        """Applies the udf to `df_with_features` and returns the transformed
        dataframe, with output columns renamed according to
        `full_feature_names`."""
        # Apply on demand transformations
        columns_to_cleanup = []
        for source_fv_projection in self.source_feature_view_projections.values():
            for feature in source_fv_projection.features:
                full_feature_ref = f"{source_fv_projection.name}__{feature.name}"
                if full_feature_ref in df_with_features.keys():
                    # Make sure the partial feature name is always present
                    df_with_features[feature.name] = df_with_features[full_feature_ref]
                    columns_to_cleanup.append(feature.name)
                elif feature.name in df_with_features.keys():
                    # Make sure the full feature name is always present
                    df_with_features[full_feature_ref] = df_with_features[feature.name]
                    columns_to_cleanup.append(full_feature_ref)
        # Compute transformed values and apply to each result row
        df_with_transformed_features = self.udf.__call__(df_with_features)
        # Work out whether the correct columns names are used.
        rename_columns: Dict[str, str] = {}
        for feature in self.features:
            short_name = feature.name
            long_name = f"{self.projection.name_to_use()}__{feature.name}"
            if (
                short_name in df_with_transformed_features.columns
                and full_feature_names
            ):
                rename_columns[short_name] = long_name
            elif not full_feature_names:
                # Long name must be in dataframe.
                rename_columns[long_name] = short_name
        # Cleanup extra columns used for transformation
        df_with_features.drop(columns=columns_to_cleanup, inplace=True)
        return df_with_transformed_features.rename(columns=rename_columns)
    def infer_features(self):
        """
        Infers the set of features associated to this feature view from the input source.
        Raises:
            RegistryInferenceFailure: The set of features could not be inferred.
        """
        # Build an empty dataframe whose columns/dtypes mirror the udf's
        # expected input, then run the udf on it to observe its output schema.
        df = pd.DataFrame()
        for feature_view_projection in self.source_feature_view_projections.values():
            for feature in feature_view_projection.features:
                dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
                df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series(
                    dtype=dtype
                )
                df[f"{feature.name}"] = pd.Series(dtype=dtype)
        for request_data in self.source_request_sources.values():
            for field in request_data.schema:
                dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
                df[f"{field.name}"] = pd.Series(dtype=dtype)
        output_df: pd.DataFrame = self.udf.__call__(df)
        inferred_features = []
        for f, dt in zip(output_df.columns, output_df.dtypes):
            inferred_features.append(
                Field(
                    name=f,
                    dtype=from_value_type(
                        python_type_to_feast_value_type(f, type_name=str(dt))
                    ),
                )
            )
        # If features were declared explicitly, validate that the udf produced
        # all of them; otherwise adopt the inferred features.
        if self.features:
            missing_features = []
            for specified_features in self.features:
                if specified_features not in inferred_features:
                    missing_features.append(specified_features)
            if missing_features:
                raise SpecifiedFeaturesNotPresentError(
                    [f.name for f in missing_features], self.name
                )
        else:
            self.features = inferred_features
        if not self.features:
            raise RegistryInferenceFailure(
                "OnDemandFeatureView",
                f"Could not infer Features for the feature view '{self.name}'.",
            )
    @staticmethod
    def get_requested_odfvs(feature_refs, project, registry):
        """Returns the on demand feature views registered in `project` that
        have at least one feature referenced (as "<view>:<feature>") in
        `feature_refs`."""
        all_on_demand_feature_views = registry.list_on_demand_feature_views(
            project, allow_cache=True
        )
        requested_on_demand_feature_views: List[OnDemandFeatureView] = []
        for odfv in all_on_demand_feature_views:
            for feature in odfv.features:
                if f"{odfv.name}:{feature.name}" in feature_refs:
                    requested_on_demand_feature_views.append(odfv)
                    break
        return requested_on_demand_feature_views
# TODO(felixwang9817): Force this decorator to accept kwargs and switch from
# `features` to `schema`.
def on_demand_feature_view(
    *args,
    features: Optional[List[Feature]] = None,
    sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
    inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
    schema: Optional[List[Field]] = None,
    description: str = "",
    tags: Optional[Dict[str, str]] = None,
    owner: str = "",
):
    """
    Creates an OnDemandFeatureView object with the given user function as udf.
    Args:
        features (deprecated): The list of features in the output of the on demand
            feature view, after the transformation has been applied.
        sources (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        inputs (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        schema (optional): The list of features in the output of the on demand feature
            view, after the transformation has been applied.
        description (optional): A human-readable description.
        tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
        owner (optional): The owner of the on demand feature view, typically the email
            of the primary maintainer.
    """
    # Order in which deprecated positional arguments are interpreted below.
    positional_attributes = ["features", "inputs"]
    # `schema` supersedes the deprecated `features` parameter; if only
    # `features` was given, convert each Feature into a Field.
    _schema = schema or []
    if len(_schema) == 0 and features is not None:
        _schema = [Field.from_feature(feature) for feature in features]
    if features is not None:
        warnings.warn(
            (
                "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                "Please switch from using `features` to `schema`. This will also requiring switching "
                "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                "support the `features` parameter."
            ),
            DeprecationWarning,
        )
    # `sources` supersedes the deprecated `inputs` parameter; specifying
    # both is ambiguous and therefore an error.
    _sources = sources or inputs
    if inputs and sources:
        raise ValueError("At most one of `sources` or `inputs` can be specified.")
    elif inputs:
        warnings.warn(
            (
                "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                "Feast 0.21 and onwards will not support the `inputs` parameter."
            ),
            DeprecationWarning,
        )
    # Backwards compatibility: accept (features, inputs) as positional
    # arguments, overriding the keyword values gathered above.
    if args:
        warnings.warn(
            (
                "On demand feature view parameters should be specified as keyword arguments "
                "instead of positional arguments. Feast 0.23 and onwards will not support "
                "positional arguments in on demand feature view definitions."
            ),
            DeprecationWarning,
        )
        if len(args) > len(positional_attributes):
            raise ValueError(
                f"Only {', '.join(positional_attributes)} are allowed as positional args "
                f"when defining feature views, for backwards compatibility."
            )
        if len(args) >= 1:
            _schema = args[0]
            # Convert Features to Fields.
            if len(_schema) > 0 and isinstance(_schema[0], Feature):
                _schema = [Field.from_feature(feature) for feature in _schema]
                warnings.warn(
                    (
                        "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                        "Please switch from using `features` to `schema`. This will also requiring switching "
                        "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                        "support the `features` parameter."
                    ),
                    DeprecationWarning,
                )
        if len(args) >= 2:
            _sources = args[1]
            warnings.warn(
                (
                    "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                    "Feast 0.21 and onwards will not support the `inputs` parameter."
                ),
                DeprecationWarning,
            )
    if not _sources:
        raise ValueError("The `sources` parameter must be specified.")
    def decorator(user_function):
        # The view takes its name from the decorated function, and the
        # function itself becomes the udf.
        on_demand_feature_view_obj = OnDemandFeatureView(
            name=user_function.__name__,
            sources=_sources,
            schema=_schema,
            udf=user_function,
            description=description,
            tags=tags,
            owner=owner,
        )
        # Preserve the wrapped function's metadata (__name__, __doc__, ...).
        functools.update_wrapper(
            wrapper=on_demand_feature_view_obj, wrapped=user_function
        )
        return on_demand_feature_view_obj
    return decorator
| sdk/python/feast/on_demand_feature_view.py | 24,653 | [Experimental] An OnDemandFeatureView defines a logical group of features that are
generated by applying a transformation on a set of input sources, such as feature
views and request data sources.
Attributes:
name: The unique name of the on demand feature view.
features: The list of features in the output of the on demand feature view.
source_feature_view_projections: A map from input source names to actual input
sources with type FeatureViewProjection.
source_request_sources: A map from input source names to the actual input
sources with type RequestSource.
udf: The user defined transformation function, which must take pandas dataframes
as inputs.
description: A human-readable description.
tags: A dictionary of key-value pairs to store arbitrary metadata.
owner: The owner of the on demand feature view, typically the email of the primary
maintainer.
Creates an OnDemandFeatureView object.
Args:
name: The unique name of the on demand feature view.
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
udf (optional): The user defined transformation function, which must take pandas
dataframes as inputs.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer.
Creates an on demand feature view from a protobuf representation.
Args:
on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.
Returns:
A OnDemandFeatureView object based on the on-demand feature view protobuf.
Infers the set of features associated to this feature view from the input source.
Raises:
RegistryInferenceFailure: The set of features could not be inferred.
Creates an OnDemandFeatureView object with the given user function as udf.
Args:
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer.
Converts an on demand feature view object to its protobuf representation.
Returns:
A OnDemandFeatureViewProto protobuf.
TODO(adchia): remove inputs from proto and declaration Convert Features to Fields. FeatureViewProjections are not saved in the OnDemandFeatureView proto. Create the default projection. Apply on demand transformations Make sure the partial feature name is always present Make sure the full feature name is always present Compute transformed values and apply to each result row Work out whether the correct columns names are used. Long name must be in dataframe. Cleanup extra columns used for transformation TODO(felixwang9817): Force this decorator to accept kwargs and switch from `features` to `schema`. Convert Features to Fields. | 4,501 | en | 0.853598 |
#!/usr/bin/env python
# coding=utf8
from copy import deepcopy
class Deque:
    """Simple list-backed double-ended queue.

    Items can be added and removed at both ends; removing from an empty
    deque returns None instead of raising.
    """

    def __init__(self):
        self.data = []

    def addFront(self, item):
        """Insert item at the front (index 0)."""
        self.data.insert(0, item)

    def addTail(self, item):
        """Append item at the tail."""
        self.data.append(item)

    def removeFront(self):
        """Remove and return the front item, or None if the deque is empty."""
        if not self.data:
            return None
        # pop(0) removes and returns in one step; the previous
        # deepcopy-then-delete made a needless deep copy of the item.
        return self.data.pop(0)

    def removeTail(self):
        """Remove and return the tail item, or None if the deque is empty."""
        if not self.data:
            return None
        return self.data.pop()

    def size(self):
        """Return the number of stored items."""
        return len(self.data)
def check_palindrome(check_value):
    """Return True if check_value reads the same forwards and backwards.

    The string is loaded into a Deque and then the two ends are compared
    pairwise, working inwards; empty and single-character strings are
    palindromes by definition.
    """
    dq = Deque()
    for ch in check_value:
        dq.addTail(ch)
    # Pull one symbol from each end; any mismatch means not a palindrome.
    while dq.size() > 1:
        if dq.removeFront() != dq.removeTail():
            return False
    return True
| palindrome_check.py | 1,080 | !/usr/bin/env python coding=utf8 Reading data into deque Comparing each symbol on both sides, if not equal - not palindrome If all check was succeeded, string is a palindrome | 174 | en | 0.770204 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtGui, QtCore
import sys, os.path as op
# Make the project's Structure package and its parent directory importable.
path1 = op.join( op.abspath(op.dirname(__file__)), '..', 'Structure')
path2 = op.join( op.abspath(op.dirname(__file__)), '..')
sys.path.append(path1)
sys.path.append(path2)
from Structure import *
from VisObject import *
class SubVision( QtWidgets.QWidget ):
    """Base window class for displaying the sub-objects of a main object."""
    def __init__( self, main_object, is_change=True, parent=None ):
        super().__init__( parent=parent )
        # Store the main object whose sub-objects are displayed
        self.__obj = main_object
        # Whether items may be edited (default: yes)
        self.is_change = is_change
        self.initUI()
    def initUI( self ):
        '''Initialize the window contents.'''
        # List widget holding one entry per sub-object
        self.sub_objs = QtWidgets.QListWidget( )
        for obj in self.__obj.sub_objects:
            # Create a list item
            a = QtWidgets.QListWidgetItem()
            # Attach the sub-object to the item
            a.sub_obj = obj
            # Display the sub-object's name as the item text
            a.setText( obj.name )
            # Add the item to the list
            self.sub_objs.addItem( a )
        # Declare a form layout and place the list of sub-objects into it
        self.form = QtWidgets.QFormLayout()
        self.form.addRow(self.sub_objs)
        self.setLayout(self.form)
        # Open an item on double click
        self.sub_objs.itemDoubleClicked.connect( self.isDoubleClicked )
    def isDoubleClicked( self, obj ):
        # Open an edit window if editing is allowed, otherwise a view-only window
        if self.is_change:
            sub_window = ChangeVisObject( obj.sub_obj, parent=self )
        else:
            sub_window = SimpleVisObject( obj.sub_obj, parent=self )
        # NOTE(review): the title says "Редактирование" (editing) even for the
        # view-only branch — confirm whether that is intended.
        sub_window.setWindowTitle( "Редактирование объекта: " + obj.sub_obj.name )
        # Disable this window (or its parent, if any) while the child is open
        if self.parent() is None:
            self.setEnabled( False )
        else:
            self.parent().setEnabled( False )
        # Enable the child window and show it
        sub_window.setEnabled( True )
        sub_window.show()
| src/gui/SubVision.py | 2,955 | Базовый класс-окно для показа подчиненных объектов
Инициализируем содержимое окна
!/usr/bin/env python3 -*- coding: utf-8 -*-Устанавливаем главный объектУстанавливаем параметр возможности изменения элементов (по умолчанию - Да)Добавляем окно данных и устанавливаем в него подчиненные объектыДелаем ячейкуУстанавливаем в ней подчиненный базовому объектУстанавливаем в ней текст-имя объекта подчиненного объектаДобавляем в списокОбъявляем форму и добавляем в нее список подчиненных объектовСоединяем двойной щелчок с методомЕсли окно возможно изменить, вызываем окно изменения, иначе - окно просмотраДелаем это или родительское окно неактивнымДелаем дочернее окно активным и показываем его | 690 | ru | 0.995869 |
#!/usr/bin/env python
#version 2.1
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
from PyQt4.QtCore import pyqtSignal
class control_button_frame(QtGui.QFrame):
    """A row of six push buttons for nudging a target angle by fixed steps.

    Each button's label doubles as the signed increment (in degrees) that is
    forwarded to the parent's increment_target_angle() when clicked.
    """

    def __init__(self, parent=None, az_el=None):
        super(control_button_frame, self).__init__()
        self.parent = parent
        self.az_el = az_el  # which axis this frame controls ('az' or 'el' — set by caller)
        self.initUI()

    def initUI(self):
        self.setFrameShape(QtGui.QFrame.StyledPanel)
        self.init_widgets()
        self.connect_signals()

    def init_widgets(self):
        # Attribute name / label pairs, laid out left to right.
        specs = [
            ("MinusTenButton", "-10.0"),
            ("MinusOneButton", "-1.0"),
            ("MinusPtOneButton", "-0.1"),
            ("PlusPtOneButton", "+0.1"),
            ("PlusOneButton", "+1.0"),
            ("PlusTenButton", "+10.0"),
        ]
        row = QtGui.QHBoxLayout()
        for attr_name, label in specs:
            button = QtGui.QPushButton(self)
            button.setText(label)
            button.setMinimumWidth(45)
            row.addWidget(button)
            setattr(self, attr_name, button)
        self.setLayout(row)

    def connect_signals(self):
        # All six buttons share one handler; the sender's text is the step.
        for button in (self.MinusTenButton, self.MinusOneButton,
                       self.MinusPtOneButton, self.PlusPtOneButton,
                       self.PlusOneButton, self.PlusTenButton):
            button.clicked.connect(self.button_clicked)

    def button_clicked(self):
        sender = self.sender()
        self.parent.increment_target_angle(self.az_el, float(sender.text()))
| gui/v2.1/control_button_frame.py | 2,361 | !/usr/bin/env pythonversion 2.1 | 31 | hu | 0.155024 |
'''base config for emanet'''
# config for dataset
# NOTE(review): empty 'type'/'rootdir' fields look like placeholders that a
# dataset-specific config is expected to fill in — verify against callers.
DATASET_CFG = {
    'train': {
        'type': '',
        'set': 'train',
        'rootdir': '',
        # train-time augmentations: random-scale resize, crop, flip,
        # photometric distortion, then normalize/tensorize/pad to 512x512
        'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
                     ('RandomCrop', {'crop_size': (512, 512), 'one_category_max_ratio': 0.75}),
                     ('RandomFlip', {'flip_prob': 0.5}),
                     ('PhotoMetricDistortion', {}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),
                     ('Padding', {'output_size': (512, 512), 'data_type': 'tensor'}),]
    },
    'test': {
        'type': '',
        'set': 'val',
        'rootdir': '',
        # test-time: deterministic resize + normalize only
        'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': None}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),]
    }
}
# config for dataloader (the [ ... ][1] idiom selects 'distributed')
DATALOADER_CFG = {
    'train': {
        'type': ['nondistributed', 'distributed'][1],
        'batch_size': 16,
        'num_workers': 16,
        'shuffle': True,
        'pin_memory': True,
        'drop_last': True,
    },
    'test': {
        'type': ['nondistributed', 'distributed'][1],
        'batch_size': 1,
        'num_workers': 16,
        'shuffle': False,
        'pin_memory': True,
        'drop_last': False,
    }
}
# config for optimizer
# NOTE(review): max_epochs=0 and the None values under 'policy' appear to be
# placeholders populated at runtime — confirm before relying on them.
OPTIMIZER_CFG = {
    'type': 'sgd',
    'sgd': {
        'learning_rate': 0.01,
        'momentum': 0.9,
        'weight_decay': 5e-4,
    },
    'max_epochs': 0,
    'params_rules': {},
    'filter_params': True,
    'policy': {
        'type': 'poly',
        'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None}
    },
    'adjust_period': ['iteration', 'epoch'][0],
}
# config for losses: auxiliary head weighted 0.4, main head 1.0
LOSSES_CFG = {
    'loss_aux': {
        'celoss': {'scale_factor': 0.4, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_cls': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
}
# config for model (num_classes=-1 is a placeholder for derived configs)
MODEL_CFG = {
    'type': 'emanet',
    'num_classes': -1,
    'benchmark': True,
    'is_multi_gpus': True,
    'align_corners': False,
    'distributed': {'is_on': True, 'backend': 'nccl'},
    'norm_cfg': {'type': 'syncbatchnorm', 'opts': {}},
    'act_cfg': {'type': 'relu', 'opts': {'inplace': True}},
    'backbone': {
        'type': 'resnet101',
        'series': 'resnet',
        'pretrained': True,
        'outstride': 8,
        'use_stem': True,
        'selected_indices': (2, 3),
    },
    'ema': {
        'in_channels': 2048,
        'ema_channels': 512,
        'momentum': 0.1,
        'num_stages': 3,
        'num_bases': 64,
    },
    'decoder': {
        'in_channels': 2560,
        'out_channels': 512,
        'dropout': 0.1,
    },
    'auxiliary': {
        'in_channels': 1024,
        'out_channels': 512,
        'dropout': 0.1,
    }
}
# config for inference
INFERENCE_CFG = {
    'mode': 'whole',
    'opts': {},
    'tricks': {
        'multiscale': [1],
        'flip': False,
        'use_probs_before_resize': False
    }
}
# config for common (paths left empty; filled per-experiment)
COMMON_CFG = {
    'train': {
        'backupdir': '',
        'logfilepath': '',
        'loginterval': 50,
        'saveinterval': 1
    },
    'test': {
        'backupdir': '',
        'logfilepath': '',
        'resultsavepath': ''
    }
}
config for dataset config for dataloader config for optimizer config for losses config for model config for inference config for common | 160 | en | 0.580843 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-05-29 06:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a ``locked`` boolean flag
    (default ``False``) to the ``Thread`` model."""

    dependencies = [
        ('djeddit', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='thread',
            name='locked',
            field=models.BooleanField(default=False),
        ),
    ]
| djeddit/migrations/0002_thread_locked.py | 441 | -*- coding: utf-8 -*- Generated by Django 1.10.3 on 2017-05-29 06:35 | 68 | en | 0.648036 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import re
import os
import requests
from builtins import str
from typing import Text, List, Dict, Any
# Module-level logger for the interpreter implementations below.
logger = logging.getLogger(__name__)

# Messages starting with this character are treated as structured intent
# shorthand (e.g. "/greet{...}") by RegexInterpreter rather than free text.
INTENT_MESSAGE_PREFIX = "/"
class NaturalLanguageInterpreter(object):
    """Common interface for turning raw user text into structured output."""

    def parse(self, text):
        """Subclasses must implement the actual parsing."""
        raise NotImplementedError(
                "Interpreter needs to be able to parse "
                "messages into structured output.")

    @staticmethod
    def create(obj):
        """Coerce ``obj`` into an interpreter instance.

        Existing interpreters pass through unchanged; a string is treated
        as a model directory; anything else gets the regex fallback.
        """
        if isinstance(obj, NaturalLanguageInterpreter):
            return obj
        if isinstance(obj, str):
            return RasaNLUInterpreter(model_directory=obj)
        return RegexInterpreter()  # default interpreter
class RegexInterpreter(NaturalLanguageInterpreter):
    """Parses shorthand messages of the form ``/intent{"entity": "value"}``.

    Also supports the deprecated ``intent[entity=value]`` notation.
    """

    @staticmethod
    def allowed_prefixes():
        """Return the characters that may prefix an intent name."""
        return INTENT_MESSAGE_PREFIX + "_"  # _ is deprecated but supported

    @staticmethod
    def _create_entities(parsed_entities, sidx, eidx):
        """Expand a ``name -> value(s)`` mapping into entity dicts.

        Scalar values and lists of values are both accepted; every entity
        gets the span of the whole JSON blob since nothing finer is known.
        """
        entities = []
        for k, vs in parsed_entities.items():
            if not isinstance(vs, list):
                vs = [vs]
            for value in vs:
                entities.append({
                    "entity": k,
                    "start": sidx,
                    "end": eidx,  # can't be more specific
                    "value": value
                })
        return entities

    @staticmethod
    def _parse_parameters(entity_str, sidx, eidx, user_input):
        # type: (Text, int, int, Text) -> List[Dict[Text, Any]]
        """Parse the JSON object trailing an intent into entity dicts.

        Returns an empty list (and logs a warning) on any parse failure.
        """
        if entity_str is None or not entity_str.strip():
            # if there is nothing to parse we will directly exit
            return []

        try:
            parsed_entities = json.loads(entity_str)
            if isinstance(parsed_entities, dict):
                return RegexInterpreter._create_entities(parsed_entities,
                                                         sidx, eidx)
            else:
                raise Exception("Parsed value isn't a json object "
                                "(instead parser found '{}')"
                                ".".format(type(parsed_entities)))
        except Exception as e:
            # Fixed: the original message concatenated literals without
            # separating spaces ("parametersas", "intentfollowed") and was
            # ungrammatical ("Invalid to parse").
            logger.warning("Failed to parse arguments in line "
                           "'{}'. Failed to decode parameters "
                           "as a json object. Make sure the intent "
                           "is followed by a proper json object. "
                           "Error: {}".format(user_input, e))
            return []

    @staticmethod
    def extract_intent_and_entities(user_input):
        # type: (Text) -> object
        """Parse the user input using regexes to extract intent & entities."""

        prefixes = re.escape(RegexInterpreter.allowed_prefixes())
        # the regex matches "slot{"a": 1}"
        m = re.search('^[' + prefixes + ']?([^{]+)([{].+)?', user_input)
        if m is not None:
            event_name = m.group(1).strip()
            entities = RegexInterpreter._parse_parameters(m.group(2),
                                                          m.start(2),
                                                          m.end(2),
                                                          user_input)
            return event_name, entities
        else:
            # Fixed typo: "intent end entities" -> "intent and entities".
            logger.warning("Failed to parse intent and entities from "
                           "'{}'. ".format(user_input))
            return None, []

    @staticmethod
    def deprecated_extraction(user_input):
        """DEPRECATED parse of user input message."""
        # Raw strings: the original non-raw '\s'/'\[' escapes raise
        # DeprecationWarning (and eventually SyntaxError) on modern Python.
        value_assign_rx = r'\s*(.+)\s*=\s*(.+)\s*'
        prefixes = re.escape(RegexInterpreter.allowed_prefixes())
        structured_message_rx = r'^[' + prefixes + r']?([^\[]+)(\[(.+)\])?'
        m = re.search(structured_message_rx, user_input)
        if m is not None:
            intent = m.group(1).lower()
            offset = m.start(3)
            entities_str = m.group(3)
            entities = []
            if entities_str is not None:
                for entity_str in entities_str.split(','):
                    for match in re.finditer(value_assign_rx, entity_str):
                        start = match.start(2) + offset
                        end = match.end(0) + offset
                        entity = {
                            "entity": match.group(1),
                            "start": start,
                            "end": end,
                            "value": match.group(2)}
                        entities.append(entity)
            return intent, entities
        else:
            return None, []

    @staticmethod
    def is_using_deprecated_format(text):
        """Indicates if the text string is using the deprecated intent format.

        In the deprecated format entities where annotated using `[name=Rasa]`
        which has been replaced with `{"name": "Rasa"}`."""
        return (text.find("[") != -1
                and (text.find("{") == -1 or
                     text.find("[") < text.find("{")))

    def parse(self, text):
        """Parse a text message into the common result dict.

        Confidence is always 1.0 since the match is deterministic.
        """
        if self.is_using_deprecated_format(text):
            intent, entities = self.deprecated_extraction(text)
        else:
            intent, entities = self.extract_intent_and_entities(text)
        return {
            'text': text,
            'intent': {
                'name': intent,
                'confidence': 1.0,
            },
            'intent_ranking': [{
                'name': intent,
                'confidence': 1.0,
            }],
            'entities': entities,
        }
class RasaNLUHttpInterpreter(NaturalLanguageInterpreter):
    """Delegates parsing to a remote rasa NLU server over HTTP."""

    def __init__(self, model_name=None, token=None, server='http://localhost:5000', project_name='default'):
        self.model_name = model_name
        self.token = token
        self.server = server
        self.project_name = project_name

    def parse(self, text):
        """Parse a text message.

        Return a default value if the parsing of the text failed."""
        default_return = {"intent": {"name": "", "confidence": 0.0},
                          "entities": [], "text": ""}
        parsed = self._rasa_http_parse(text)
        if parsed is None:
            return default_return
        return parsed

    def _rasa_http_parse(self, text):
        """Send a text message to a running rasa NLU http server.

        Return `None` on failure."""
        if not self.server:
            logger.error(
                    "Failed to parse text '{}' using rasa NLU over http. "
                    "No rasa NLU server specified!".format(text))
            return None

        query = {
            "token": self.token,
            "model": self.model_name,
            "project": self.project_name,
            "q": text
        }
        endpoint = "{}/parse".format(self.server)
        try:
            # Any failure in here (connection, non-200, bad JSON body)
            # is logged and reported as None.
            response = requests.get(endpoint, params=query)
            if response.status_code != 200:
                logger.error(
                        "Failed to parse text '{}' using rasa NLU over http. "
                        "Error: {}".format(text, response.text))
                return None
            return response.json()
        except Exception as e:
            logger.error(
                    "Failed to parse text '{}' using rasa NLU over http. "
                    "Error: {}".format(text, e))
            return None
class RasaNLUInterpreter(NaturalLanguageInterpreter):
    """Parses messages with a locally loaded rasa NLU model.

    With ``lazy_init=True`` the model is only loaded on the first call
    to :meth:`parse`.
    """

    def __init__(self, model_directory, config_file=None, lazy_init=False):
        self.model_directory = model_directory
        self.lazy_init = lazy_init
        self.config_file = config_file
        self.interpreter = None
        if not lazy_init:
            self._load_interpreter()

    def parse(self, text):
        """Parse a text message.

        Return a default value if the parsing of the text failed."""
        if self.lazy_init and self.interpreter is None:
            self._load_interpreter()
        return self.interpreter.parse(text)

    def _load_interpreter(self):
        # imported here so rasa_nlu stays an optional dependency
        from rasa_nlu.model import Interpreter
        self.interpreter = Interpreter.load(self.model_directory)
| rasa_core/interpreter.py | 8,304 | Send a text message to a running rasa NLU http server.
Return `None` on failure.
DEPRECATED parse of user input message.
Parse the user input using regexes to extract intent & entities.
Indicates if the text string is using the deprecated intent format.
In the deprecated format entities where annotated using `[name=Rasa]`
which has been replaced with `{"name": "Rasa"}`.
Parse a text message.
Parse a text message.
Return a default value if the parsing of the text failed.
Parse a text message.
Return a default value if the parsing of the text failed.
default interpreter _ is deprecated but supported can't be more specific type: (Text, int, int, Text) -> List[Dict[Text, Any]] if there is nothing to parse we will directly exit type: (Text) -> object the regex matches "slot{"a": 1}" | 794 | en | 0.567113 |
# Copyright 2022 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for seqio.dataset_providers."""
import copy
import functools
import os
import shutil
from typing import Any, Callable, Mapping, Optional, Sequence
from absl.testing import absltest
from absl.testing import parameterized
from seqio import dataset_providers
from seqio import feature_converters
from seqio import metrics as metrics_lib
from seqio import preprocessors
from seqio import test_utils
from seqio import utils
from seqio import vocabularies
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
# Eager mode so dataset contents can be inspected directly in the tests.
tf.compat.v1.enable_eager_execution()
# Short aliases for heavily used registries and test helpers.
TaskRegistry = dataset_providers.TaskRegistry
MixtureRegistry = dataset_providers.MixtureRegistry
mock = absltest.mock
assert_dataset = test_utils.assert_dataset
create_default_dataset = test_utils.create_default_dataset
class TasksTest(test_utils.FakeTaskTest):
def test_invalid_name(self):
with self.assertRaisesRegex(
ValueError,
"Task name 'invalid/name' contains invalid characters. "
"Must match regex: .*"):
self.add_task("invalid/name", self.function_source)
def test_repeat_name(self):
with self.assertRaisesWithLiteralMatch(
ValueError,
"Attempting to register duplicate provider: text_line_task"):
self.add_task("text_line_task", self.text_line_source)
def test_function_source_signature(self):
# Good signatures.
def good_fn(split, shuffle_files):
del split
del shuffle_files
dataset_providers.FunctionDataSource(good_fn, splits=("train",))
def default_good_fn(split, shuffle_files=False):
del split
del shuffle_files
dataset_providers.FunctionDataSource(default_good_fn, splits=("train",))
def seed_fn(split, shuffle_files=True, seed=0):
del split
del shuffle_files
del seed
dataset_providers.FunctionDataSource(seed_fn, splits=("train",))
def extra_kwarg_good_fn(split, shuffle_files, unused_kwarg=True):
del split
del shuffle_files
dataset_providers.FunctionDataSource(extra_kwarg_good_fn, splits=("train",))
# Bad signatures.
with self.assertRaisesWithLiteralMatch(
ValueError,
"'missing_shuff' must have positional args ('split', 'shuffle_files'), "
"got: ('split',)"):
def missing_shuff(split):
del split
dataset_providers.FunctionDataSource(missing_shuff, splits=("train",))
with self.assertRaisesWithLiteralMatch(
ValueError,
"'missing_split' must have positional args ('split', 'shuffle_files'), "
"got: ('shuffle_files',)"):
def missing_split(shuffle_files):
del shuffle_files
dataset_providers.FunctionDataSource(missing_split, splits=("train",))
with self.assertRaisesWithLiteralMatch(
ValueError,
"'extra_pos_arg' may only have positional args ('split', "
"'shuffle_files'), got: ('split', 'shuffle_files', 'unused_arg')"):
def extra_pos_arg(split, shuffle_files, unused_arg):
del split
del shuffle_files
dataset_providers.FunctionDataSource(extra_pos_arg, splits=("train",))
def test_metric_fn_signature(self):
# pylint:disable=unused-argument
add_task = functools.partial(self.add_task, source=self.function_source)
def score_metric_fn(targets, scores):
return {}
def predict_metric_fn(targets, predictions):
return {}
valid_task = add_task(
"valid_metrics", metric_fns=[score_metric_fn, predict_metric_fn])
self.assertSameElements(
[score_metric_fn, predict_metric_fn], valid_task.metric_fns)
self.assertSameElements(
[score_metric_fn], valid_task.score_metric_fns)
self.assertSameElements(
[predict_metric_fn], valid_task.predict_metric_fns)
def extra_arg_metric_fn(targets, predictions, extra_param):
return {}
expected_error_message_prefix = (
"Metric functions must have positional arguments matching either "
"('targets', 'predictions') or ('targets', 'scores'). Got: ")
with self.assertRaisesWithLiteralMatch(
ValueError,
expected_error_message_prefix +
"('targets', 'predictions', 'extra_param')"):
valid_task = add_task(
"extra_arg_metric", metric_fns=[extra_arg_metric_fn])
def bad_order_metric_fn(predictions, targets):
return {}
with self.assertRaisesWithLiteralMatch(
ValueError,
expected_error_message_prefix + "('predictions', 'targets')"):
valid_task = add_task(
"bad_order_metric", metric_fns=[bad_order_metric_fn])
def bad_default_metric_fn(targets, predictions=(0)):
return {}
with self.assertRaisesWithLiteralMatch(
ValueError,
expected_error_message_prefix + "('targets',)"):
valid_task = add_task(
"bad_default_metric", metric_fns=[bad_default_metric_fn])
def ok_default_metric_fn(targets, predictions, extra_param=3):
return {}
valid_task_2 = add_task(
"valid_metrics_2", metric_fns=[ok_default_metric_fn])
self.assertSameElements([ok_default_metric_fn], valid_task_2.metric_fns)
self.assertEmpty(valid_task_2.score_metric_fns)
self.assertSameElements(
[ok_default_metric_fn], valid_task_2.predict_metric_fns)
def predict_metric_fn_with_types(
targets: Sequence[Mapping[str,
Any]], predictions: Sequence[Mapping[str,
Any]]
) -> Mapping[str, metrics_lib.MetricValue]:
return {}
valid_task_with_types = TaskRegistry.add(
"valid_metrics_with_types",
source=self.function_source,
output_features={
"inputs":
dataset_providers.Feature(test_utils.sentencepiece_vocab()),
"targets":
dataset_providers.Feature(test_utils.sentencepiece_vocab())
},
metric_fns=[predict_metric_fn_with_types])
self.assertSameElements([predict_metric_fn_with_types],
valid_task_with_types.metric_fns)
# pylint:enable=unused-argument
def test_no_tfds_version(self):
with self.assertRaisesWithLiteralMatch(
ValueError, "TFDS name must contain a version number, got: fake"):
dataset_providers.TfdsDataSource(tfds_name="fake")
def test_tfds_splits(self):
self.assertSameElements(
["train", "validation"],
dataset_providers.TfdsDataSource(tfds_name="fake:0.0.0").splits)
self.assertSameElements(
["validation"],
dataset_providers.TfdsDataSource(
tfds_name="fake:0.0.0", splits=["validation"]).splits)
self.assertSameElements(
["validation"],
dataset_providers.TfdsDataSource(
tfds_name="fake:0.0.0", splits={"validation": "train"}).splits)
def test_tfds_task(self):
self.verify_task_matches_fake_datasets(
"tfds_task", use_cached=False)
def test_function_task(self):
self.verify_task_matches_fake_datasets(
"function_task", use_cached=False)
def test_text_line_task(self):
self.verify_task_matches_fake_datasets(
"text_line_task", use_cached=False, splits=["train"])
def test_tf_example_task(self):
self.verify_task_matches_fake_datasets(
"tf_example_task", use_cached=False, splits=["train"])
@mock.patch.object(tf.io.gfile, "glob")
def test_file_data_source_shuffle_buffer_low(self, mock_glob):
mock_glob.return_value = [f"{i}" for i in range(20)]
fds = dataset_providers.FileDataSource(
read_file_fn=lambda x: tf.data.Dataset.from_tensor_slices([x]),
split_to_filepattern={"train": "filepattern"},
file_shuffle_buffer_size=2)
for _ in range(10):
ds = [
d.decode() for d in tfds.as_numpy(
fds.get_dataset("train", shuffle=True, seed=23))
]
self.assertListEqual(
ds,
[ # Not a great shuffle.
"0", "2", "1", "4", "5", "3", "7", "6", "9", "10", "11", "8",
"13", "14", "12", "16", "15", "18", "17", "19"
])
@mock.patch.object(tf.io.gfile, "glob")
def test_file_data_source_shuffle_buffer_full(self, mock_glob):
mock_glob.return_value = [f"{i}" for i in range(20)]
fds = dataset_providers.FileDataSource(
read_file_fn=lambda x: tf.data.Dataset.from_tensor_slices([x]),
split_to_filepattern={"train": "filepattern"},
file_shuffle_buffer_size=None)
for _ in range(10):
ds = [
d.decode() for d in tfds.as_numpy(
fds.get_dataset("train", shuffle=True, seed=23))
]
self.assertListEqual(
ds,
[ # Good shuffle.
"2", "13", "12", "19", "15", "5", "9", "1", "6", "8", "3", "0",
"10", "4", "14", "7", "16", "17", "18", "11"
])
def _get_preps_with_cache_placeholder_buffer_size(self, buffer_size):
preps = list(self.DEFAULT_PREPROCESSORS)
for i, p in enumerate(preps):
if isinstance(p, dataset_providers.CacheDatasetPlaceholder):
preps[i] = dataset_providers.CacheDatasetPlaceholder(
file_shuffle_buffer_size=buffer_size)
return preps
def _mock_and_assert_cached_source(self, task_name, buffer_size):
cached_task = dataset_providers.get_mixture_or_task(task_name)
cached_task._get_cached_source = mock.MagicMock(
side_effect=cached_task._get_cached_source)
_ = cached_task.get_dataset(None, "train", use_cached=True)
cached_task._get_cached_source.assert_called_once_with(
"train", buffer_size)
def test_cached_data_source_shuffle_buffer_default(self):
self._mock_and_assert_cached_source("cached_task", None)
def test_cached_data_source_shuffle_buffer_set(self):
self.add_task("cached_task_buf_2", self.tfds_source,
self._get_preps_with_cache_placeholder_buffer_size(2))
shutil.copytree(self.cached_task_dir,
os.path.join(self.test_data_dir, "cached_task_buf_2"))
self._mock_and_assert_cached_source("cached_task_buf_2", 2)
def test_cached_data_source_shuffle_buffer_None(self):
self.add_task("cached_task_buf_None", self.tfds_source,
self._get_preps_with_cache_placeholder_buffer_size(None))
shutil.copytree(self.cached_task_dir,
os.path.join(self.test_data_dir, "cached_task_buf_None"))
self._mock_and_assert_cached_source("cached_task_buf_None", None)
def test_proto_task(self):
self.verify_task_matches_fake_datasets(
"proto_task", use_cached=False, splits=["train"])
def test_num_input_examples(self):
self.assertEqual(30, self.cached_task.num_input_examples("train"))
self.assertEqual(10, self.cached_task.num_input_examples("validation"))
def test_disallow_shuffle(self):
task = dataset_providers.Task(
"no_shuffle",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=self.DEFAULT_PREPROCESSORS,
shuffle_buffer_size=None)
with self.assertRaisesWithLiteralMatch(
ValueError, "Shuffling is disallowed for Task 'no_shuffle' since its "
"`shuffle_buffer_size` was set to `None` on construction."):
task.get_dataset(None, shuffle=True)
with self.assertRaisesWithLiteralMatch(
ValueError, "Shuffling is disallowed for Task 'no_shuffle' since its "
"`shuffle_buffer_size` was set to `None` on construction."):
task.get_dataset(None, shuffle=True, shuffle_buffer_size=100)
task.get_dataset(None, shuffle=False)
def test_supports_caching(self):
self.assertFalse(
dataset_providers.Task(
"nosupports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[]).supports_caching)
self.assertFalse(
dataset_providers.Task(
"nosupports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[preprocessors.tokenize]).supports_caching)
self.assertTrue(
dataset_providers.Task(
"supports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[
preprocessors.tokenize,
dataset_providers.CacheDatasetPlaceholder()
]).supports_caching)
self.assertTrue(
dataset_providers.Task(
"supports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(required=True),
preprocessors.tokenize,
]).supports_caching)
self.assertTrue(
dataset_providers.Task(
"supports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(),
]).supports_caching)
def test_requires_caching(self):
self.assertFalse(
dataset_providers.Task(
"nosupports_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=self.function_source,
preprocessors=[preprocessors.tokenize]).requires_caching)
self.assertFalse(
dataset_providers.Task(
"supports_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=self.function_source,
preprocessors=[
preprocessors.tokenize,
dataset_providers.CacheDatasetPlaceholder()
]).requires_caching)
task = dataset_providers.Task(
"requires_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=self.function_source,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(required=True),
preprocessors.tokenize,
])
self.assertTrue(task.requires_caching)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task 'requires_cache' requires caching, but was called with "
"`use_cached=False`."):
task.get_dataset({"inputs": 512, "targets": 512}, use_cached=False)
# We haven't actually cached the task, so it still fails but with a
# different error.
with self.assertRaisesWithLiteralMatch(
AssertionError,
"'requires_cache' does not exist in any of the task cache "
"directories."):
task.get_dataset({"inputs": 512, "targets": 512}, use_cached=True)
def test_datasource_prohibits_caching(self):
function_source_no_cache = dataset_providers.FunctionDataSource(
dataset_fn=test_utils.get_fake_dataset,
splits=["train", "validation"],
caching_permitted=False)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Caching was requested for 'prohibits_cache', but the underlying data "
"source prohibits caching. Please remove `CacheDatasetPlaceholder` and "
"try again."
):
dataset_providers.Task(
"prohibits_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=function_source_no_cache,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(required=True),
preprocessors.tokenize,
])
def test_cache_exists(self):
self.assertTrue(self.cached_task.cache_dir)
self.cached_task.assert_cached()
self.assertEqual(
os.path.join(self.test_data_dir, "cached_task"),
self.cached_task.cache_dir)
self.assertFalse(self.uncached_task.cache_dir)
with self.assertRaisesWithLiteralMatch(
AssertionError,
"'tfds_task' does not exist in any of the task cache directories."):
TaskRegistry.get("tfds_task").assert_cached()
def test_get_cached_stats(self):
expected_train_stats = {
"examples": 3,
"inputs_tokens": 36, "inputs_max_tokens": 13,
"targets_tokens": 18, "targets_max_tokens": 6}
self.assertEqual(
expected_train_stats,
self.cached_task.get_cached_stats("train"))
# Check repeated call.
self.assertEqual(
expected_train_stats,
self.cached_task.get_cached_stats("train"))
expected_validation_stats = {
"examples": 2,
"inputs_tokens": 23, "inputs_max_tokens": 12,
"targets_tokens": 36, "targets_max_tokens": 21}
self.assertEqual(
expected_validation_stats,
self.cached_task.get_cached_stats("validation"))
with self.assertRaisesWithLiteralMatch(
ValueError, "Stats do not exist for 'cached_task' split: fake"):
self.cached_task.get_cached_stats("fake")
with self.assertRaisesWithLiteralMatch(
AssertionError,
"'uncached_task' does not exist in any of the task cache directories."):
self.uncached_task.get_cached_stats("train")
def test_set_global_cache_dirs(self):
utils.set_global_cache_dirs([])
self.assertFalse(self.cached_task.cache_dir)
utils.set_global_cache_dirs([self.test_data_dir])
self.assertTrue(self.cached_task.cache_dir)
def test_get_dataset_cached(self):
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=True, token_preprocessed=False)
# Test with token preprocessor.
self.cached_task._preprocessors = self.DEFAULT_PREPROCESSORS + (
test_utils.test_token_preprocessor,)
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=True, token_preprocessed=True)
def test_get_dataset_onthefly(self):
self.verify_task_matches_fake_datasets(
"uncached_task", use_cached=False)
# Test with token preprocessor.
self.cached_task._preprocessors = self.DEFAULT_PREPROCESSORS + (
test_utils.test_token_preprocessor,)
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=False, token_preprocessed=True)
def test_get_dataset_no_truncation(self):
self.verify_task_matches_fake_datasets(
"uncached_task", use_cached=False, sequence_length=None)
def test_sharding(self):
for i in range(3):
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=False, num_shards=i,
token_preprocessed=False)
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=True, num_shards=i,
token_preprocessed=False)
def test_feature_validation(self):
default_vocab = test_utils.sentencepiece_vocab()
features = {
"inputs":
dataset_providers.Feature(vocabulary=default_vocab, required=False),
"targets":
dataset_providers.Feature(vocabulary=default_vocab, required=True),
"inputs_rank2":
dataset_providers.Feature(
vocabulary=vocabularies.PassThroughVocabulary(5),
required=False,
rank=2),
"continuous_features":
dataset_providers.ContinuousFeature(
required=False,
rank=2)
}
def _materialize(output):
task = dataset_providers.Task(
"feature_validation_task",
self.function_source,
output_features=features,
preprocessors=(lambda _: tf.data.Dataset.from_tensors(output),),
metric_fns=[],
)
list(
task.get_dataset(
{"inputs": 13, "targets": 13, "inputs_rank2": 13}, "train",
use_cached=False
).as_numpy_iterator()
)
# Missing optional feature: OK
_materialize({"targets": [0]})
# Missing required feature.
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset is missing expected output feature after preprocessing: "
"targets"):
_materialize({"inputs": [0]})
# Wrong type.
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset has incorrect type for feature 'targets' after "
"preprocessing: Got string, expected int32"):
_materialize({"targets": ["wrong type"]})
# Wrong rank.
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset has incorrect rank for feature 'targets' after "
"preprocessing: Got 0, expected 1"):
_materialize({"targets": 0})
# Verify rank > 1 works.
_materialize({"targets": [0], "inputs_rank2": [[0, 0, 0], [0, 0, 0]]})
# Wrong rank (1 when 2 is expected).
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset has incorrect rank for feature 'inputs_rank2' after "
"preprocessing: Got 1, expected 2"):
_materialize({"targets": [0], "inputs_rank2": [0]})
# Test ContinuousFeature
_materialize({
"targets": [0],
"continuous_features": [[1, 1], [0, 1]]
})
def test_value_errors(self):
dataset_fn = (
lambda split, shuffle_files: tf.data.Dataset.from_tensors(["test"]))
output_features = {
"inputs": dataset_providers.Feature(test_utils.sentencepiece_vocab())
}
with self.assertRaisesWithLiteralMatch(
ValueError, "`CacheDatasetPlaceholder` can appear at most once in the "
"preprocessing pipeline. Found 2 in 'multiple_cache_placeholders'."):
dataset_providers.Task(
"multiple_cache_placeholders",
source=dataset_providers.FunctionDataSource(
dataset_fn=dataset_fn,
splits=["train", "validation"]
),
preprocessors=[
test_utils.test_text_preprocessor,
preprocessors.tokenize,
dataset_providers.CacheDatasetPlaceholder(),
test_utils.test_token_preprocessor,
dataset_providers.CacheDatasetPlaceholder()
],
output_features=output_features,
metric_fns=[])
with self.assertRaisesWithLiteralMatch(
ValueError,
"'test_token_preprocessor' has a `sequence_length` argument but occurs "
"before `CacheDatasetPlaceholder` in 'sequence_length_pre_cache'. This "
"is not allowed since the sequence length is specified at run time."):
dataset_providers.Task(
"sequence_length_pre_cache",
dataset_providers.FunctionDataSource(
dataset_fn=dataset_fn,
splits=["train"],
),
preprocessors=[
test_utils.test_text_preprocessor,
preprocessors.tokenize,
test_utils.test_token_preprocessor,
dataset_providers.CacheDatasetPlaceholder()
],
output_features=output_features,
metric_fns=[])
def test_tfds_source_splits(self):
default_splits_src = dataset_providers.TfdsDataSource("fake:0.0.0")
self.assertSameElements(["train", "validation"], default_splits_src.splits)
validation_split_src = dataset_providers.TfdsDataSource(
"fake:0.0.0", splits=["validation"])
self.assertSameElements(["validation"], validation_split_src.splits)
sliced_split_src = dataset_providers.TfdsDataSource(
"fake:0.0.0", splits={"validation": "train[0:1%]"})
self.assertSameElements(["validation"], sliced_split_src.splits)
def test_no_eos(self):
default_vocab = test_utils.sentencepiece_vocab()
features = {
"inputs":
dataset_providers.Feature(add_eos=True, vocabulary=default_vocab),
"targets":
dataset_providers.Feature(add_eos=False, vocabulary=default_vocab),
}
self.add_task("task_no_eos", self.function_source, output_features=features)
self.verify_task_matches_fake_datasets("task_no_eos", use_cached=False)
def test_dtype(self):
default_vocab = test_utils.sentencepiece_vocab()
features = {
"inputs":
# defaults to int32
dataset_providers.Feature(vocabulary=default_vocab),
"targets":
dataset_providers.Feature(dtype=tf.int64, vocabulary=default_vocab),
}
self.add_task(
"task_dtypes",
self.function_source,
preprocessors=self.DEFAULT_PREPROCESSORS + (
utils.map_over_dataset(
lambda x: {k: tf.cast(v, tf.int64) if k == "targets" else v # pylint:disable=g-long-lambda
for k, v in x.items()}
),
),
output_features=features
)
self.verify_task_matches_fake_datasets("task_dtypes", use_cached=False)
def test_num_epochs(self):
# Try repeating after preprocessing the dataset to verify the outputs are
# the same.
epoch1_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
# `random_task` has 3 examples per epoch.
epoch2_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0
).repeat(2).skip(3)
test_utils.assert_datasets_eq(epoch1_ds, epoch2_ds)
# Try repeating before preprocessing the dataset to verify the outputs are
# different.
epoch1_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
# `random_task` has 3 examples per epoch.
epoch2_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0, num_epochs=2
).skip(3)
test_utils.assert_datasets_neq(epoch1_ds, epoch2_ds)
def test_same_seeds_cached_match(self):
    """Identical shuffle seeds on the cached task yield identical datasets."""
    first = self.cached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=True, shuffle=True, seed=0)
    second = self.cached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=True, shuffle=True, seed=0)
    test_utils.assert_datasets_eq(first, second)
def test_different_seeds_cached_mismatch(self):
    """Different shuffle seeds on the cached task yield different datasets."""
    first = self.cached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=True, shuffle=True, seed=0)
    second = self.cached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=True, shuffle=True, seed=42)
    test_utils.assert_datasets_neq(first, second)
def test_same_seeds_uncached_match(self):
    """Identical shuffle seeds on the uncached task yield identical datasets."""
    first = self.uncached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=0)
    second = self.uncached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=0)
    test_utils.assert_datasets_eq(first, second)
def test_different_seeds_uncached_mismatch(self):
    """Different shuffle seeds on the uncached task yield different datasets."""
    first = self.uncached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=0)
    second = self.uncached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=42)
    test_utils.assert_datasets_neq(first, second)
def test_same_seeds_random_tp_uncached_match(self):
    """Same seed with random token preprocessing is reproducible across repeats."""
    first = self.random_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=0).repeat(4)
    second = self.random_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=0).repeat(4)
    test_utils.assert_datasets_eq(first, second)
def test_different_seeds_random_tp_uncached_mismatch(self):
    """Different seeds with random token preprocessing diverge."""
    first = self.random_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=0)
    second = self.random_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=True, seed=42)
    test_utils.assert_datasets_neq(first, second)
def test_no_shuffle_with_seed_cached_match(self):
    """With shuffle=False the seed is irrelevant for the cached task."""
    first = self.cached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=True, shuffle=False, seed=0)
    second = self.cached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=True, shuffle=False, seed=42)
    test_utils.assert_datasets_eq(first, second)
def test_no_shuffle_with_seed_uncached_match(self):
    """With shuffle=False the seed is irrelevant for the uncached task."""
    first = self.uncached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=False, seed=0)
    second = self.uncached_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=False, seed=42)
    test_utils.assert_datasets_eq(first, second)
def test_no_shuffle_different_seeds_random_tp_uncached_mismatch(self):
    """Even without shuffling, the seed still drives random token preprocessing."""
    first = self.random_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=False, seed=0)
    second = self.random_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=False, shuffle=False, seed=42)
    test_utils.assert_datasets_neq(first, second)
def test_plaintext_to_pretokenized_rename(self):
    """Cached plaintext features are renamed to `*_pretokenized`."""
    ds = self.cached_plaintext_task.get_dataset(
        {"inputs": 13, "targets": 13},
        split="train", use_cached=True, shuffle=False)
    first_example = next(ds.as_numpy_iterator())
    self.assertSetEqual(
        set(first_example.keys()),
        {"inputs", "inputs_pretokenized",
         "targets", "targets_pretokenized"})
def test_list_shards(self):
    """Each source type reports the expected shard basenames for "train".

    Replaces eleven copy-pasted assertions with a single data-driven loop;
    the (task name, expected shards) pairs are unchanged.
    """

    def _get_formatted_shards_list(task_name, split):
        # Strip directories and sort so the comparison is order-independent.
        shards = dataset_providers.get_mixture_or_task(
            task_name).source.list_shards(split)
        shards = [s.split("/")[-1] for s in shards]
        return sorted(shards)

    two_tfrecord_shards = [
        "train.tfrecord-00000-of-00002", "train.tfrecord-00001-of-00002"]
    expected_shards = {
        "tfds_task": two_tfrecord_shards,
        "text_line_task": [
            "train.tsv-00000-of-00002", "train.tsv-00001-of-00002"],
        "tf_example_task": two_tfrecord_shards,
        "proto_task": two_tfrecord_shards,
        "function_task": ["train"],
        "fully_processed_precache": ["train"],
        "tokenized_postcache": ["train"],
        "random_task": ["train"],
        "uncached_task": two_tfrecord_shards,
        "cached_task": two_tfrecord_shards,
        "cached_plaintext_task": two_tfrecord_shards,
    }
    for task_name, expected in expected_shards.items():
        self.assertListEqual(
            _get_formatted_shards_list(task_name, "train"), expected)
class MixturesTest(test_utils.FakeTaskTest):
  """Tests for Mixture registration, rates, and mixed-dataset sampling.

  Fix: `test_splits` previously passed a stray positional `30` to
  `assertSameElements`, which was silently consumed as the assertion's
  `msg` argument (copy-paste from `test_num_examples`). It is removed.
  """

  def test_tasks(self):
    """A mixture exposes its member tasks and their rates."""
    self.add_task("task1", self.function_source)
    self.add_task("task2", self.function_source)
    MixtureRegistry.add("test_mix1", [("task1", 1), ("task2", 1)])
    mix = MixtureRegistry.get("test_mix1")
    self.assertEqual(len(mix.tasks), 2)
    for task in mix.tasks:
      self.verify_task_matches_fake_datasets(task.name, use_cached=False)
      self.assertEqual(mix.get_rate(task), 1)

  def test_num_examples(self):
    """num_input_examples sums the member tasks' example counts."""
    MixtureRegistry.add("test_mix2", [(self.cached_task.name, 1)])
    mix = MixtureRegistry.get("test_mix2")
    self.assertEqual(mix.num_input_examples(split="train"), 30)

  def test_splits(self):
    """A mixture's splits are the union of its tasks' splits."""
    MixtureRegistry.add(
        "test_mix",
        [(self.cached_task.name, 1), (self.uncached_task.name, 1)]
    )
    mix = MixtureRegistry.get("test_mix")
    # Fixed: a stray `30` used to be passed here as the `msg` argument.
    self.assertSameElements(["train", "validation"], mix.splits)

  def test_get_dataset(self):
    """A single-task mixture matches the task's dataset (repeated)."""
    MixtureRegistry.add("test_mix3", [(self.cached_task.name, 1)])
    task_ds = TaskRegistry.get_dataset(
        self.cached_task.name, {
            "inputs": 13,
            "targets": 13
        },
        "validation",
        use_cached=False,
        shuffle=False)
    mix_ds = MixtureRegistry.get("test_mix3").get_dataset(
        {
            "inputs": 13,
            "targets": 13
        }, "validation", use_cached=False, shuffle=False)
    # mix.get_dataset strips non-output features
    task_ds = task_ds.map(lambda x: {k: x[k] for k in ["inputs", "targets"]})
    # limit size since get_dataset repeats the dataset
    test_utils.assert_datasets_eq(task_ds.repeat(2), mix_ds.take(4))

  def test_get_dataset_mix(self):
    """Sampling from an equal-rate mixture draws from both tasks."""

    @utils.map_over_dataset
    def _constant_preprocessor(unused_x, val):
      return {
          "targets": tf.constant([val], tf.int32),
          "inputs": tf.constant([val], tf.int32),
      }

    self.add_task(
        "two_task",
        self.function_source,
        preprocessors=(functools.partial(_constant_preprocessor, val=2),)
    )
    self.add_task(
        "three_task",
        self.function_source,
        preprocessors=(functools.partial(_constant_preprocessor, val=3),)
    )
    MixtureRegistry.add("test_mix", [("two_task", 1), ("three_task", 1)])
    sequence_length = {"inputs": 2, "targets": 2}
    mix_ds = MixtureRegistry.get("test_mix").get_dataset(
        sequence_length, "train", seed=13).take(1000)
    # 2481 is the deterministic sum for seed=13 over 1000 draws of {2, 3}.
    res = sum(int(item["inputs"][0]) for item in mix_ds.as_numpy_iterator())
    self.assertEqual(res, 2481)

  def test_get_dataset_passthrough_features(self):
    """Features named in passthrough_features survive output-feature stripping."""

    @utils.map_over_dataset
    def _constant_feature_preprocessor(unused_x, val):
      return {
          "targets": tf.constant([val], tf.int32),
          "inputs": tf.constant([val], tf.int32),
          "feature": tf.constant([val], tf.int32),
      }

    self.add_task(
        "two_task",
        self.function_source,
        preprocessors=(functools.partial(_constant_feature_preprocessor,
                                         val=2),))
    self.add_task(
        "three_task",
        self.function_source,
        preprocessors=(functools.partial(_constant_feature_preprocessor,
                                         val=3),))
    MixtureRegistry.add("test_mix", [("two_task", 1), ("three_task", 1)])
    sequence_length = {"inputs": 2, "targets": 2}
    passthrough_features = ["feature"]
    mix_ds = MixtureRegistry.get("test_mix").get_dataset(
        sequence_length,
        "train",
        seed=13,
        passthrough_features=passthrough_features).take(1000)
    # output features are defined as "inputs" and "targets" by default.
    res = sum(int(item["feature"][0]) for item in mix_ds.as_numpy_iterator())
    self.assertEqual(res, 2481)

  def test_copy_pretokenized(self):
    """copy_pretokenized controls whether *_pretokenized features are kept."""

    @utils.map_over_dataset
    def _constant_preprocessor(unused_x, val):
      return {
          "targets": tf.constant([val], tf.int32),
          "targets_pretokenized": tf.constant(f"targets_{val}"),
          "inputs": tf.constant([val], tf.int32),
          "inputs_pretokenized": tf.constant(f"inputs_{val}")
      }

    self.add_task(
        "two_task",
        self.function_source,
        preprocessors=(functools.partial(_constant_preprocessor, val=2),)
    )
    self.add_task(
        "three_task",
        self.function_source,
        preprocessors=(functools.partial(_constant_preprocessor, val=3),)
    )
    MixtureRegistry.add("test_mix", [("two_task", 1), ("three_task", 1)])
    sequence_length = {"inputs": 2, "targets": 2}
    mix_ds = MixtureRegistry.get("test_mix").get_dataset(
        sequence_length, "train", seed=13, copy_pretokenized=True).take(1000)
    inputs_pretokenized = set(
        ex["inputs_pretokenized"] for ex in mix_ds.as_numpy_iterator())
    targets_pretokenized = set(
        ex["targets_pretokenized"] for ex in mix_ds.as_numpy_iterator())
    self.assertCountEqual([b"inputs_2", b"inputs_3"], inputs_pretokenized)
    self.assertCountEqual([b"targets_2", b"targets_3"], targets_pretokenized)
    mix_ds = MixtureRegistry.get("test_mix").get_dataset(
        sequence_length, "train", seed=13, copy_pretokenized=False).take(1000)
    for ex in mix_ds.as_numpy_iterator():
      self.assertNoCommonElements(
          ["inputs_pretokenized", "targets_pretokenized"], ex.keys())

  def test_get_rate_with_callable(self):
    """Rates may be callables evaluated against the task."""

    def fn(t):
      self.assertEqual(t.name, "task4")
      return 42

    self.add_task("task4", self.function_source)
    task = TaskRegistry.get("task4")
    MixtureRegistry.add("test_mix5", [("task4", fn)])
    mix = MixtureRegistry.get("test_mix5")
    self.assertEqual(mix.get_rate(task), 42)

  def test_mixture_of_mixtures(self):
    """Nested mixtures flatten, splitting the submixture's rate evenly."""
    self.add_task("task_a", self.function_source)
    self.add_task("task_b", self.function_source)
    self.add_task("task_c", self.function_source)
    MixtureRegistry.add("another_mix", [("task_a", 1), ("task_b", 1)])
    MixtureRegistry.add("supermix", [("another_mix", 1), ("task_c", 1)])
    supermix = MixtureRegistry.get("supermix")
    names = [task.name for task in supermix.tasks]
    self.assertEqual(names, ["task_a", "task_b", "task_c"])
    self.assertEqual([supermix.get_rate(t) for t in supermix.tasks],
                     [0.5, 0.5, 1])

  def test_mixture_of_mixtures_dupe(self):
    """A task listed both directly and via a submixture accumulates rate."""
    self.add_task("task2_a", self.function_source)
    self.add_task("task2_b", self.function_source)
    self.add_task("task2_c", self.function_source)
    MixtureRegistry.add("yet_another_mix", [("task2_a", 1), ("task2_b", 1)])
    MixtureRegistry.add("supermix_with_dupe", [("yet_another_mix", 1),
                                               ("task2_a", 1), ("task2_c", 1)])
    supermix = MixtureRegistry.get("supermix_with_dupe")
    names = [task.name for task in supermix.tasks]
    self.assertEqual(names, ["task2_a", "task2_b", "task2_c"])
    self.assertEqual([supermix.get_rate(t) for t in supermix.tasks],
                     [1.5, 0.5, 1])

  def test_mixture_with_sample_fn(self):
    """A custom sample_fn replaces the default random interleaving."""

    def sequential_intereave(datasets: Sequence[tf.data.Dataset],
                             rates: Sequence[float],
                             sample_seed: Optional[int]) -> tf.data.Dataset:
      """Sample function that simply concatenates two datasets."""
      del rates, sample_seed
      return datasets[0].concatenate(datasets[1])

    def gen_dataset(split,
                    shuffle_files=False,
                    seed=None,
                    val: str = "") -> tf.data.Dataset:
      del split, shuffle_files, seed  # Need this to pass arg validation.
      return tf.data.Dataset.from_tensor_slices({
          "inputs": [[val]] * 3,
      })

    # Register two very simple tasks, each with 3 repeated string values.
    vocab = vocabularies.PassThroughVocabulary(0)
    tasks = []
    for task_name in ["first", "second"]:
      tasks.append(self.add_task(
          task_name,
          dataset_providers.FunctionDataSource(
              dataset_fn=functools.partial(gen_dataset, val=task_name),
              splits=["train"]),
          preprocessors=[],
          output_features={
              "inputs": dataset_providers.Feature(vocab, dtype=tf.string)
          }))

    # Verify that by default, interleaving of datasets is random.
    MixtureRegistry.add("default_mix", [("first", 1), ("second", 1)])
    default_ds = MixtureRegistry.get("default_mix").get_dataset(
        None, "train", shuffle=False, seed=2, num_epochs=1)
    expected = [b"second", b"first", b"second", b"first", b"second", b"first"]
    actual = [x["inputs"] for x in default_ds.as_numpy_iterator()]
    self.assertEqual(expected, actual)

    # Verify that we can modify sampling function correctly.
    MixtureRegistry.add(
        "sequential_mix", [("first", 1), ("second", 1)],
        sample_fn=sequential_intereave)
    sequential_ds = MixtureRegistry.get("sequential_mix").get_dataset(
        None, "train", shuffle=False, seed=2, num_epochs=1)
    expected = [b"first"] * 3 + [b"second"] * 3
    actual = [x["inputs"] for x in sequential_ds.as_numpy_iterator()]
    self.assertEqual(expected, actual)
class GetDatasetTest(parameterized.TestCase, tf.test.TestCase):
  """End-to-end tests for `dataset_providers.get_dataset`.

  Covers feature conversion (packed/unpacked), partial and multi-dimensional
  sequence-length trimming, multiple splits, and sharding. The expected-value
  tables below are literal per-feature token sequences and are intentionally
  kept verbatim.
  """

  def test_get_dataset_enc_dec_unpacked(self):
    """Unpacked enc/dec conversion: EOS appended, features padded to length."""
    mixture_or_task_name = "enc_dec_unpacked"
    x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
         {"inputs": [8, 4], "targets": [4]},
         {"inputs": [5, 6, 7], "targets": [6, 5]}]
    ds = create_default_dataset(x)
    dataset_fn = lambda split, shuffle_files: ds
    register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
    task_feature_lengths = {"inputs": 7, "targets": 5}
    converter = feature_converters.EncDecFeatureConverter(pack=False)
    output_ds = dataset_providers.get_dataset(
        mixture_or_task_name=mixture_or_task_name,
        task_feature_lengths=task_feature_lengths,
        dataset_split="train",
        shuffle=False,
        feature_converter=converter)
    # Token 1 is EOS; 0 is padding. decoder_input_tokens is targets shifted
    # right by one; loss weights mark non-padding target positions.
    expected = [{
        "encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
        "decoder_target_tokens": [3, 9, 1, 0, 0],
        "decoder_input_tokens": [0, 3, 9, 1, 0],
        "decoder_loss_weights": [1, 1, 1, 0, 0],
    }, {
        "encoder_input_tokens": [8, 4, 1, 0, 0, 0, 0],
        "decoder_target_tokens": [4, 1, 0, 0, 0],
        "decoder_input_tokens": [0, 4, 1, 0, 0],
        "decoder_loss_weights": [1, 1, 0, 0, 0],
    }, {
        "encoder_input_tokens": [5, 6, 7, 1, 0, 0, 0],
        "decoder_target_tokens": [6, 5, 1, 0, 0],
        "decoder_input_tokens": [0, 6, 5, 1, 0],
        "decoder_loss_weights": [1, 1, 1, 0, 0],
    }]
    expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
    assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)

  @parameterized.parameters(
      dict(
          task_name="enc_dec_partial_trim_both",
          task_feature_lengths={
              "inputs": 7,
              "targets": 2
          },
          expect_trim_inputs=True,
          expect_trim_targets=True),
      dict(
          task_name="enc_dec_partial_trim_targets",
          task_feature_lengths={
              "inputs": None,
              "targets": 2
          },
          expect_trim_inputs=False,
          expect_trim_targets=True),
      dict(
          task_name="enc_dec_partial_trim_inputs",
          task_feature_lengths={
              "inputs": 7,
              "targets": None
          },
          expect_trim_inputs=True,
          expect_trim_targets=False),
      dict(
          task_name="enc_dec_partial_trim_neither",
          task_feature_lengths={
              "inputs": None,
              "targets": None
          },
          expect_trim_inputs=False,
          expect_trim_targets=False),
      dict(
          task_name="enc_dec_partial_trim_nothing",
          task_feature_lengths=None,
          expect_trim_inputs=False,
          expect_trim_targets=False))
  def test_partial_sequence_length(self, task_name, task_feature_lengths,
                                   expect_trim_inputs, expect_trim_targets):
    """A None length for a feature (or None overall) disables trimming for it."""
    x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
         {"inputs": [8, 4], "targets": [4]},
         {"inputs": [5, 6, 7], "targets": [6, 5]}]
    ds = create_default_dataset(x)
    dataset_fn = lambda split, shuffle_files: ds
    register_dummy_task(task_name, dataset_fn=dataset_fn)
    # Unlike the other tests, don't use a feature converter. Instead, test the
    # task.get_dataset method directly, which is similar to how evaluation.py
    # infers feature lengths w/trimming.
    task = dataset_providers.get_mixture_or_task(task_name)
    output_ds = task.get_dataset(
        sequence_length=task_feature_lengths,
        shuffle=False)
    expected = [{
        "inputs": [7, 8, 5, 6, 9, 4, 3, 1],
        "targets": [3, 9, 1],
    }, {
        "inputs": [8, 4, 1],
        "targets": [4, 1],
    }, {
        "inputs": [5, 6, 7, 1],
        "targets": [6, 5, 1],
    }]
    # Trimming keeps length-1 tokens plus EOS; only over-length rows change.
    if expect_trim_inputs:
      expected[0]["inputs"] = [7, 8, 5, 6, 9, 4, 1]
    if expect_trim_targets:
      expected[0]["targets"] = [3, 1]
      expected[2]["targets"] = [6, 1]
    expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
    assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)

  @parameterized.parameters(
      dict(
          task_name="enc_dec_multidim_trim_both",
          task_feature_lengths={
              "inputs": (2, 5),
              "targets": 2
          },
          expect_trim_inputs=True,
          expect_trim_targets=True,
      ),
      dict(
          task_name="enc_dec_multidim_trim_inputs",
          task_feature_lengths={
              "inputs": (2, 5),
              "targets": None
          },
          expect_trim_inputs=True,
          expect_trim_targets=False,
      ),
      dict(
          task_name="enc_dec_multidim_trim_targets",
          task_feature_lengths={
              "inputs": None,
              "targets": 2
          },
          expect_trim_inputs=False,
          expect_trim_targets=True,
      ),
      dict(
          task_name="enc_dec_no_multidim_trim",
          task_feature_lengths={
              "inputs": None,
              "targets": None
          },
          expect_trim_inputs=False,
          expect_trim_targets=False
      )
  )
  def test_multidimension_sequence_length(self,
                                          task_name,
                                          task_feature_lengths,
                                          expect_trim_inputs,
                                          expect_trim_targets):
    """Rank-2 features can be trimmed along every axis via a tuple length."""
    x = [{"inputs": [[7, 8, 5, 6, 9, 4, 3],
                     [2, 3, 4, 5, 0, 0, 0],
                     [6, 7, 1, 0, 0, 0, 0]],
          "targets": [3, 9]},
         {"inputs": [[8, 4],
                     [1, 0],
                     [2, 3]],
          "targets": [4]},
         {"inputs": [[5, 6, 7]],
          "targets": [6, 5, 1]},
         {"inputs": [[7, 8, 9, 1, 2, 3, 4, 5, 6]],
          "targets": [10, 11, 1]}]
    ds = tf.data.Dataset.from_generator(
        lambda: x,
        output_types={"inputs": tf.int32, "targets": tf.int32},
        output_shapes={"inputs": (None, None), "targets": (None,)})
    dataset_fn = lambda split, shuffle_files: ds
    # Registered inline (not via register_dummy_task) so "inputs" can be
    # declared rank=2 and EOS-appending preprocessors can be omitted.
    dataset_providers.TaskRegistry.add(
        task_name,
        source=dataset_providers.FunctionDataSource(
            dataset_fn=dataset_fn, splits=["train", "validation"]),
        preprocessors=[
            dataset_providers.CacheDatasetPlaceholder(),
        ],
        output_features={
            "inputs": dataset_providers.Feature(
                test_utils.sentencepiece_vocab(), rank=2),
            "targets": dataset_providers.Feature(
                test_utils.sentencepiece_vocab())
        },
        metric_fns=[])
    # Unlike the other tests, don't use a feature converter. Instead, test the
    # task.get_dataset method directly, which is similar to how evaluation.py
    # infers feature lengths w/trimming.
    task = dataset_providers.get_mixture_or_task(task_name)
    output_ds = task.get_dataset(
        sequence_length=task_feature_lengths,
        shuffle=False)
    expected = copy.deepcopy(x)
    if expect_trim_inputs:
      # (2, 5) trims to at most 2 rows of at most 5 tokens each.
      expected[0]["inputs"] = [[7, 8, 5, 6, 9],
                               [2, 3, 4, 5, 0]]
      expected[1]["inputs"] = [[8, 4],
                               [1, 0]]
      expected[3]["inputs"] = [[7, 8, 9, 1, 2]]
    if expect_trim_targets:
      expected[2]["targets"] = [6, 5]
      expected[3]["targets"] = [10, 11]
    expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
    assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)

  def test_get_dataset_enc_dec_packed(self):
    """Packed conversion combines short examples with segment ids/positions."""
    mixture_or_task_name = "enc_dec_packed"
    x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
         {"inputs": [8, 4], "targets": [4]},
         {"inputs": [5, 6, 7], "targets": [6, 5]}]
    ds = create_default_dataset(x)
    dataset_fn = lambda split, shuffle_files: ds
    register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
    task_feature_lengths = {"inputs": 7, "targets": 5}
    converter = feature_converters.EncDecFeatureConverter(pack=True)
    output_ds = dataset_providers.get_dataset(
        mixture_or_task_name=mixture_or_task_name,
        task_feature_lengths=task_feature_lengths,
        dataset_split="train",
        shuffle=False,
        feature_converter=converter)
    expected = [{
        # Example 1 is trimmed
        "encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
        "encoder_segment_ids": [1, 1, 1, 1, 1, 1, 1],
        "encoder_positions": [0, 1, 2, 3, 4, 5, 6],
        "decoder_target_tokens": [3, 9, 1, 0, 0],
        "decoder_input_tokens": [0, 3, 9, 0, 0],
        "decoder_loss_weights": [1, 1, 1, 0, 0],
        "decoder_segment_ids": [1, 1, 1, 0, 0],
        "decoder_positions": [0, 1, 2, 0, 0],
    }, {
        # Example 2 and 3 are packed together
        "encoder_input_tokens": [8, 4, 1, 5, 6, 7, 1],
        "encoder_segment_ids": [1, 1, 1, 2, 2, 2, 2],
        "encoder_positions": [0, 1, 2, 0, 1, 2, 3],
        "decoder_target_tokens": [4, 1, 6, 5, 1],
        "decoder_input_tokens": [0, 4, 0, 6, 5],
        "decoder_loss_weights": [1, 1, 1, 1, 1],
        "decoder_segment_ids": [1, 1, 2, 2, 2],
        "decoder_positions": [0, 1, 0, 1, 2],
    }]
    expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
    assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)

  def test_get_dataset_both_train_and_validation_splits(self):
    """get_dataset serves each registered split independently."""
    mixture_or_task_name = "both_train_and_validation_splits"
    x_train = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]}]
    x_val = [{"inputs": [8, 4], "targets": [4]}]
    datasets = {
        "train": create_default_dataset(x_train),
        "validation": create_default_dataset(x_val)
    }
    dataset_fn = lambda split, shuffle_files: datasets[split]
    register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
    task_feature_lengths = {"inputs": 7, "targets": 5}
    output_ds = {}
    for split in ["train", "validation"]:
      converter = feature_converters.EncDecFeatureConverter(pack=False)
      output_ds[split] = dataset_providers.get_dataset(
          mixture_or_task_name=mixture_or_task_name,
          task_feature_lengths=task_feature_lengths,
          dataset_split=split,
          shuffle=False,
          feature_converter=converter)
    expected_train = {
        "encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
        "decoder_target_tokens": [3, 9, 1, 0, 0],
        "decoder_input_tokens": [0, 3, 9, 1, 0],
        "decoder_loss_weights": [1, 1, 1, 0, 0],
    }
    expected_val = {
        "encoder_input_tokens": [8, 4, 1, 0, 0, 0, 0],
        "decoder_target_tokens": [4, 1, 0, 0, 0],
        "decoder_input_tokens": [0, 4, 1, 0, 0],
        "decoder_loss_weights": [1, 1, 0, 0, 0],
    }
    expected_dtypes = {feat: tf.int32 for feat in expected_train.keys()}
    assert_dataset(
        output_ds["train"], expected_train, expected_dtypes=expected_dtypes)
    assert_dataset(
        output_ds["validation"], expected_val, expected_dtypes=expected_dtypes)

  def test_get_dataset_enc_dec_sharded(self):
    """shard_info selects every num_shards-th example for the given index."""
    mixture_or_task_name = "enc_dec_sharded"
    x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
         {"inputs": [8, 4], "targets": [4]},
         {"inputs": [5, 6, 7], "targets": [6, 5]}]
    ds = create_default_dataset(x)
    dataset_fn = lambda split, shuffle_files: ds
    register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
    task_feature_lengths = {"inputs": 7, "targets": 5}
    converter = feature_converters.EncDecFeatureConverter(pack=False)
    shard_info = dataset_providers.ShardInfo(index=0, num_shards=2)
    output_ds = dataset_providers.get_dataset(
        mixture_or_task_name=mixture_or_task_name,
        task_feature_lengths=task_feature_lengths,
        dataset_split="train",
        shuffle=False,
        feature_converter=converter,
        shard_info=shard_info)
    # Example index 1 should not be present in the sharded dataset.
    expected = [{
        "encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
        "decoder_target_tokens": [3, 9, 1, 0, 0],
        "decoder_input_tokens": [0, 3, 9, 1, 0],
        "decoder_loss_weights": [1, 1, 1, 0, 0],
    }, {
        "encoder_input_tokens": [5, 6, 7, 1, 0, 0, 0],
        "decoder_target_tokens": [6, 5, 1, 0, 0],
        "decoder_input_tokens": [0, 6, 5, 1, 0],
        "decoder_loss_weights": [1, 1, 1, 0, 0],
    }]
    expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
    assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)

  def test_get_dataset_enc_dec_sharded_and_packed(self):
    """Sharding happens first; packing then combines the shard's examples."""
    mixture_or_task_name = "enc_dec_sharded_and_packed"
    x = [{"inputs": [7, 8], "targets": [3, 9]},
         {"inputs": [8, 4], "targets": [4]},
         {"inputs": [5, 6, 7], "targets": [6]}]
    ds = create_default_dataset(x)
    dataset_fn = lambda split, shuffle_files: ds
    register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
    task_feature_lengths = {"inputs": 7, "targets": 5}
    converter = feature_converters.EncDecFeatureConverter(pack=True)
    shard_info = dataset_providers.ShardInfo(index=0, num_shards=2)
    output_ds = dataset_providers.get_dataset(
        mixture_or_task_name=mixture_or_task_name,
        task_feature_lengths=task_feature_lengths,
        dataset_split="train",
        shuffle=False,
        feature_converter=converter,
        shard_info=shard_info)
    # Packing should be done after the sharding.
    expected = {
        "encoder_input_tokens": [7, 8, 1, 5, 6, 7, 1],
        "encoder_segment_ids": [1, 1, 1, 2, 2, 2, 2],
        "encoder_positions": [0, 1, 2, 0, 1, 2, 3],
        "decoder_target_tokens": [3, 9, 1, 6, 1],
        "decoder_input_tokens": [0, 3, 9, 0, 6],
        "decoder_loss_weights": [1, 1, 1, 1, 1],
        "decoder_segment_ids": [1, 1, 1, 2, 2],
        "decoder_positions": [0, 1, 2, 0, 1],
    }
    expected_dtypes = {feat: tf.int32 for feat in expected.keys()}
    assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)
def register_dummy_task(
    task_name: str,
    dataset_fn: Callable[[str, str], tf.data.Dataset],
    output_feature_names: Sequence[str] = ("inputs", "targets")) -> None:
  """Register a dummy task for GetDatasetTest.

  Args:
    task_name: name under which the task is added to the TaskRegistry.
    dataset_fn: callable (split, shuffle_files) -> tf.data.Dataset supplying
      the raw examples for both "train" and "validation" splits.
    output_feature_names: features given a sentencepiece-vocab Feature; all
      share the same vocabulary and get EOS appended after trimming.
  """
  dataset_providers.TaskRegistry.add(
      task_name,
      source=dataset_providers.FunctionDataSource(
          dataset_fn=dataset_fn, splits=["train", "validation"]),
      preprocessors=[
          dataset_providers.CacheDatasetPlaceholder(),
          preprocessors.append_eos_after_trim,
      ],
      output_features={
          feat: dataset_providers.Feature(test_utils.sentencepiece_vocab())
          for feat in output_feature_names
      },
      metric_fns=[])
# Entry point: run all tests in this module under absl's test runner.
if __name__ == "__main__":
  absltest.main()
| seqio/dataset_providers_test.py | 56,109 | Register a dummy task for GetDatasetTest.
Sample function that simply concatenates two datasets.
Tests for seqio.dataset_providers.
Copyright 2022 The SeqIO Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Lint as: python3 Good signatures. Bad signatures. pylint:disable=unused-argument pylint:enable=unused-argument Not a great shuffle. Good shuffle. We haven't actually cached the task, so it still fails but with a different error. Check repeated call. Test with token preprocessor. Test with token preprocessor. Missing optional feature: OK Missing required feature. Wrong type. Wrong rank. Verify rank > 1 works. Wrong rank (1 when 2 is expected). Test ContinuousFeature defaults to int32 pylint:disable=g-long-lambda Try repeating after preprocessing the dataset to verify the outputs are the same. `random_task` has 3 examples per epoch. Try repeating before preprocessing the dataset to verify the outputs are different. `random_task` has 3 examples per epoch. mix.get_dataset strips non-output features limit size since get_dataset repeats the dataset output features are defined as "inputs" and "targets" by default. Need this to pass arg validation. Register two very simple tasks, each with 3 repeated string values. Verify that by default, interleaving of datasets is random. Verify that we can modify sampling function correctly. Unlike the other tests, don't use a feature converter. Instead, test the task.get_dataset method directly, which is similar to how evaluation.py infers feature lengths w/trimming. Unlike the other tests, don't use a feature converter. 
Instead, test the task.get_dataset method directly, which is similar to how evaluation.py infers feature lengths w/trimming. Example 1 is trimmed Example 2 and 3 are packed together Example index 1 should not be present in the sharded dataset. Packing should be done after the sharding. | 2,349 | en | 0.821461 |
from copy import copy
from typing import Optional
import torch
import pytorch_lightning as pl
from transformers import (
EncoderDecoderModel,
RobertaModel,
RobertaConfig,
GPT2LMHeadModel,
GPT2Config,
RobertaTokenizer,
GPT2Tokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import nltk

# NOTE(review): runs at import time and may hit the network on first use;
# presumably WordNet is needed by a metric computed elsewhere — confirm.
nltk.download("wordnet")
class EncoderDecoderModule(pl.LightningModule):
def __init__(
    self,
    learning_rate: float,
    src_tokenizer: RobertaTokenizer,
    trg_tokenizer: GPT2Tokenizer,
    num_epochs: int,
    num_batches: int,
    num_gpus: int,
    num_layers_encoder: Optional[int] = None,
    num_layers_decoder: Optional[int] = None,
    encoder_name_or_path: Optional[str] = None,
    decoder_name_or_path: Optional[str] = None,
    **kwargs,
):
    """Builds a RoBERTa→GPT-2 EncoderDecoderModel.

    Two construction modes:
      * Pretrained: both `*_name_or_path` given — load pretrained weights,
        optionally truncating to `num_layers_*` layers.
      * From scratch: both `num_layers_*` given — randomly initialized
        configs with the requested depths.
    Raises ValueError if neither pair is fully specified.

    Args:
        learning_rate: peak LR for AdamW (see configure_optimizers).
        src_tokenizer: tokenizer for diffs; its size drives encoder
            embedding resizing.
        trg_tokenizer: tokenizer for commit messages.
        num_epochs: total epochs, used to size the LR schedule.
        num_batches: batches per epoch, used to size the LR schedule.
        num_gpus: devices used; divides the warmup step count.
        num_layers_encoder: optional encoder depth (truncates if smaller
            than the pretrained depth).
        num_layers_decoder: optional decoder depth, same semantics.
        encoder_name_or_path: HF name/path of the pretrained encoder.
        decoder_name_or_path: HF name/path of the pretrained decoder.
        **kwargs: ignored; accepted for config-driven instantiation.
    """
    super().__init__()

    self._src_tokenizer = src_tokenizer
    self._trg_tokenizer = trg_tokenizer
    self._num_epochs = num_epochs
    self._num_batches = num_batches
    self._num_gpus = num_gpus
    self.learning_rate = learning_rate

    self.save_hyperparameters()

    if encoder_name_or_path is not None and decoder_name_or_path is not None:
        # use pretrained RoBERTa as encoder
        encoder = RobertaModel.from_pretrained(encoder_name_or_path)
        # resize embeddings to match vocabulary size
        encoder.resize_token_embeddings(len(self._src_tokenizer))
        # remove layers if necessary
        if num_layers_encoder is not None and num_layers_encoder < encoder.config.num_hidden_layers:
            encoder = EncoderDecoderModule.remove_layers_from_model(encoder, num_layers_encoder, is_gpt=False)

        # use pretrained GPT-2 as decoder; cross-attention is required so the
        # decoder can attend over encoder states
        config = GPT2Config.from_pretrained(decoder_name_or_path)
        config.is_decoder = True
        config.add_cross_attention = True
        decoder = GPT2LMHeadModel.from_pretrained(decoder_name_or_path, config=config)
        # remove layers if necessary
        if num_layers_decoder is not None and num_layers_decoder < decoder.config.n_layer:
            decoder = EncoderDecoderModule.remove_layers_from_model(decoder, num_layers_decoder, is_gpt=True)

    elif num_layers_decoder is not None and num_layers_encoder is not None:
        # use randomly initialized RoBERTa as encoder
        encoder_config = RobertaConfig()
        encoder_config.num_hidden_layers = num_layers_encoder
        encoder = RobertaModel(config=encoder_config)
        # resize embeddings to match vocabulary size
        encoder.resize_token_embeddings(len(self._src_tokenizer))

        # use randomly initialized GPT-2 as decoder
        decoder_config = GPT2Config()
        decoder_config.n_layer = num_layers_decoder
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        decoder = GPT2LMHeadModel(config=decoder_config)
    else:
        raise ValueError(
            "You have to specify either num_layers for training from scratch \
            or paths for loading pretrained models"
        )

    self.model = EncoderDecoderModel(encoder=encoder, decoder=decoder)

    # cache is currently not supported by EncoderDecoder framework
    self.model.decoder.config.use_cache = False

    # do not tie output embeddings to input embeddings
    self.model.config.tie_word_embeddings = False

    # to make logs for different batch sizes prettier
    self.examples_count = 0
def forward(self, batch):
    """Run the seq2seq model on one diff→message batch; returns model outputs."""
    diff_ids = batch["diff_input_ids"]
    diff_mask = batch["diff_attention_mask"]
    msg_ids = batch["msg_input_ids"]
    msg_mask = batch["msg_attention_mask"]
    return self.model(
        input_ids=diff_ids,
        attention_mask=diff_mask,
        decoder_input_ids=msg_ids,
        decoder_attention_mask=msg_mask,
        labels=batch["msg_labels"],
    )
def training_step(self, batch, batch_idx):
    """One training step: tracks examples seen, computes and logs step loss."""
    self.examples_count += len(batch["diff_input_ids"])
    # Only the loss is needed here; the logits were unpacked but never used.
    loss = self(batch)[0]
    self.logger.experiment.log({"train_loss_step": loss}, step=self.examples_count)
    return {"loss": loss}
def training_epoch_end(self, outputs):
    """Average per-step training losses and log one value for the epoch."""
    step_losses = [step_out["loss"] for step_out in outputs]
    epoch_loss = torch.stack(step_losses).mean()
    self.logger.experiment.log(
        {"train_loss_epoch": epoch_loss}, step=self.examples_count
    )
def next_token_metrics_step(self, batch):
    """Shared val/test step: compute the batch loss.

    The model's scores were previously unpacked alongside the loss but never
    used, so only the loss is taken now.
    """
    loss = self(batch)[0]
    return {"loss": loss}
def next_token_metrics_epoch_end(self, outputs, stage):
    """
    Logic for validation & testing epoch end:
    1) Aggregate per-step losses into a mean epoch loss
       (note: no accuracy/MRR metrics are computed here, despite what an
       earlier version of this docstring claimed)
    2) (in val stage only) log `val_loss_epoch` for ModelCheckpoint and the
       progress bar
    3) Log everything to wandb
    """
    loss = torch.stack([x["loss"] for x in outputs]).mean()
    metrics = {f"{stage}_loss_epoch": loss}
    if stage == "val":
        # logger=False: wandb logging is done explicitly on the next line.
        self.log("val_loss_epoch", metrics["val_loss_epoch"], on_step=False, on_epoch=True, prog_bar=True, logger=False)
    self.logger.experiment.log(metrics, step=self.examples_count)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
    """Delegate to the shared val/test loss computation."""
    return self.next_token_metrics_step(batch)
def validation_epoch_end(self, outputs):
    """Aggregate and log validation losses for the epoch."""
    self.next_token_metrics_epoch_end(outputs, stage="val")
def test_step(self, batch, batch_idx):
    """Delegate to the shared val/test loss computation."""
    return self.next_token_metrics_step(batch)
def test_epoch_end(self, outputs):
    """Aggregate and log test losses for the epoch."""
    self.next_token_metrics_epoch_end(outputs, stage="test")
    def configure_optimizers(self):
        """Set up AdamW with a linear warmup/decay schedule stepped every batch."""
        optimizer = AdamW(self.parameters(), lr=self.learning_rate)
        scheduler = {
            # num_warmup_steps = 4000 // num_gpus, num_training_steps = epochs * batches.
            # NOTE(review): warmup is divided by the GPU count but the total step
            # count is not — confirm this asymmetry is intended for multi-GPU runs.
            "scheduler": get_linear_schedule_with_warmup(
                optimizer, 4000 // self._num_gpus, self._num_epochs * self._num_batches
            ),
            "interval": "step",  # step the LR scheduler every optimizer step, not every epoch
            "frequency": 1,
        }
        return [optimizer], [scheduler]
@staticmethod
def remove_layers_from_model(teacher, num_layers, is_gpt):
if not is_gpt:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.num_hidden_layers = num_layers
student = RobertaModel(config=student_config)
# copy all embeddings
student.embeddings.word_embeddings = teacher.embeddings.word_embeddings
student.embeddings.position_embeddings = teacher.embeddings.position_embeddings
student.embeddings.token_type_embeddings = teacher.embeddings.token_type_embeddings
student.embeddings.LayerNorm = teacher.embeddings.LayerNorm
student.embeddings.dropout = teacher.embeddings.dropout
# uniformly pick from middle layers from teacher
# it is basically np.linspace(0, teacher_config.num_hidden_layers,
# num=student_config.num_hidden_layers, endpoint=True)
step = (teacher_config.num_hidden_layers - 1) / (student_config.num_hidden_layers - 1)
for student_layer, teacher_layer in enumerate(
int(i * step) for i in range(student_config.num_hidden_layers)
):
student.encoder.layer[student_layer] = teacher.encoder.layer[teacher_layer]
else:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.n_layer = num_layers
student = GPT2LMHeadModel(config=student_config)
# Copying all embeddings
student.transformer.wte = teacher.transformer.wte
student.transformer.wpe = teacher.transformer.wpe
student.transformer.drop = teacher.transformer.drop
# Maybe there is something else in BERT that need to be copied!
# Specific thing for GPT2LMHead. Not necessary for BERT
student.tie_weights()
# Uniformly pick from middle layers from teacher
# It is basically np.linspace(0, teacher_config.n_layer, num=student_config.n_layer, endpoint=True)
step = (teacher_config.n_layer - 1) / (student_config.n_layer - 1)
for student_layer, teacher_layer in enumerate(int(i * step) for i in range(student_config.n_layer)):
student.transformer.h[student_layer] = teacher.transformer.h[teacher_layer]
return student
| src/model/encoder_decoder_module.py | 8,505 | Logic for validation & testing epoch end:
1) Calculate accuracy@1, accuracy@5, MRR@5
2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint
3) Log everything to wandb
use pretrained RoBERTa as encoder resize embeddings to match vocabulary size remove layers if necessary use pretrained GPT-2 as decoder remove layers if necessary use randomly initialized RoBERTa as encoder resize embeddings to match vocabulary size use randomly initialized GPT-2 as decoder cache is currently not supported by EncoderDecoder framework do not tie output embeddings to input embeddings to make logs for different batch sizes prettier copy all embeddings uniformly pick from middle layers from teacher it is basically np.linspace(0, teacher_config.num_hidden_layers, num=student_config.num_hidden_layers, endpoint=True) Copying all embeddings Maybe there is something else in BERT that need to be copied! Specific thing for GPT2LMHead. Not necessary for BERT Uniformly pick from middle layers from teacher It is basically np.linspace(0, teacher_config.n_layer, num=student_config.n_layer, endpoint=True) | 1,136 | en | 0.765186 |
from soup import soup_collector
def name_collector(spl_id, spl_type):
    """Return the sample name embedded in the page's first anchor tag.

    The anchor's ``name`` attribute looks like ``prefix_part1[_part2]``: when a
    third underscore-separated part exists the result is ``part1_part2``,
    otherwise just ``part1``.

    :param spl_id: sample identifier forwarded to soup_collector
    :param spl_type: sample type forwarded to soup_collector
    """
    soup = soup_collector(spl_id, spl_type)
    sample_info_type = soup.findAll('a')
    name_parts = sample_info_type[0].get('name').split('_')
    try:
        # three or more parts: join the second and third
        sample_info_name = name_parts[1].strip() + "_" + name_parts[2].strip()
    except IndexError:
        # only two parts: use the second alone (the bare `except:` this replaces
        # also swallowed unrelated errors such as KeyboardInterrupt)
        sample_info_name = name_parts[1].strip()
    return sample_info_name
#END
#intro
| src/name_collect.py | 571 | unwanted till now STARTENDintro | 31 | ro | 0.309985 |
import os
import numpy as np
from glob import glob
from scipy import optimize, spatial, ndimage
from tifffile import imread, imsave
from skimage.segmentation import find_boundaries
from skimage.morphology import remove_small_objects
from skimage.draw import line
from utils import random_colormap
import pdb
# define binarization function
def prepare_binary(fn):
    """Load a segmentation prediction TIFF and binarize it: threshold at the
    module-level ``bw_th``, then drop connected components smaller than the
    module-level ``min_obj_size``."""
    prediction = np.squeeze(imread(fn))
    mask = prediction > bw_th
    return remove_small_objects(mask > 0, min_size=min_obj_size)
# --- tracking parameters ---
max_matching_dist = 45  # centroid pairs farther apart than this are never matched
approx_inf = 65535  # stand-in for infinity in the assignment cost matrix
track_display_legnth = 20  # max points kept per displayed track (sic: "legnth")
min_obj_size = 20  # objects smaller than this are removed during binarization
bw_th = -0.5  # binarization threshold applied to the raw prediction
parent_path = "/mnt/data/"
all_movies = glob(parent_path + "timelapse/*.tiff")
# Track cells frame-to-frame for every timelapse movie, saving per-movie
# trajectories (frame-indexed dicts) and lineages (cell-ID-indexed point lists).
for M_idx, movies in enumerate(all_movies):
    movie_basename = os.path.basename(movies)
    well_name = movie_basename[:-5]  # strip the ".tiff" extension
    seg_path = f"{parent_path}timelapse_seg/{well_name}/"
    # vis_path = f"{parent_path}timelapse_track/{well_name}"
    # os.makedirs(vis_path, exist_ok=True)
    raw_path = f"{parent_path}timelapse/{well_name}"
    track_result = f"{parent_path}timelapse_track/{well_name}_result.npy"
    total_time = len(glob(raw_path + "/*.tiff"))
    traj = dict()
    lineage = dict()
    # Pass 1: per-frame segmentation -> labeled cells -> centroids
    for tt in range(total_time):
        seg_fn = seg_path + f"img_{tt}_segmentation.tiff"
        seg = prepare_binary(seg_fn)
        # get label image
        seg_label, num_cells = ndimage.label(seg)
        # calculate center of mass per labeled cell
        centroid = ndimage.center_of_mass(seg, labels=seg_label, index=np.arange(1, num_cells + 1))
        # generate cell information of this frame
        traj.update({
            tt : {"centroid": centroid, "parent": [], "child": [], "ID": []}
        })
    # initialize trajectory ID, parent node, track pts for the first frame
    max_cell_id = len(traj[0].get("centroid"))
    traj[0].update(
        {"ID": np.arange(0, max_cell_id, 1)}
    )
    traj[0].update(
        {"parent": -1 * np.ones(max_cell_id, dtype=int)}
    )
    centers = traj[0].get("centroid")
    pts = []
    for ii in range(max_cell_id):
        pts.append([centers[ii]])
        lineage.update({ii: [centers[ii]]})
    traj[0].update({"track_pts": pts})
    # Pass 2: link consecutive frames by linear assignment on centroid distance
    for tt in np.arange(1, total_time):
        p_prev = traj[tt-1].get("centroid")
        p_next = traj[tt].get("centroid")
        ###########################################################
        # simple LAP (linear assignment problem) tracking
        ###########################################################
        num_cell_prev = len(p_prev)
        num_cell_next = len(p_next)
        # calculate distance between each pair of cells
        cost_mat = spatial.distance.cdist(p_prev, p_next)
        # if the distance is too far, change to approx. Inf.
        cost_mat[cost_mat > max_matching_dist] = approx_inf
        # add edges from cells in previous frame to auxiliary vertices
        # in order to accommodate segmentation errors and leaving cells
        cost_mat_aug = max_matching_dist * 1.2 * np.ones(
            (num_cell_prev, num_cell_next + num_cell_prev), dtype=float
        )
        cost_mat_aug[:num_cell_prev, :num_cell_next] = cost_mat[:, :]
        # solve the optimization problem
        row_ind, col_ind = optimize.linear_sum_assignment(cost_mat_aug)
        #########################################################
        # parse the matching result
        #########################################################
        prev_child = np.ones(num_cell_prev, dtype=int)
        next_parent = np.ones(num_cell_next, dtype=int)
        next_ID = np.zeros(num_cell_next, dtype=int)
        next_track_pts = []
        # assign child for cells in previous frame
        # (a match into the auxiliary columns means the cell left / was missed)
        for ii in range(num_cell_prev):
            if col_ind[ii] >= num_cell_next:
                prev_child[ii] = -1
            else:
                prev_child[ii] = col_ind[ii]
        # assign parent for cells in next frame, update ID and track pts
        prev_pt = traj[tt-1].get("track_pts")
        prev_id = traj[tt-1].get("ID")
        for ii in range(num_cell_next):
            if ii in col_ind:
                # a matched cell is found: inherit ID, extend its track
                next_parent[ii] = np.where(col_ind == ii)[0][0]
                next_ID[ii] = prev_id[next_parent[ii]]
                current_pts = prev_pt[next_parent[ii]].copy()
                current_pts.append(p_next[ii])
                # keep only the most recent points for display
                if len(current_pts) > track_display_legnth:
                    current_pts.pop(0)
                next_track_pts.append(current_pts)
                # attach this point to the lineage
                single_lineage = lineage.get(next_ID[ii])
                try:
                    single_lineage.append(p_next[ii])
                except Exception:
                    # NOTE(review): debugger left in the production path — fires
                    # when the lineage lookup returned None; should log/raise.
                    pdb.set_trace()
                lineage.update({next_ID[ii]: single_lineage})
            else:
                # a new cell: mint a fresh ID and start a new track/lineage
                next_parent[ii] = -1
                next_ID[ii] = max_cell_id
                next_track_pts.append([p_next[ii]])
                lineage.update({max_cell_id: [p_next[ii]]})
                max_cell_id += 1
        # update record
        traj[tt-1].update({"child": prev_child})
        traj[tt].update({"parent": next_parent})
        traj[tt].update({"ID": next_ID})
        traj[tt].update({"track_pts": next_track_pts})
    np.save(track_result, [traj, lineage])
"""
######################################################
# generate track visualization
######################################################
cmap = random_colormap()
for tt in range(total_time):
# print(traj[tt].get("ID"))
# load segmentation and extract contours
seg_fn = seg_path + f"img_{tt}_segmentation.tiff"
seg = prepare_binary(seg_fn)
seg_label, num_cells = ndimage.label(seg)
cell_contours = find_boundaries(seg, mode='inner').astype(np.uint16)
cell_contours[cell_contours > 0] = 1
cell_contours = cell_contours * seg_label.astype(np.uint16)
cell_contours = cell_contours - 1 # to make the first object has label 0, to match index
# load raw image and create visualizaiton in RGB
# TODO: use real raw images
# raw = seg.astype(np.uint8)
raw = np.squeeze(imread(raw_path + f"img_{tt}.tiff")).astype(np.float32)
raw = (raw - raw.min())/ (raw.max() - raw.min())
raw = raw * 255
raw = raw.astype(np.uint8)
vis = np.zeros((raw.shape[0], raw.shape[1], 3), dtype=np.uint8)
for cc in range(3):
vis[:, :, cc] = raw
# loop through all cells, for each cell, we do the following
# 1- find ID, 2- load the color, 3- draw contour 4- draw track
cell_id = traj[tt].get("ID")
pts = traj[tt].get("track_pts")
for cid in range(num_cells):
# find ID
this_id = cell_id[cid]
# load the color
this_color = 255 * cmap.colors[this_id]
this_color = this_color.astype(np.uint8)
# draw contour
for cc in range(3):
vis_c = vis[:, :, cc]
vis_c[cell_contours == cid] = this_color[cc]
vis[:, :, cc] = vis_c # TODO: check if we need this line
# draw track
this_track = pts[cid]
if len(this_track) < 2:
continue
else:
for pid in range(len(this_track) - 1):
p1 = this_track[pid]
p2 = this_track[pid + 1]
rr, cc = line(int(round(p1[0])), int(round(p1[1])), int(round(p2[0])), int(round(p2[1])))
for ch in range(3):
vis[rr, cc ,ch] = this_color[ch]
imsave(vis_path + f"img_{tt+1}.tiff", vis)
"""
| run_tracking.py | 7,613 | define binarization function generate binary segmentaiton result params vis_path = f"{parent_path}timelapse_track/{well_name}" os.makedirs(vis_path, exist_ok=True) get label image calculate center of mass generate cell information of this frame initialize trajectory ID, parent node, track pts for the first frame simple LAP tracking calculate distance between each pair of cells if the distance is too far, change to approx. Inf. add edges from cells in previous frame to auxillary vertices in order to accomendate segmentation errors and leaving cells solve the optimization problem parse the matching result assign child for cells in previous frame assign parent for cells in next frame, update ID and track pts a matched cell is found attach this point to the lineage a new cell update record | 796 | en | 0.733842 |
import ast
import csv
import logging
import math
import os
from nose_parameterized import parameterized
import numpy
import SimpleITK as sitk
import six
from radiomics import getTestCase, imageoperations
# Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func
logger = logging.getLogger('radiomics.testing')
TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2')
def custom_name_func(testcase_func, param_num, param):
  """
  A custom test name function that will ensure that the tests are run such that they're batched with all tests for a
  given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical
  order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes
  so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10
  tests results in tests running in an order similar to:
  test_*.test_scenario_0_*
  test_*.test_scenario_10_*
  test_*.test_scenario_11_*
  ...
  test_*.test_scenario_19_*
  test_*.test_scenario_1_*
  test_*.test_scenario_20_*
  """
  global logger
  # Use lazy %-style arguments throughout (the previous call mixed str.format
  # for param_num with %-args, defeating logging's lazy formatting).
  logger.debug('custom_name_func: function name = %s, param_num = %03d, param.args = %s',
               testcase_func.__name__, param_num, param.args)
  return "%s_%s" % (
    testcase_func.__name__,
    parameterized.to_safe_name("_".join(str(x) for x in param.args)),
  )
class RadiomicsTestUtils:
  """
  This utility class reads in and stores the baseline files stored in 'data/baseline' (one per feature class).
  It provides utility methods to get the baseline feature value for a feature class and compare it to the result
  generated by the test.
  """
  def __init__(self):
    self._logger = logging.getLogger('radiomics.testing.utils')
    self._logger.debug('RadiomicsTestUtils')
    # the image and mask volumes
    self._image = None
    self._mask = None
    # cached crop of image/mask for the currently applied image type
    self._current_image = None
    self._current_mask = None
    self._bb = None  # bounding box returned by imageoperations.checkMask
    self._imageType = None
    # set up file paths
    self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data")
    self._baselineDir = os.path.join(self._dataDir, 'baseline')
    self._tests = set()
    self._test = None  # Test, specifies an image and mask and some configuration (settings)
    self._testCase = None  # Test image and mask to use in configured test
    self._testedSet = set()
    self._baseline = {}
    self.readBaselineFiles()
    self._current_config = {}
    self._featureClassName = None
    # per-test calculated values and per-test relative differences vs baseline
    self._results = {}
    self._diffs = {}
    for test in self.getTests():
      self._results[test] = {}
      self._diffs[test] = {}
  def readBaselineFiles(self):
    """
    Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.
    These files should therefore be named as follows: 'baseline_<className>.csv'.
    """
    baselineFiles = [fileName for fileName in os.listdir(self._baselineDir)
                     if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')]
    assert len(baselineFiles) > 0
    for baselineFile in baselineFiles:
      newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))
      cls = newBaseline.cls
      self._logger.debug('Read baseline for class %s', cls)
      self._baseline[cls] = newBaseline
      # union of all tests seen across feature classes
      self._tests |= newBaseline.tests
  def getTests(self):
    """
    Return all the tests for which there are baseline information.
    """
    return self._tests
  def getFeatureNames(self, className, test):
    """
    Gets all features for which a baseline value is available for the current class and test case. Returns a list
    containing the feature names (without image type and feature class specifiers, i.e. just the feature name).
    """
    if className not in self._baseline:
      return None  # No baseline available for specified class
    return self._baseline[className].getTestFeatures(test)
  def setFeatureClassAndTestCase(self, className, test):
    """
    Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case
    are not recognized. These have to be set here together, as the settings with which the test case has to be loaded
    are defined per feature class in the baseline (extracted from provenance information).
    Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test
    settings.
    If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature
    class or test case is changed, function returns True.
    """
    global TEST_CASES
    if self._featureClassName == className and self._test == test:
      return False
    self._test = test
    self._testedSet.add(self._test)
    # First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded
    if self._featureClassName != className:
      self._logger.debug('Setting feature class name to %s', className)
      assert className in self._baseline.keys()  # Check if a baseline has been read for this class
      self._featureClassName = className
      # Check if test settings have changed
      if self._current_config != self._baseline[className].getTestConfig(test):
        self._current_config = self._baseline[className].getTestConfig(test)
        self._testCase = None  # forces image to be reloaded (as settings have changed)
    # Next, set testCase if necessary
    if self._testCase != self._current_config['TestCase']:
      self._testCase = self._current_config['TestCase']
      self._logger.info("Reading the image and mask for test case %s", self._testCase)
      assert self._current_config['TestCase'] in TEST_CASES
      imageName, maskName = getTestCase(self._testCase)
      assert imageName is not None
      assert maskName is not None
      self._image = sitk.ReadImage(imageName)
      self._mask = sitk.ReadImage(maskName)
      # hashes (when present in the baseline) guard against stale test data
      if 'ImageHash' in self._current_config:
        assert sitk.Hash(self._image) == self._current_config['ImageHash']
      if 'MaskHash' in self._current_config:
        assert sitk.Hash(self._mask) == self._current_config['MaskHash']
      settings = self._current_config.get('Settings', {})
      # resample to the baseline's voxel spacing when both knobs are configured
      interpolator = settings.get('interpolator', sitk.sitkBSpline)
      resampledPixelSpacing = settings.get('resampledPixelSpacing', None)
      if interpolator is not None and resampledPixelSpacing is not None:
        self._image, self._mask = imageoperations.resampleImage(self._image,
                                                                self._mask,
                                                                resampledPixelSpacing,
                                                                interpolator,
                                                                settings.get('label', 1),
                                                                settings.get('padDistance', 5))
      self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings)
      if correctedMask is not None:
        self._mask = correctedMask
      self._imageType = None
    return True
  def getImage(self, imageType):
    """Return the cropped image for *imageType*, (re)applying the filter if needed."""
    if self._imageType != imageType:
      self._applyFilter(imageType)
    return self._current_image
  def getMask(self, imageType):
    """Return the cropped mask for *imageType*, (re)applying the filter if needed."""
    if self._imageType != imageType:
      self._applyFilter(imageType)
    return self._current_mask
  def _applyFilter(self, imageType):
    # Only the unfiltered ('original') image type is implemented so far.
    if imageType == 'original':
      self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb)
    else:
      raise NotImplementedError()
    self._imageType = imageType
  def getSettings(self):
    """Return the settings dict of the currently configured test (may be empty)."""
    return self._current_config.get('Settings', {})
  def checkResult(self, featureName, value):
    """
    Use utility methods to get and test the results against the expected baseline value for this key.
    """
    longName = '_'.join(featureName)
    if value is None:
      self._diffs[self._test][longName] = None
      self._results[self._test][longName] = None
    # deliberately fail the test when no value was calculated
    assert (value is not None)
    if math.isnan(value):
      self._diffs[self._test][longName] = numpy.nan
      self._results[self._test][longName] = numpy.nan
    # deliberately fail the test when the calculated value is NaN
    assert (not math.isnan(value))
    # save the result using the baseline class and feature names
    self._logger.debug('checkResults: featureName = %s', featureName)
    self._results[self._test][longName] = value
    baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName)
    assert baselineValue is not None
    baselineValue = float(baselineValue)
    self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue)
    if baselineValue == 0.0:
      # avoid divide by zero, the difference is either 0% if the value is also zero, or 100%
      if value - baselineValue == 0.0:
        percentDiff = 0.0
      else:
        percentDiff = 1.0
    else:
      percentDiff = abs(1.0 - (value / baselineValue))
    # save the difference
    self._diffs[self._test][longName] = percentDiff
    # check for a less than three percent difference
    if (percentDiff >= 0.03):
      self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName,
                         float(baselineValue), value, percentDiff * 100)
    assert (percentDiff < 0.03)
  def getResults(self):
    """Return the per-test dict of calculated feature values."""
    return self._results
  def getDiffs(self):
    """Return the per-test dict of relative differences vs the baseline."""
    return self._diffs
  def getDataDir(self):
    """Return the absolute path of the test data directory."""
    return self._dataDir
  def writeCSV(self, data, fileName):
    """
    Write out data in a csv file.
    Assumes a data structure with:
    {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}}
    """
    # Get the headers from the first testCase in _testedSet
    # If no tests were run, the length of _testedSet will be 0, and no files should be written
    if len(self._testedSet) > 0:
      with open(fileName, 'w') as csvFile:
        csvFileWriter = csv.writer(csvFile, lineterminator='\n')
        testedCases = sorted(self._testedSet)
        header = sorted(data[testedCases[0]].keys())
        header = ['testCase'] + header
        csvFileWriter.writerow(header)
        for testCase in testedCases:
          thisCase = data[testCase]
          thisCase['testCase'] = testCase
          row = []
          for h in header:
            row = row + [thisCase.get(h, "N/A")]
          csvFileWriter.writerow(row)
      self._logger.info('Wrote to file %s', fileName)
    else:
      self._logger.info('No test cases run, aborting file write to %s', fileName)
class PyRadiomicsBaseline:
  """
  Holds the baseline for one feature class, as stored in 'baseline_<className>.csv'.
  Columns are test cases; rows are either configuration entries ('general_info_*')
  or baseline feature values.
  """
  def __init__(self, featureClassName):
    self.logger = logging.getLogger('radiomics.testing.baseline')
    self.cls = featureClassName
    self.configuration = {}  # test -> {'general_info_*': value}
    self.baseline = {}  # test -> {featureName: value (as string)}
    self.tests = set()
  @classmethod
  def readBaselineFile(cls, baselineFile):
    """
    Alternate constructor: parse a 'baseline_<className>.csv' file. The first
    row holds the test names; 'general_info_*' rows populate the configuration,
    all other rows the baseline feature values.
    """
    # strip the 'baseline_' prefix and '.csv' suffix to recover the class name
    featureClassName = os.path.basename(baselineFile)[9:-4]
    new_baseline = cls(featureClassName)
    new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls)
    with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader:
      csvReader = csv.reader(baselineReader)
      tests = six.next(csvReader)[1:]
      for case in tests:
        new_baseline.configuration[case] = {}
        new_baseline.baseline[case] = {}
      for testRow in csvReader:
        for case_idx, case in enumerate(tests, start=1):
          if 'general_info' in testRow[0]:
            new_baseline.configuration[case][testRow[0]] = testRow[case_idx]
          else:
            new_baseline.baseline[case][testRow[0]] = testRow[case_idx]
      new_baseline.tests = set(tests)
    return new_baseline
  def getTestConfig(self, test):
    """
    Return the configuration for *test*: keys 'TestCase', 'Settings' and, when
    stored, 'ImageHash'/'MaskHash'. Returns {} for an unknown test and None
    when the mandatory 'general_info_TestCase' entry is missing.
    """
    if test not in self.configuration:
      return {}  # This test is not present in the baseline for this class
    config = {
      'TestCase': self.configuration[test].get('general_info_TestCase', None),
      'Settings': ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')),
    }
    if 'general_info_ImageHash' in self.configuration[test]:
      config['ImageHash'] = self.configuration[test]['general_info_ImageHash']
    if 'general_info_MaskHash' in self.configuration[test]:
      config['MaskHash'] = self.configuration[test]['general_info_MaskHash']
    if config['TestCase'] is None:
      self.logger.error('Missing key "general_info_TestCase". Cannot configure!')
      return None
    return config
  def getTestFeatures(self, test):
    """
    Gets all features for which a baseline value is available for the current class and test case. Returns a list
    containing the feature names.
    """
    if test not in self.baseline:
      return None  # This test is not present in the baseline for this class
    return list(self.baseline[test].keys())
  def getBaselineValue(self, test, featureName):
    """Return the stored baseline value (a string), or None if unknown."""
    if test not in self.baseline:
      return None
    return self.baseline[test].get(featureName, None)
  def writeBaselineFile(self, baselineDir):
    """
    Serialize the configuration and baseline values back to
    'baseline_<className>.csv' in *baselineDir* (inverse of readBaselineFile).
    """
    baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls)
    testCases = list(self.baseline.keys())
    # Bugfix: csv.writer requires text mode on Python 3 (opening 'wb' raised
    # TypeError); mirror the six-based mode selection used in readBaselineFile.
    with open(baselineFile, 'w' if six.PY3 else 'wb') as baseline:
      csvWriter = csv.writer(baseline)
      header = ['featureName'] + testCases
      csvWriter.writerow(header)
      config = self.configuration[testCases[0]].keys()
      for c in config:
        row = [c]
        for testCase in testCases:
          row.append(str(self.configuration[testCase].get(c, '')))
        csvWriter.writerow(row)
      features = self.baseline[testCases[0]].keys()
      for f in features:
        row = [f]
        for testCase in testCases:
          row.append(str(self.baseline[testCase].get(f, '')))
        csvWriter.writerow(row)
| tests/testUtils.py | 14,093 | This utility class reads in and stores the baseline files stored in 'dataaseline' (one per feature class)
It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated
by the test.
Use utility methods to get and test the results against the expected baseline value for this key.
A custom test name function that will ensure that the tests are run such that they're batched with all tests for a
given data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical
order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes
so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10
tests results in tests running in an order similar to:
test_*.test_scenario_0_*
test_*.test_scenario_10_*
test_*.test_scenario_11_*
...
test_*.test_scenario_19_*
test_*.test_scenario_1_*
test_*.test_scenario_20_*
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names (without image type and feature class specifiers, i.e. just the feature name).
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names.
Return all the tests for which there are baseline information.
Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.
These files should therefore be named as follows: 'baseline_<className>.csv'.
Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case
are not recognized. These have to be set here together, as the settings with which the test case has to be loaded
are defined per feature class in the baseline (extracted from provenance information).
Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test
settings.
If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature
class or test case is changed, function returns True.
Write out data in a csv file.
Assumes a data structure with:
{'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}}
Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func the image and mask volumes set up file paths Test, specifies an image and mask and some configuration (settings) Test image and mask to use in configured test No baseline available for specified class First set featureClass if necessary, because if settings have changed, testCase needs te be reloaded Check if a baseline has been read for this class Check if test settings have changed forces image to be reloaded (as settings have changed) Next, set testCase if necessary save the result using the baseline class and feature names avoid divide by zero, the difference is either 0% if the value is also zero, or 100% save the difference check for a less than three percent difference Get the headers from the first testCase in _testedSet If no tests were run, the length of _testedSet will be 0, and no files should be written This test is not present in the baseline for this class This test is not present in the baseline for this class | 3,403 | en | 0.846449 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# IntegrityError Exception for checking duplicate entry,
# connection import to establish connection to database
from django.db import IntegrityError, connection
# Used for serializing object data to json string
from django.core.serializers.json import DjangoJSONEncoder
from django.core.serializers import serialize
# Django HTTP Request
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseRedirect, JsonResponse
# Generic views as Class
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.views import View
# system imports
import sys, os, csv, json, datetime, calendar, re
# Django utils
from django.utils import timezone, safestring
from django.utils.decorators import method_decorator
# Django authentication
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
# Django Messaging Framework
from django.contrib import messages
# Conditional operators and exception for models
from django.db.models import Q, Count, Sum, Prefetch
from django.core.exceptions import ObjectDoesNotExist
# Paginator class import
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
# Helpers
import app.user_helper as user_helper
import app.records_helper as records_helper
# Forms
from app.forms import *
#=========================================================================================
# GET SUB CATEGORY ON BASIS OF CATEGORY
#=========================================================================================
def get_sub_category(request):
    """AJAX endpoint: render <option> elements for the sub-categories of the
    categories selected in the request (``cat_id[]`` query parameters).
    Returns an empty response when no category was selected."""
    category_ids = request.GET.getlist("cat_id[]")
    if not category_ids:
        return HttpResponse('')
    options = [
        '<option value="' + str(sub.id) + '">' + str(sub) + '</option>'
        for sub in records_helper.SubCategoryList(category_ids)
    ]
    return HttpResponse(''.join(options))
# -*- coding: utf-8 -*-
# This script was written by Takashi SUGA on April-August 2017
# You may use and/or modify this file according to the license described in the MIT LICENSE.txt file https://raw.githubusercontent.com/suchowan/watson-api-client/master
"""『重要文抽出によるWebページ要約のためのHTMLテキスト分割』
http://harp.lib.hiroshima-u.ac.jp/hiroshima-cu/metadata/5532
を参考にした HTML テキスト化処理
"""
import codecs
import re
class Article:
# この順に文字コードを試みる
encodings = [
"utf-8",
"cp932",
"euc-jp",
"iso-2022-jp",
"latin_1"
]
# ブロックレベル要素抽出正規表現
block_level_tags = re.compile("(?i)</?(" + "|".join([
"address", "blockquote", "center", "dir", "div", "dl",
"fieldset", "form", "h[1-6]", "hr", "isindex", "menu",
"noframes", "noscript", "ol", "pre", "p", "table", "ul",
"dd", "dt", "frameset", "li", "tbody", "td", "tfoot",
"th", "thead", "tr"
]) + ")(>|[^a-z].*?>)")
def __init__(self, path):
print(path)
self.path = path
self.contents = self.get_contents()
# self.contents = self.get_title()
def get_contents(self):
for encoding in self.encodings:
try:
lines = codecs.open(self.path, 'r', encoding)
html = ' '.join(line.rstrip('\r\n') for line in lines)
return self.__get_contents_in_html(html)
except UnicodeDecodeError:
continue
print('Cannot detect encoding of ' + self.path)
return None
def __get_contents_in_html(self, html):
parts = re.split("(?i)<(?:body|frame).*?>", html, 1)
if len(parts) == 2:
head, body = parts
else:
print('Cannot split ' + self.path)
body = html
body = re.sub(r"(?i)<(script|style|select).*?>.*?</\1\s*>", " ", body)
body = re.sub(self.block_level_tags, ' _BLOCK_LEVEL_TAG_ ', body)
body = re.sub(r"(?i)<a\s.+?>", ' _ANCHOR_LEFT_TAG_ ', body)
body = re.sub("(?i)</a>", ' _ANCHOR_RIGHT_TAG_ ', body)
body = re.sub("(?i)<[/a-z].*?>", " ", body)
return re.sub(" +", " ", "".join(self.__get_contents_in_body(body)))
def __get_contents_in_body(self, body):
for block in body.split("_BLOCK_LEVEL_TAG_"):
yield from self.__get_contents_in_block(block)
def __get_contents_in_block(self, block):
self.in_sentence = False
for unit in block.split("。"):
yield from self.__get_contents_in_unit(unit)
if self.in_sentence:
yield '。\n'
def __get_contents_in_unit(self, unit):
image_link = "_ANCHOR_LEFT_TAG_ +_ANCHOR_RIGHT_TAG_"
unit = re.sub(image_link, " ", unit)
if re.match(r"^ *$", unit):
return
fragment_tag = "((?:_ANCHOR_LEFT_TAG_ .+?_ANCHOR_LEFT_TAG_ ){2,})"
for fragment in re.split(fragment_tag, unit):
yield from self.__get_contents_in_fragment(fragment)
def __get_contents_in_fragment(self, fragment):
    """Classify *fragment* as sentence / non-sentence text and yield it
    with the appropriate terminator, tracking self.in_sentence state."""
    fragment = re.sub("_ANCHOR_(LEFT|RIGHT)_TAG_", ' ', fragment)
    if re.match(r"^ *$", fragment):
        return
    text_unit = TextUnit(fragment)
    if text_unit.is_sentence():
        # Sentence units end with " 。"
        if self.in_sentence:
            # Close the pending non-sentence run first.
            yield '。\n'
        yield text_unit.separated
        yield ' 。\n'
        self.in_sentence = False
    else:
        # Non-sentence units end with "―。"
        # (Limitation) Unlike the paper, non-sentence units are only
        # concatenated, never split.
        yield text_unit.separated
        yield '―'
        self.in_sentence = True
def get_title(self):
    """Return the final path component (the file name) of this article."""
    return self.path.rsplit('/', 1)[-1]
from janome.tokenizer import Tokenizer
from collections import defaultdict
import mojimoji
#import re
class TextUnit:
    """A tokenized text fragment with part-of-speech statistics used to
    decide whether the fragment is a grammatical Japanese sentence."""
    # Tokenizer with a user dictionary (simpledic format, UTF-8).
    tokenizer = Tokenizer("user_dic.csv", udic_type="simpledic", udic_enc="utf8")

    def __init__(self,fragment):
        self.fragment = fragment
        # Token counts per coarse POS category (see categorize()).
        self.categories = defaultdict(int)
        separated = []
        for token in self.tokenizer.tokenize(self.preprocess(self.fragment)):
            self.categories[self.categorize(token.part_of_speech)] += 1
            separated.append(token.surface)
        separated.append('')
        # '/'-joined surface forms; the trailing '' yields a final '/'.
        self.separated = '/'.join(separated)

    def categorize(self,part_of_speech):
        # Map a Janome POS string onto: 自立 (independent word),
        # 助詞 (particle), 助動詞 (auxiliary verb), その他 (other).
        if re.match("^名詞,(一般|代名詞|固有名詞|サ変接続|[^,]+語幹)", part_of_speech):
            return '自立'
        # NOTE(review): re.match anchors at the start of the string, so
        # "サ変" can never match a POS string beginning with "動詞" and the
        # second condition is always true. re.search may have been
        # intended — confirm against the paper before changing.
        if re.match("^動詞", part_of_speech) and not re.match("サ変", part_of_speech):
            return '自立'
        if re.match("^形容詞,自立", part_of_speech):
            return '自立'
        if re.match("^助詞", part_of_speech):
            return '助詞'
        if re.match("^助動詞", part_of_speech):
            return '助動詞'
        return 'その他'

    def is_sentence(self):
        """Heuristic sentence test: at least 3 of 5 POS-ratio criteria hold."""
        if self.categories['自立'] == 0:
            return False
        match = 0
        if self.categories['自立'] >= 7:
            match += 1
        if 100 * self.categories['自立'] / sum(self.categories.values()) <= 64:
            match += 1
        if 100 * (self.categories['助詞'] + self.categories['助動詞']) / self.categories['自立'] >= 22:
            # Following the paper, "attached words" = particles ∪ auxiliary
            # verbs (this differs from the usual definition).
            match += 1
        if 100 * self.categories['助詞'] / self.categories['自立'] >= 26:
            match += 1
        if 100 * self.categories['助動詞'] / self.categories['自立'] >= 6:
            match += 1
        return match >= 3

    def preprocess(self, text):
        """Normalize HTML entities, half-width characters (except digits)
        and whitespace before tokenization."""
        text = re.sub("&[^;]+;", " ", text)
        text = mojimoji.han_to_zen(text, digit=False)
        text = re.sub('(\t | )+', " ", text)
        return text
if __name__ == '__main__':
    import glob
    import os
    path_pattern = '/home/samba/example/links/bookmarks.crawled/**/*.html'
    # The converted plaintext is written to
    # '/home/samba/example/links/bookmarks.plaintext/**/*.txt'
    for path in glob.glob(path_pattern, recursive=True):
        article = Article(path)
        # get_contents() returns None when no encoding could decode the
        # file; writing None would raise TypeError, so skip such files.
        if article.contents is None:
            continue
        plaintext_path = re.sub("(?i)html?$", "txt", path.replace('.crawled', '.plaintext'))
        plaintext_path = plaintext_path.replace('\\', '/')
        plaintext_dir = re.sub("/[^/]+$", "", plaintext_path)
        if not os.path.exists(plaintext_dir):
            os.makedirs(plaintext_dir)
        with codecs.open(plaintext_path, 'w', 'utf-8') as f:
            f.write(article.contents)
| scripts/python/html2plaintext.py | 6,788 | 『重要文抽出によるWebページ要約のためのHTMLテキスト分割』
http://harp.lib.hiroshima-u.ac.jp/hiroshima-cu/metadata/5532
を参考にした HTML テキスト化処理
-*- coding: utf-8 -*- This script was written by Takashi SUGA on April-August 2017 You may use and/or modify this file according to the license described in the MIT LICENSE.txt file https://raw.githubusercontent.com/suchowan/watson-api-client/master この順に文字コードを試みる ブロックレベル要素抽出正規表現 self.contents = self.get_title() 文ユニットは“ 。”で終わる 非文ユニットは“―。”で終わる (制約) 論文と相違し非文ユニットは結合のみ行い分割していないimport re 論文通り「付属語 = 助詞 ⋃ 助動詞」と解釈 (通常の定義と異なる) The converted plaintext is put as '/home/samba/example/links/bookmarks.plaintext/**/*.txt' | 627 | ja | 0.762214 |
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateHttpRedirectDetails(object):
    """
    The details of a HTTP Redirect configured to redirect traffic from one
    hostname to another.

    **Warning:** Oracle recommends that you avoid using any confidential
    information when you supply string values using the API.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new UpdateHttpRedirectDetails object with values from
        keyword arguments. Supported keyword arguments correspond to this
        class's getters/setters:

        :param display_name: value for the display_name property.
        :type display_name: str
        :param target: value for the target property.
        :type target: HttpRedirectTarget
        :param response_code: value for the response_code property.
        :type response_code: int
        :param freeform_tags: value for the freeform_tags property.
        :type freeform_tags: dict(str, str)
        :param defined_tags: value for the defined_tags property.
        :type defined_tags: dict(str, dict(str, object))
        """
        # Attribute name -> swagger type, used by the SDK (de)serializer.
        self.swagger_types = {
            'display_name': 'str',
            'target': 'HttpRedirectTarget',
            'response_code': 'int',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))'
        }
        # Attribute name -> JSON key in API payloads.
        self.attribute_map = {
            'display_name': 'displayName',
            'target': 'target',
            'response_code': 'responseCode',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags'
        }
        self._display_name = None
        self._target = None
        self._response_code = None
        self._freeform_tags = None
        self._defined_tags = None

    @property
    def display_name(self):
        """
        The user-friendly name of the HTTP Redirect. The name can be changed
        and does not need to be unique.

        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """Set the user-friendly name of the HTTP Redirect."""
        self._display_name = display_name

    @property
    def target(self):
        """
        The redirect target object including all the redirect data.

        :rtype: HttpRedirectTarget
        """
        return self._target

    @target.setter
    def target(self, target):
        """Set the redirect target object."""
        self._target = target

    @property
    def response_code(self):
        """
        The response code returned for the redirect to the client. For more
        information, see `RFC 7231`__.

        __ https://tools.ietf.org/html/rfc7231#section-6.4

        :rtype: int
        """
        return self._response_code

    @response_code.setter
    def response_code(self, response_code):
        """Set the response code returned for the redirect."""
        self._response_code = response_code

    @property
    def freeform_tags(self):
        """
        Free-form tags for this resource. Each tag is a simple key-value pair
        with no predefined name, type, or namespace. For more information,
        see `Resource Tags`__. Example: `{\"Department\": \"Finance\"}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """Set the free-form tags for this resource."""
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        """
        Defined tags for this resource. Each key is predefined and scoped to
        a namespace. For more information, see `Resource Tags`__.
        Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """Set the defined tags for this resource."""
        self._defined_tags = defined_tags

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Models compare equal when every attribute is equal.
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py | 7,583 | The details of a HTTP Redirect configured to redirect traffic from one hostname to another.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
Initializes a new UpdateHttpRedirectDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this UpdateHttpRedirectDetails.
:type display_name: str
:param target:
The value to assign to the target property of this UpdateHttpRedirectDetails.
:type target: HttpRedirectTarget
:param response_code:
The value to assign to the response_code property of this UpdateHttpRedirectDetails.
:type response_code: int
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateHttpRedirectDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateHttpRedirectDetails.
:type defined_tags: dict(str, dict(str, object))
Gets the defined_tags of this UpdateHttpRedirectDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{"Operations": {"CostCenter": "42"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this UpdateHttpRedirectDetails.
:rtype: dict(str, dict(str, object))
Sets the defined_tags of this UpdateHttpRedirectDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{"Operations": {"CostCenter": "42"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this UpdateHttpRedirectDetails.
:type: dict(str, dict(str, object))
Gets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:return: The display_name of this UpdateHttpRedirectDetails.
:rtype: str
Sets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:param display_name: The display_name of this UpdateHttpRedirectDetails.
:type: str
Gets the freeform_tags of this UpdateHttpRedirectDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{"Department": "Finance"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this UpdateHttpRedirectDetails.
:rtype: dict(str, str)
Sets the freeform_tags of this UpdateHttpRedirectDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{"Department": "Finance"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this UpdateHttpRedirectDetails.
:type: dict(str, str)
Gets the response_code of this UpdateHttpRedirectDetails.
The response code returned for the redirect to the client. For more information, see `RFC 7231`__.
__ https://tools.ietf.org/html/rfc7231#section-6.4
:return: The response_code of this UpdateHttpRedirectDetails.
:rtype: int
Sets the response_code of this UpdateHttpRedirectDetails.
The response code returned for the redirect to the client. For more information, see `RFC 7231`__.
__ https://tools.ietf.org/html/rfc7231#section-6.4
:param response_code: The response_code of this UpdateHttpRedirectDetails.
:type: int
Gets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:return: The target of this UpdateHttpRedirectDetails.
:rtype: HttpRedirectTarget
Sets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:param target: The target of this UpdateHttpRedirectDetails.
:type: HttpRedirectTarget
coding: utf-8 Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. noqa: F401 | 4,596 | en | 0.569555 |
# sum = 10  (uncomment to shadow the builtin at module scope)
def func1():
    """Demonstrate LEGB name resolution: with every shadowing assignment
    commented out, ``sum`` resolves to the builtin at each level."""
    # sum = 20  (uncomment to shadow the builtin in func1's scope)
    print('Local1:', sum)

    def func2():
        # sum = 30  (uncomment to shadow the builtin in func2's scope)
        print('Local 2:', sum)

    func2()


func1()
# Because nothing shadows it, `sum` is still the callable builtin here.
print("Global:", sum([1, 2, 3]))
| Course/functions/example_12.py | 187 | sum = 10sum = 20sum = 30 | 24 | la | 0.519697 |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ContractResourceAssetAvailiabilityCodesCode(GenericTypeCode):
    """
    ContractResourceAssetAvailiabilityCodes
    From: http://hl7.org/fhir/asset-availability in valuesets.xml
    This value set has asset availability codes.
    """

    def __init__(self, value: AutoMapperTextInputType):
        """Wrap *value* as a code from the asset-availability value set."""
        super().__init__(value=value)

    # Canonical URI of the code system backing this value set.
    codeset: FhirUri = "http://hl7.org/fhir/asset-availability"
class ContractResourceAssetAvailiabilityCodesCodeValues:
    """
    Known codes of the http://hl7.org/fhir/asset-availability value set
    (from valuesets.xml). To be completed.
    """

    Lease = ContractResourceAssetAvailiabilityCodesCode("lease")
| spark_auto_mapper_fhir/value_sets/contract_resource_asset_availiability_codes.py | 1,052 | ContractResourceAssetAvailiabilityCodes
From: http://hl7.org/fhir/asset-availability in valuesets.xml
This value set has asset availability codes.
To be completed
From: http://hl7.org/fhir/asset-availability in valuesets.xml
This file is auto-generated by generate_classes so do not edit manually noinspection PyPep8Naming | 328 | en | 0.746596 |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for RandomMirrow_pair."""
import numpy as np
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class RandomMirrow_pair(object):
    """Randomly mirror (horizontally flip) a pair of related images together."""

    def __call__(self, image, label):
        """Call function of RandomMirrow_pair.

        :param image: usually the feature image, for example, the LR image for
            super solution dataset, the initial image for the segmentation
            dataset, and etc. Assumed H x W (grayscale) or H x W x 3
            (channels-last) — the shape[-1] == 3 check implies channels-last.
        :type image: numpy array
        :param label: usually the label image, for example, the HR image for
            super solution dataset, the mask image for the segmentation
            dataset, and etc
        :type label: numpy array
        :return: the (image, label) pair after the same random flip
        :rtype: tuple of numpy arrays
        """
        # flip is -1 (mirror) or 1 (leave unchanged); sharing the value
        # between image and label keeps the pair aligned.
        flip = np.random.choice(2) * 2 - 1
        channels_image = image.shape[-1]
        channels_label = label.shape[-1]
        # Mirror along the width axis (axis 1). The previous code reversed
        # the LAST axis for 3-channel inputs (image[:, :, ::flip]), which
        # flips the channel order (RGB -> BGR) instead of mirroring.
        if channels_image == 3:
            image = image[:, ::flip, :]
        else:
            image = image[:, ::flip]
        if channels_label == 3:
            label = label[:, ::flip, :]
        else:
            label = label[:, ::flip]
        return image, label
| vega/datasets/transforms/RandomMirrow_pair.py | 1,710 | Random mirrow two related image.
Call function of RandomMirrow_pair.
:param image: usually the feature image, for example, the LR image for super solution dataset,
the initial image for the segmentation dataset, and etc
:type image: PIL image
:param label: usually the label image, for example, the HR image for super solution dataset,
the mask image for the segmentation dataset, and etc
:type lebel: PIL image
:return: the image after transform
:rtype: list, erery item is a PIL image, the first one is feature image, the second is label image
This is a class for RandomMirrow_pair.
-*- coding: utf-8 -*- Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the MIT License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License for more details. | 1,006 | en | 0.7647 |
import pytest
import autofit as af
from autofit.mock import mock as m
@pytest.fixture(name="target_gaussian")
def make_target_gaussian():
    """A Gaussian prior model with default (implicit) priors."""
    return af.PriorModel(m.Gaussian)
@pytest.fixture(name="prior")
def make_prior():
    """A uniform prior shared between the source model and assertions."""
    return af.UniformPrior()
@pytest.fixture(name="source_gaussian")
def make_source_gaussian(prior):
    """A Gaussian prior model whose centre is the shared uniform prior."""
    return af.PriorModel(m.Gaussian, centre=prior)
def test_simple(source_gaussian, target_gaussian, prior):
    """take_attributes copies the centre prior across models."""
    target_gaussian.take_attributes(source_gaussian)
    assert target_gaussian.centre == prior
def test_assertions(source_gaussian, target_gaussian):
    """take_attributes refuses a target that carries assertions."""
    target_gaussian.add_assertion(target_gaussian.centre <= target_gaussian.intensity)
    with pytest.raises(AssertionError):
        target_gaussian.take_attributes(source_gaussian)
def test_assertions_collection(source_gaussian, target_gaussian):
    """Assertions on a member also block take_attributes on collections."""
    target_gaussian.add_assertion(target_gaussian.centre <= target_gaussian.intensity)
    target_collection = af.Collection(gaussian=target_gaussian)
    source_collection = af.Collection(gaussian=source_gaussian)
    with pytest.raises(AssertionError):
        target_collection.take_attributes(source_collection)
def test_in_collection(source_gaussian, target_gaussian, prior):
    """Priors propagate through matching keys of collections."""
    target = af.CollectionPriorModel(gaussian=target_gaussian)
    source = af.CollectionPriorModel(gaussian=source_gaussian)
    target.take_attributes(source)
    assert target.gaussian.centre == prior
def test_tuple(source_gaussian, target_gaussian, prior):
    """A tuple-valued attribute is copied whole."""
    source_gaussian.centre = (prior, 1.0)
    target_gaussian.take_attributes(source_gaussian)
    assert target_gaussian.centre == (prior, 1.0)
def test_tuple_prior(source_gaussian, target_gaussian, prior):
    """A tuple value replaces a TuplePrior placeholder on the target."""
    source_gaussian.centre = (prior, 1.0)
    target_gaussian.centre = af.TuplePrior()
    target_gaussian.take_attributes(source_gaussian)
    assert target_gaussian.centre == (prior, 1.0)
def test_tuple_in_instance(target_gaussian, prior):
    """A tuple may come from a plain instance, not only a prior model."""
    # noinspection PyTypeChecker
    source_gaussian = m.Gaussian(centre=(prior, 1.0))
    target_gaussian.take_attributes(source_gaussian)
    assert target_gaussian.centre == (prior, 1.0)
def test_tuple_in_collection(source_gaussian, target_gaussian, prior):
    """Tuple attributes propagate through collections."""
    source_gaussian.centre = (prior, 1.0)
    source = af.CollectionPriorModel(gaussian=source_gaussian)
    target = af.CollectionPriorModel(gaussian=target_gaussian)
    target.take_attributes(source)
    assert target.gaussian.centre == (prior, 1.0)
def test_tuple_in_instance_in_collection(target_gaussian, prior):
    """Tuple attributes propagate from an instance nested in a collection."""
    # noinspection PyTypeChecker
    source_gaussian = m.Gaussian(centre=(prior, 1.0))
    source = af.CollectionPriorModel(gaussian=source_gaussian)
    target = af.CollectionPriorModel(gaussian=target_gaussian)
    target.take_attributes(source)
    assert target.gaussian.centre == (prior, 1.0)
def test_source_is_dict(source_gaussian, target_gaussian, prior):
    """The source may be a plain dict keyed like the target collection."""
    source = dict(gaussian=source_gaussian)
    target = af.CollectionPriorModel(gaussian=target_gaussian)
    target.take_attributes(source)
    assert target.gaussian.centre == prior
def test_target_is_dict(source_gaussian, target_gaussian, prior):
    """A dict nested inside the target collection is matched by key."""
    source = af.CollectionPriorModel(
        collection=af.CollectionPriorModel(gaussian=source_gaussian)
    )
    target = af.CollectionPriorModel(collection=dict(gaussian=target_gaussian))
    target.take_attributes(source)
    assert target.collection.gaussian.centre == prior
def test_missing_from_source(target_gaussian, prior):
    """Attributes absent from the source are left untouched."""
    target_gaussian.centre = prior
    target_gaussian.take_attributes(af.CollectionPriorModel())
    assert target_gaussian.centre == prior
def test_unlabelled_in_collection(source_gaussian, target_gaussian, prior):
    """Positional (unlabelled) collection members are matched by index."""
    target = af.CollectionPriorModel([target_gaussian])
    source = af.CollectionPriorModel([source_gaussian])
    target.take_attributes(source)
    assert target[0].centre == prior
def test_passing_float(source_gaussian, target_gaussian):
    """Plain float attribute values are copied as-is."""
    source_gaussian.centre = 2.0
    target_gaussian.take_attributes(source_gaussian)
    assert target_gaussian.centre == 2.0
def test_missing_from_origin(target_gaussian):
    """Taking attributes from an empty collection is a no-op, not an error."""
    target_gaussian.take_attributes(af.CollectionPriorModel())
def test_limits(source_gaussian, target_gaussian):
    """Prior limits survive the transfer."""
    source_gaussian.centre = af.GaussianPrior(
        mean=0, sigma=1, lower_limit=-1, upper_limit=1
    )
    target_gaussian.take_attributes(source_gaussian)
    assert target_gaussian.centre.lower_limit == -1
    assert target_gaussian.centre.upper_limit == 1
def test_tuples():
    """A concrete tuple value moves between models built inline."""
    centre = (0.0, 1.0)
    source = af.Model(m.Gaussian, centre=centre)
    target = af.Model(m.Gaussian)
    target.take_attributes(source)
    assert target.centre == centre
| test_autofit/mapper/test_take_attributes.py | 5,938 | noinspection PyTypeChecker noinspection PyTypeChecker | 53 | en | 0.06758 |
#
# Copyright 2017 Alsanium, SAS. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import handler
class Context:
    """Minimal stand-in for the AWS Lambda context object."""

    def get_remaining_time_in_millis(self):
        """No-op; present only to satisfy the handler's interface."""

    def log(self):
        """No-op; present only to satisfy the handler's interface."""
class TestCase(unittest.TestCase):
    """Checks that invoking the handler with the wrong signature fails."""

    def test_case(self):
        # assertRaisesRegexp is a deprecated alias that was removed in
        # Python 3.12; assertRaisesRegex is the supported spelling.
        with self.assertRaisesRegex(AttributeError, "runtime: symbol Handle is not valid"):
            handler.Handle({}, Context())
| tests/sig_param_count/test.py | 959 | Copyright 2017 Alsanium, SAS. or its affiliates. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 591 | en | 0.870606 |
# USAGE
# python hard_negative_mine.py --conf conf/cars.json
# import the necessary packages
from __future__ import print_function
from pyimagesearch.object_detection import ObjectDetector
from pyimagesearch.descriptors import HOG
from pyimagesearch.utils import dataset
from pyimagesearch.utils import Conf
from imutils import paths
import numpy as np
import progressbar
import argparse
import pickle
import random
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to the configuration file")
args = vars(ap.parse_args())
# load the configuration file and initialize the data list
conf = Conf(args["conf"])
data = []
# load the classifier, then initialize the Histogram of Oriented Gradients descriptor
# and the object detector
model = pickle.loads(open(conf["classifier_path"], "rb").read())
hog = HOG(orientations=conf["orientations"], pixelsPerCell=tuple(conf["pixels_per_cell"]),
cellsPerBlock=tuple(conf["cells_per_block"]), normalize=conf["normalize"], block_norm="L1")
od = ObjectDetector(model, hog)
# grab the set of distraction paths and randomly sample them
dstPaths = list(paths.list_images(conf["image_distractions"]))
dstPaths = random.sample(dstPaths, conf["hn_num_distraction_images"])
# setup the progress bar
widgets = ["Mining: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(dstPaths), widgets=widgets).start()
# loop over the distraction paths
for (i, imagePath) in enumerate(dstPaths):
# load the image and convert it to grayscale
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect objects in the image
(boxes, probs) = od.detect(gray, conf["window_dim"], winStep=conf["hn_window_step"],
pyramidScale=conf["hn_pyramid_scale"], minProb=conf["hn_min_probability"])
# loop over the bounding boxes
for (prob, (startX, startY, endX, endY)) in zip(probs, boxes):
# extract the ROI from the image, resize it to a known, canonical size, extract
# HOG features from teh ROI, and finally update the data
roi = cv2.resize(gray[startY:endY, startX:endX], tuple(conf["window_dim"]),
interpolation=cv2.INTER_AREA)
features = hog.describe(roi)
data.append(np.hstack([[prob], features]))
# update the progress bar
pbar.update(i)
# sort the data points by confidence
pbar.finish()
print("[INFO] sorting by probability...")
data = np.array(data)
data = data[data[:, 0].argsort()[::-1]]
# dump the dataset to file
print("[INFO] dumping hard negatives to file...")
dataset.dump_dataset(data[:, 1:], [-1] * len(data), conf["features_path"], "hard_negatives",
writeMethod="a") | Module_02_Building_Your_Own_Custom_Object_Detector/2.10_Re-Training_and_Running_your_Classifier/hard_negative_mine.py | 2,728 | USAGE python hard_negative_mine.py --conf conf/cars.json import the necessary packages construct the argument parser and parse the arguments load the configuration file and initialize the data list load the classifier, then initialize the Histogram of Oriented Gradients descriptor and the object detector grab the set of distraction paths and randomly sample them setup the progress bar loop over the distraction paths load the image and convert it to grayscale detect objects in the image loop over the bounding boxes extract the ROI from the image, resize it to a known, canonical size, extract HOG features from teh ROI, and finally update the data update the progress bar sort the data points by confidence dump the dataset to file | 736 | en | 0.676728 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the code-blocks in the standalone-transaction.md file."""
import logging
import os
from unittest.mock import patch
import pytest
from aea.test_tools.test_cases import BaseAEATestCase
from tests.conftest import CUR_PATH, MAX_FLAKY_RERUNS_INTEGRATION, ROOT_DIR
from tests.test_docs.helper import extract_code_blocks, extract_python_code
from tests.test_docs.test_standalone_transaction.standalone_transaction import (
logger,
run,
)
# Markdown source of the documented example and the Python file extracted
# from it; the tests below check that the two stay in sync.
MD_FILE = "docs/standalone-transaction.md"
PY_FILE = "test_docs/test_standalone_transaction/standalone_transaction.py"
test_logger = logging.getLogger(__name__)
class TestStandaloneTransaction(BaseAEATestCase):
    """Tests for the code-blocks in the standalone-transaction.md file."""

    @classmethod
    def _patch_logger(cls):
        """Replace the example module's logger.info with a mock."""
        cls.patch_logger_info = patch.object(logger, "info")
        # start() activates the patcher and returns the replacement mock.
        # The previous implementation stored __enter__()'s result and later
        # called __exit__() on the MOCK (which only records a call); the
        # patch was therefore never actually deactivated.
        cls.mocked_logger_info = cls.patch_logger_info.start()

    @classmethod
    def _unpatch_logger(cls):
        """Deactivate the logger patch installed by _patch_logger."""
        cls.patch_logger_info.stop()

    @classmethod
    def setup_class(cls):
        """Setup the test class: patch the logger and load both sources."""
        super().setup_class()
        cls._patch_logger()
        doc_path = os.path.join(ROOT_DIR, MD_FILE)
        cls.code_blocks = extract_code_blocks(filepath=doc_path, filter_="python")
        test_code_path = os.path.join(CUR_PATH, PY_FILE)
        cls.python_file = extract_python_code(test_code_path)

    def test_read_md_file(self):
        """Test the last code block, that is the full listing of the demo from the Markdown."""
        assert (
            self.code_blocks[-1] == self.python_file
        ), "Files must be exactly the same."

    @pytest.mark.integration(reruns=MAX_FLAKY_RERUNS_INTEGRATION)
    def test_run_end_to_end(self):
        """Run the transaction from the file."""
        try:
            run()
            self.mocked_logger_info.assert_any_call("Transaction complete.")
        except RuntimeError:
            # The transaction service is flaky; treat as a soft failure.
            test_logger.info("RuntimeError: Some transactions have failed")

    def test_code_blocks_exist(self):
        """Test that all the code-blocks exist in the python file."""
        for blocks in self.code_blocks:
            assert (
                blocks in self.python_file
            ), "Code-block doesn't exist in the python file."
| tests/test_docs/test_standalone_transaction/test_standalone_transaction.py | 3,151 | This class contains the tests for the code-blocks in the agent-vs-aea.md file.
Setup the test class.
Test that all the code-blocks exist in the python file.
Test the last code block, that is the full listing of the demo from the Markdown.
Run the transaction from the file.
This module contains the tests for the code-blocks in the standalone-transaction.md file.
-*- coding: utf-8 -*- ------------------------------------------------------------------------------ Copyright 2022 Valory AG Copyright 2018-2021 Fetch.AI Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------------ | 1,151 | en | 0.755059 |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from tensorflow.python.eager import execute as _execute
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('PeriodicResample')
def periodic_resample(values, shape, name=None):
  r"""Periodically resample elements of a tensor to conform to `shape`.

  This function implements a slightly more generic version of the subpixel
  convolutions found in this [paper](https://arxiv.org/abs/1609.05158).

  The formula for computing the elements in the `output` tensor is as follows:
      `T` = `values` tensor of rank `R`
      `S` = desired `shape` of output tensor (vector of length `R`)
      `P` = `output` tensor of rank `R`
      \((T_1,\ldots,T_R)\) = shape(`T`)
      \([S_1,\ldots,S_q,\ldots,S_R]\) = elements of vector `S`

      A single element in `S` is left unspecified (denoted \(S_q=-1\)).
      Let \(f_i\) denote the (possibly non-integer) factor that relates the original
      dimension to the desired dimensions, \(S_i=f_i T_i\), for \(i\neq q\) where
      \(f_i>0\).
      Define the following:
      \(g_i=\lceil f_i\rceil\)
      \(t=\prod_i T_i\)
      \(s=\prod_{i\neq q} S_i\)
      \(S_q\) can then be defined as by \(S_q=\lfloor t/s\rfloor\).
      The elements of the resulting tensor are defined as
      \(P_{s_1,\ldots,s_R}=T_{h_1,\ldots,h_q,\ldots,h_R}\).
      The \(h_i\) (\(i\neq q\)) are defined by \(h_i=\lfloor s_i/g_i\rfloor\).
      \(h_q=S_q\sum_{j\neq q}^{q-1}G_j \mathrm{mod}(s_j,g_j) + s_q\), where
      \(G_j=\prod_{i}^{j-1}g_i\) (\(G_0=1\)).

  One drawback of this method is that whenever the output dimensions are slightly
  less than integer multiples of the input dimensions, many of the tensor elements
  are repeated in an inefficient way. This is resolved by specifying that all
  desired dimensions are integer multiples of the input tensor.

  For example:

  ```prettyprint
  `input` is [[ 0  1  2  3]
              [ 4  5  6  7]
              [ 8  9 10 11]]

  tf.periodic_resample(input, [6, None]) ==> [[ 0  1]
                                              [ 2  3]
                                              [ 4  5]
                                              [ 6  7]
                                              [ 8  9]
                                              [10 11]]
  ```

  Args:
    values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`, `uint32`, `uint64`, `bfloat16`.
      The tensor of rank `R` to periodic_resample
    shape: A `tf.TensorShape` or list of `ints`.
      A 1-D tensor representing the desired shape of the output tensor.
      Exactly one element of this tensor must have the value `None` which represents
      that this dimension of `values` can be adjusted downward in order to
      accommodate increases in other dimensions. The specified sizes of the
      non-adjustable dimensions must by at least as large as in the `values` tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
    Periodically resampled tensor that has dimensions specified as in
    `shape` except that the dimension specified as `None` will be minimally
    decreased as necessary.
  """
  # Normalize `shape` into the serialized form the op attribute expects.
  shape = _execute.make_shape(shape, "shape")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add a node to the graph via the op-def helper.
    _, _, _op = _op_def_lib._apply_op_helper(
        "PeriodicResample", values=values, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "shape", _op.get_attr("shape"))
  else:
    # Eager mode: infer the dtype attr from the input and execute immediately.
    _attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx)
    _inputs_flat = [values]
    _attrs = ("T", _attr_T, "shape", shape)
    _result = _execute.execute(b"PeriodicResample", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  # Record inputs/attrs so gradients can be computed for this op.
  _execute.record_gradient(
      "PeriodicResample", _inputs_flat, _attrs, _result, name)
  # The op has exactly one output; unwrap it from the result list.
  _result, = _result
  return _result
def _InitOpDefLibrary(op_list_proto_bytes):
  """Deserialize an `OpList` proto, register it, and return an `OpDefLibrary`."""
  parsed_op_list = _op_def_pb2.OpList()
  parsed_op_list.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_op_list)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_op_list)
  return library
# Human-readable dump of the serialized OpList below.
# op {
#   name: "PeriodicResample"
#   input_arg {
#     name: "values"
#     type_attr: "T"
#   }
#   output_arg {
#     name: "output"
#     type_attr: "T"
#   }
#   attr {
#     name: "T"
#     type: "type"
#     allowed_values {
#       list {
#         type: DT_FLOAT
#         type: DT_DOUBLE
#         type: DT_INT64
#         type: DT_INT32
#         type: DT_UINT8
#         type: DT_UINT16
#         type: DT_INT16
#         type: DT_INT8
#         type: DT_COMPLEX64
#         type: DT_COMPLEX128
#         type: DT_QINT8
#         type: DT_QUINT8
#         type: DT_QINT32
#         type: DT_HALF
#         type: DT_UINT32
#         type: DT_UINT64
#         type: DT_BFLOAT16
#       }
#     }
#   }
#   attr {
#     name: "shape"
#     type: "shape"
#   }
# }
# Module-level library built from the serialized OpList proto above.
_op_def_lib = _InitOpDefLibrary(b"\n^\n\020PeriodicResample\022\013\n\006values\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\t\003\004\021\005\006\010\022\013\014\r\023\026\027\016\"\016\n\005shape\022\005shape")
| tensorflow/contrib/periodic_resample/python/ops/gen_periodic_resample_op.py | 6,067 | Periodically resample elements of a tensor to conform to `shape`.
This function implements a slightly more generic version of the subpixel
convolutions found in this [paper](https://arxiv.org/abs/1609.05158).
The formula for computing the elements in the `output` tensor is as follows:
`T` = `values` tensor of rank `R`
`S` = desired `shape` of output tensor (vector of length `R`)
`P` = `output` tensor of rank `R`
\((T_1,\ldots,T_R)\) = shape(`T`)
\([S_1,\ldots,S_q,\ldots,S_R]\) = elements of vector `S`
A single element in `S` is left unspecified (denoted \(S_q=-1\)).
Let \(f_i\) denote the (possibly non-integer) factor that relates the original
dimension to the desired dimensions, \(S_i=f_i T_i\), for \(i\neq q\) where
\(f_i>0\).
Define the following:
\(g_i=\lceil f_i\rceil\)
\(t=\prod_i T_i\)
\(s=\prod_{i\neq q} S_i\)
\(S_q\) can then be defined as by \(S_q=\lfloor t/s\rfloor\).
The elements of the resulting tensor are defined as
\(P_{s_1,\ldots,s_R}=T_{h_1,\ldots,h_q,\ldots,h_R}\).
The \(h_i\) (\(i\neq q\)) are defined by \(h_i=\lfloor s_i/g_i\rfloor\).
\(h_q=S_q\sum_{j\neq q}^{q-1}G_j \mathrm{mod}(s_j,g_j) + s_q\), where
\(G_j=\prod_{i}^{j-1}g_i\) (\(G_0=1\)).
One drawback of this method is that whenever the output dimensions are slightly
less than integer multiples of the input dimensions, many of the tensor elements
are repeated in an inefficient way. This is resolved by specifying that all
desired dimensions are integer multiples of the input tensor.
For example:
```prettyprint
`input` is [[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
tf.periodic_resample(input, [6, None]) ==> [[ 0 1]
[ 2 3]
[ 4 5]
[ 6 7]
[ 8 9]
[10 11]]
```
Args:
values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`, `uint32`, `uint64`, `bfloat16`.
The tensor of rank `R` to periodic_resample
shape: A `tf.TensorShape` or list of `ints`.
A 1-D tensor representing the desired shape of the output tensor.
Exactly one element of this tensor must have the value `None` which represents
that this dimension of `values` can be adjusted downward in order to
accommodate increases in other dimensions. The specified sizes of the
non-adjustable dimensions must by at least as large as in the `values` tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `values`.
Periodically resampled tensor that has dimensions specified as in
`shape` except that the dimension specified as `None` will be minimally
decreased as necessary.
Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Needed to trigger the call to _set_call_cpp_shape_fn. op { name: "PeriodicResample" input_arg { name: "values" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 type: DT_BFLOAT16 } } } attr { name: "shape" type: "shape" } } | 3,773 | en | 0.735289 |
import os
class Config:
    """Base configuration shared by every environment."""

    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    UPLOADED_PHOTOS_DEST = 'app/static/photos'

    # email configurations (Gmail SMTP over STARTTLS)
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")

    # simple mde configurations
    SIMPLEMDE_JS_IIFE = True
    SIMPLEMDE_USE_CDN = True

    @staticmethod
    def init_app(app):
        # Hook for environment-specific initialisation; the base does nothing.
        pass
class TestConfig(Config):
    # No overrides yet; placeholder for test-environment settings.
    pass
class ProdConfig(Config):
    """Configuration for the production environment."""

    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
    # Heroku-style URLs use the legacy "postgres://" scheme, which newer
    # SQLAlchemy versions reject; rewrite only the first occurrence.
    if SQLALCHEMY_DATABASE_URI and SQLALCHEMY_DATABASE_URI.startswith("postgres://"):
        SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI.replace("postgres://", "postgresql://", 1)
class DevConfig(Config):
    """Configuration for local development."""

    # NOTE(review): credentials are hard-coded here; consider loading the
    # connection string from an environment variable instead — confirm.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:Anna123!@localhost/blogapp1'
    DEBUG = True
# Mapping used by the app factory to select a configuration class by name.
config_options = {
    'development': DevConfig,
    'production': ProdConfig,
    'test': TestConfig
}
import os
import re
import sys
import uuid
import redis
from cryptography.fernet import Fernet
from flask import abort, Flask, render_template, request
from redis.exceptions import ConnectionError
from werkzeug.urls import url_quote_plus
from werkzeug.urls import url_unquote_plus
# Truthy when links should keep the plain http scheme (env var is a string).
NO_SSL = os.environ.get('NO_SSL', False)
# Separator between the Redis storage key and the Fernet decryption key.
TOKEN_SEPARATOR = '~'


# Initialize Flask Application
app = Flask(__name__)
if os.environ.get('DEBUG'):
    app.debug = True
app.secret_key = os.environ.get('SECRET_KEY', 'Secret Key')
app.config.update(
    dict(STATIC_URL=os.environ.get('STATIC_URL', 'static')))

# Initialize Redis
# Backend selection: in-memory mock (tests), a single REDIS_URL, or
# individual host/port/db settings.
if os.environ.get('MOCK_REDIS'):
    from mockredis import mock_strict_redis_client
    redis_client = mock_strict_redis_client()
elif os.environ.get('REDIS_URL'):
    redis_client = redis.StrictRedis.from_url(os.environ.get('REDIS_URL'))
else:
    redis_host = os.environ.get('REDIS_HOST', 'localhost')
    redis_port = os.environ.get('REDIS_PORT', 6379)
    redis_db = os.environ.get('SNAPPASS_REDIS_DB', 0)
    redis_client = redis.StrictRedis(
        host=redis_host, port=redis_port, db=redis_db)
# Keys are namespaced so the Redis instance can be shared with other apps.
REDIS_PREFIX = os.environ.get('REDIS_PREFIX', 'snappass')
# Allowed password lifetimes, in seconds.
TIME_CONVERSION = {'week': 604800, 'day': 86400, 'hour': 3600}
def check_redis_alive(fn):
    """Decorator: ensure Redis is reachable before running *fn*.

    For the ``main`` entry point the connection is probed eagerly with a
    PING and the process exits on failure; for request handlers a
    connection error becomes an HTTP 500 response.
    """
    from functools import wraps

    @wraps(fn)
    def inner(*args, **kwargs):
        try:
            if fn.__name__ == 'main':
                redis_client.ping()
            return fn(*args, **kwargs)
        except ConnectionError as e:
            # BUG FIX: Python 3 exceptions have no `.message` attribute;
            # format the exception object itself instead.
            print('Failed to connect to redis! %s' % e)
            if fn.__name__ == 'main':
                sys.exit(0)
            else:
                return abort(500)
    return inner
def encrypt(password):
    """Encrypt *password* with a freshly generated Fernet key.

    Returns a ``(ciphertext, key)`` tuple, both as bytes.
    """
    key = Fernet.generate_key()
    ciphertext = Fernet(key).encrypt(password.encode('utf-8'))
    return ciphertext, key
def decrypt(password, decryption_key):
    """Decrypt *password* (bytes) with *decryption_key* (bytes).

    Returns the plain-text password as bytes.
    """
    return Fernet(decryption_key).decrypt(password)
def parse_token(token):
    """Split a token into ``(storage_key, decryption_key)``.

    The decryption key is ``None`` when the token has no separator.
    """
    fragments = token.split(TOKEN_SEPARATOR, 1)  # at most one split
    storage_key = fragments[0]
    if len(fragments) > 1:
        decryption_key = fragments[1].encode('utf-8')
    else:
        decryption_key = None
    return storage_key, decryption_key
@check_redis_alive
def set_password(password, ttl):
    """Encrypt and store *password* in Redis for *ttl* seconds.

    Returns a token combining the storage key and the decryption key.
    """
    encrypted_password, encryption_key = encrypt(password)
    storage_key = REDIS_PREFIX + uuid.uuid4().hex
    redis_client.setex(storage_key, ttl, encrypted_password)
    return TOKEN_SEPARATOR.join([storage_key, encryption_key.decode('utf-8')])
@check_redis_alive
def get_password(token):
    """Return the password behind *token*, consuming it.

    The stored value is deleted on first read. When the token carries a
    decryption key, the fetched value is decrypted before being returned;
    missing/expired tokens yield ``None``.
    """
    storage_key, decryption_key = parse_token(token)
    password = redis_client.get(storage_key)
    redis_client.delete(storage_key)
    if password is None:
        return None
    if decryption_key is not None:
        password = decrypt(password, decryption_key)
    return password.decode('utf-8')
@check_redis_alive
def password_exists(token):
    """Return whether the storage key embedded in *token* is still in Redis."""
    storage_key = parse_token(token)[0]
    return redis_client.exists(storage_key)
def empty(value):
    """Return ``True`` when *value* is falsy (empty string, ``None``, 0, ...).

    Always returns a bool; the previous implementation returned ``None`` on
    the truthy path, which every truthiness-based caller tolerated but gave
    the function an inconsistent return type.
    """
    return not value
def clean_input():
    """
    Make sure we're not getting bad data from the front end,
    format data to be machine readable
    """
    # Reject a missing password or TTL with 400 before any parsing.
    if empty(request.form.get('password', '')):
        abort(400)
    if empty(request.form.get('ttl', '')):
        abort(400)
    time_period = request.form['ttl'].lower()
    # Only the whitelisted lifetimes ('week'/'day'/'hour') are accepted.
    if time_period not in TIME_CONVERSION:
        abort(400)
    # Returns (ttl_in_seconds, plaintext_password).
    return TIME_CONVERSION[time_period], request.form['password']
@app.route('/', methods=['GET'])
def index():
    """Render the form used to submit a new password."""
    return render_template('set_password.html')
@app.route('/', methods=['POST'])
def handle_password():
    """Store the submitted password and render a page with its one-time link."""
    ttl, password = clean_input()
    token = set_password(password, ttl)
    if NO_SSL:
        base_url = request.url_root
    else:
        # Force https links even when the app itself runs behind plain http.
        base_url = request.url_root.replace("http://", "https://")
    return render_template(
        'confirm.html', password_link=base_url + url_quote_plus(token))
@app.route('/<password_key>', methods=['GET'])
def preview_password(password_key):
    """Show a confirmation page before the one-time password is revealed."""
    token = url_unquote_plus(password_key)
    if not password_exists(token):
        abort(404)
    return render_template('preview.html')
@app.route('/<password_key>', methods=['POST'])
def show_password(password_key):
    """Reveal (and thereby consume) the password behind *password_key*."""
    password = get_password(url_unquote_plus(password_key))
    if not password:
        abort(404)
    return render_template('password.html', password=password)
@check_redis_alive
def main():
    """Start the development server (after verifying Redis connectivity)."""
    app.run(host='0.0.0.0')


if __name__ == '__main__':
    main()
| snappass/main.py | 5,404 | Make sure we're not getting bad data from the front end,
format data to be machine readable
Decrypt a password (bytes) using the provided key (bytes),
and return the plain-text password (bytes).
Take a password string, encrypt it with Fernet symmetric encryption,
and return the result (bytes), with the decryption key (bytes)
From a given token, return the initial password.
If the token is tilde-separated, we decrypt the password fetched from Redis.
If not, the password is simply returned as is.
Encrypt and store the password for the specified lifetime.
Returns a token comprised of the key where the encrypted password
is stored, and the decryption key.
Initialize Flask Application Initialize Redis Split once, not more. | 731 | en | 0.801883 |
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Callable, Dict, Optional
from structlog import get_logger
from hathor.p2p.messages import ProtocolMessages
if TYPE_CHECKING:
from hathor.p2p.protocol import HathorProtocol # noqa: F401
logger = get_logger()
class BaseState:
    """Common behavior shared by every protocol state.

    Provides handlers for the ERROR and THROTTLE messages and helpers for
    sending messages through the underlying protocol.
    """

    protocol: 'HathorProtocol'
    # Dispatch table: message type -> handler taking the raw payload string.
    cmd_map: Dict[ProtocolMessages, Callable[[str], None]]

    def __init__(self, protocol: 'HathorProtocol'):
        self.log = logger.new(**protocol.get_logger_context())
        self.protocol = protocol
        self.cmd_map = {
            ProtocolMessages.ERROR: self.handle_error,
            ProtocolMessages.THROTTLE: self.handle_throttle,
        }
        # This variable is set by HathorProtocol after instantiating the state
        self.state_name = None

    def handle_error(self, payload: str) -> None:
        """Delegate ERROR handling to the protocol."""
        self.protocol.handle_error(payload)

    def handle_throttle(self, payload: str) -> None:
        """Log that the peer throttled us."""
        self.log.info('throttled', payload=payload)

    def send_message(self, cmd: ProtocolMessages, payload: Optional[str] = None) -> None:
        """Send *cmd* (with an optional payload) through the protocol."""
        self.protocol.send_message(cmd, payload)

    def send_throttle(self, key: str) -> None:
        """Tell the peer it exceeded the rate limit identified by *key*, if any."""
        limit = self.protocol.ratelimit.get_limit(key)
        if limit is None:
            # No rate limit configured for this key; nothing to report.
            return
        max_hits, window_seconds = limit
        payload = '{} At most {} hits every {} seconds'.format(key, max_hits, window_seconds)
        self.protocol.send_message(ProtocolMessages.THROTTLE, payload)

    def on_enter(self) -> None:
        # Subclasses must implement their entry hook.
        raise NotImplementedError

    def on_exit(self) -> None:
        pass

    def prepare_to_disconnect(self) -> None:
        """Called when we will disconnect with the peer."""
        pass
| hathor/p2p/states/base.py | 2,271 | Called when we will disconnect with the peer.
Copyright 2021 Hathor Labs Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. noqa: F401 This variable is set by HathorProtocol after instantiating the state | 675 | en | 0.879326 |
"""This module contains wunderkafka producer's boilerplate."""
| wunderkafka/producers/__init__.py | 63 | This module contains wunderkafka producer's boilerplate. | 56 | en | 0.553061 |
from typing import List, Optional
from pydantic import BaseModel
from typing_extensions import Literal
from .request import BaseResponseData, CountOffsetParams, ListRequestParams, ListResponseData
from .tag import Tag
from .user import CommonUserDetails
class Comment(BaseModel):
    """A single comment on a TikTok post."""

    # The ID of the post
    aweme_id: str

    # The ID of the comment
    cid: str

    # The timestamp in seconds when the comment was posted
    create_time: int

    # The number of times the comment has been liked
    digg_count: int

    # If this comment is replying to a comment, this array contains the original comment
    reply_comment: Optional[List["Comment"]] = None

    # If this comment is replying to a comment, the ID of that comment - "0" if not a reply
    reply_id: str

    # The status of the comment - 1 = published, 4 = published by you?
    status: int

    # The comment text
    text: str

    # Details about any tags in the comment
    text_extra: List[Tag]

    # Details about the author
    user: CommonUserDetails

    # 1 if the user likes the comment
    user_digged: Literal[0, 1]
class ListCommentsRequest(ListRequestParams, CountOffsetParams):
    """Request parameters for listing the comments of a post."""

    # The ID of the post to list comments for
    aweme_id: str

    # Unknown semantics - default is 2
    comment_style: Optional[int] = None

    # NOTE(review): the two attributes below carry no type annotation, so
    # pydantic will not validate them as typed model fields — confirm this
    # is intentional before annotating them.
    # ??? (unknown semantics)
    digged_cid = None

    # ??? (unknown semantics)
    insert_cids = None
class ListCommentsResponse(ListResponseData, CountOffsetParams):
    """Response payload for a comment-listing request."""

    # The comments of the requested post
    comments: List[Comment]
class PostCommentRequest(BaseModel):
    """Request payload for posting a comment (optionally as a reply)."""

    # The ID of the post to comment on
    aweme_id: str

    # The comment text
    text: str

    # The ID of the comment that is being replied to
    reply_id: Optional[str] = None

    # Details about any tags in the comment
    text_extra: List[Tag]

    # ??? (unknown semantics)
    is_self_see: Literal[0, 1]
class PostCommentResponse(BaseResponseData):
    """Response returned after posting a comment."""

    # The comment that was posted
    comment: Comment
| tiktok_bot/models/comment.py | 1,901 | The ID of the post The ID of the comment The timestamp in seconds when the comment was posted The number of times the comment has been liked If this comment is replying to a comment, this array contains the original comment If this comment is replying to a comment, the ID of that comment - "0" if not a reply The status of the comment - 1 = published, 4 = published by you? The comment text Details about any tags in the comment Details about the author 1 if the user likes the comment The ID of the post to list comments for ??? - default is 2 ??? ??? The ID of the post to comment on The comment text The ID of the comment that is being replied to Details about any tags in the comment ??? The comment that was posted | 720 | en | 0.93055 |
import mmcv
def wider_face_classes():
    """Class names of the WIDER FACE dataset (single category)."""
    return ['face']
def voc_classes():
    """Class names of the PASCAL VOC dataset (20 categories)."""
    names = (
        'aeroplane bicycle bird boat bottle bus car cat chair cow '
        'diningtable dog horse motorbike person pottedplant sheep sofa '
        'train tvmonitor'
    )
    return names.split()
def imagenet_det_classes():
    """Class names of the ImageNet DET (ILSVRC detection) dataset, 200 categories."""
    return [
        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
        'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
        'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
        'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
        'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
        'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
        'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
        'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
        'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
        'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
        'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
        'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
        'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
        'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
        'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
        'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
        'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
        'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
        'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
        'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
        'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
        'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
        'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
        'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
        'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
        'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
        'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
        'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
        'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
        'whale', 'wine_bottle', 'zebra'
    ]
def imagenet_vid_classes():
    """Class names of the ImageNet VID dataset (30 categories)."""
    names = (
        'airplane antelope bear bicycle bird bus car cattle dog '
        'domestic_cat elephant fox giant_panda hamster horse lion lizard '
        'monkey motorcycle rabbit red_panda sheep snake squirrel tiger '
        'train turtle watercraft whale zebra'
    )
    return names.split()
def coco_classes():
    """Class names of the customized bottle-defect dataset (7 categories).

    BUG FIX: the previous listing was missing a comma between '瓶盖断点' and
    '喷码正常', so Python's implicit string concatenation silently fused the
    two labels into one bogus class ('瓶盖断点喷码正常') and the list only had
    six entries. The commented-out full listing below shows the intended
    separate labels. (The standard 80-class COCO listing this file was
    generated from has been removed as dead commented-out code.)
    """
    # return ['瓶盖破损','瓶盖变形','瓶盖坏边','瓶盖打旋','瓶盖断点','标贴歪斜','标贴起皱','标贴气泡','喷码正常','喷码异常']
    return ['瓶盖破损', '瓶盖变形', '瓶盖坏边', '瓶盖打旋', '瓶盖断点', '喷码正常', '喷码异常']  # pg
    # return ['标贴歪斜', '标贴起皱', '标贴气泡']
def cityscapes_classes():
    """Class names of the Cityscapes dataset (8 categories)."""
    return 'person rider car truck bus train motorcycle bicycle'.split()
# Canonical dataset name -> accepted alias spellings (see get_classes).
# NOTE(review): 'WDIERFace' looks like a typo for 'WIDERFace' but is kept
# for backward compatibility with existing configs — confirm before fixing.
dataset_aliases = {
    'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
    'coco': ['coco', 'mscoco', 'ms_coco'],
    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'],
    'cityscapes': ['cityscapes']
}
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): dataset name or one of its aliases
            (see ``dataset_aliases``).

    Returns:
        list[str]: class names of the requested dataset.

    Raises:
        ValueError: if ``dataset`` matches no known alias.
        TypeError: if ``dataset`` is not a string.
    """
    # Explicit dispatch table instead of building a function name and
    # eval()-ing it: safer and easier to trace.
    name2classes = {
        'voc': voc_classes,
        'imagenet_det': imagenet_det_classes,
        'imagenet_vid': imagenet_vid_classes,
        'coco': coco_classes,
        'wider_face': wider_face_classes,
        'cityscapes': cityscapes_classes,
    }
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            labels = name2classes[alias2name[dataset]]()
        else:
            raise ValueError('Unrecognized dataset: {}'.format(dataset))
    else:
        raise TypeError('dataset must a str, but got {}'.format(type(dataset)))
    return labels
| my_configs/new/mmdet/core/evaluation/class_names.py | 5,827 | Get class names of a dataset.
return ['瓶盖破损','瓶盖变形','瓶盖坏边','瓶盖打旋','瓶盖断点','标贴歪斜','标贴起皱','标贴气泡','喷码正常','喷码异常']pg return ['标贴歪斜', '标贴起皱', '标贴气泡'] return [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' ] | 1,067 | en | 0.066395 |
# /usr/bin/env python3
"""Benchmark of handling PDB files comparing multiple libraries."""
import argparse
import glob
import os
import re
import subprocess
import sys
from pathlib import Path
def gather_libs(selected_libs):
    """Return library directory names under ``bench/``.

    When *selected_libs* is non-empty, only names in that collection are kept.
    """
    libs = []
    for path in sorted(glob.iglob("bench/*")):
        name = os.path.basename(path)
        keep = os.path.isdir(path) and (not selected_libs or name in selected_libs)
        if keep:
            libs.append(name)
    return libs
def gather_tests(libs, selected_tests):
    """Return unique test basenames found under ``bench/<lib>/``, in order.

    When *selected_tests* is non-empty, only names in that collection are kept.
    """
    tests = []
    for lib in libs:
        pattern = os.path.join("bench", lib, "*")
        for filepath in sorted(glob.iglob(pattern)):
            name = os.path.splitext(os.path.basename(filepath))[0]
            if name in tests or (selected_tests and name not in selected_tests):
                continue
            tests.append(name)
    return tests
def parse_args(argv):
    """Parse command-line options.

    Args:
        argv: argument list, e.g. ``sys.argv[1:]``.

    Returns:
        dict with ``tests`` and ``libraries`` keys, each ``None`` or a list
        of comma-separated names.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-t", "--tests", help="Test names to run.")
    parser.add_argument("-l", "--libraries", help="Library names to test.")
    # BUG FIX: the original called parser.parse_args() with no arguments,
    # silently ignoring *argv* and always reading sys.argv.
    opts = parser.parse_args(argv)
    if opts.tests:
        opts.tests = opts.tests.split(",")
    if opts.libraries:
        opts.libraries = opts.libraries.split(",")
    return vars(opts)
def run_test(filepath, pdbfile, repeats=10):
    """Run one benchmark script against *pdbfile* and print its timing.

    The runner is chosen from the script's location/extension: Schrodinger's
    ``run`` wrapper, ``python3`` (.py), ``crystal`` (.cr) or ``vmd`` (.tcl).
    Prints the elapsed time in seconds, or "failed" if the subprocess errors.
    """
    *_, dirname, filename = Path(filepath).parts
    basename, _ = os.path.splitext(filename)
    pdbid, _ = os.path.splitext(os.path.basename(pdbfile))
    # Print the label first (no newline) so a hang/failure is attributable.
    print(format(f"{dirname}/{basename}/{pdbid}", "<40"), end="", flush=True)
    if "schrodinger" in filepath:
        cmd = [
            os.path.join(os.environ["SCHRODINGER"], "run"),
            filepath,
            pdbfile,
            str(repeats),
        ]
    elif filepath.endswith(".py"):
        cmd = ["python3", filepath, pdbfile, str(repeats)]
    elif filepath.endswith(".cr"):
        cmd = ["crystal", "run", "--release", filepath, "--", pdbfile, str(repeats)]
    elif filepath.endswith(".tcl"):
        cmd = [
            "vmd",
            "-dispdev",
            "none",
            "-e",
            filepath,
            "-args",
            pdbfile,
            str(repeats),
        ]
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
        output = output.decode(sys.stdout.encoding).strip()
        try:
            # Fast path: the script printed only the elapsed seconds.
            elapsed = float(output)
        except ValueError:
            # Some runners print extra text; scrape "elapsed = <float>".
            elapsed = float(re.findall(r"elapsed *= *([\d\.e\-]+)", output)[0])
        print(format(elapsed, ".6f"))
    except subprocess.CalledProcessError:
        print("failed")
# Script entry: run every selected test of every selected library.
opts = parse_args(sys.argv[1:])
libs = gather_libs(opts["libraries"])
tests = gather_tests(libs, opts["tests"])
pdbs = list(map(os.path.abspath, glob.glob("data/*.pdb")))
for test in tests:
    # Parsing tests run over every PDB file; other tests only use 1AKE.
    for pdbfile in pdbs if test.startswith("parse") else ["data/1ake.pdb"]:
        for lib in libs:
            paths = glob.glob(f"bench/{lib}/{test}.*")
            if not paths:
                continue
            # 1HTQ is very large; use fewer repeats to keep runtime sane.
            run_test(paths[0], pdbfile, repeats=10 if "1htq" not in pdbfile else 3)
    print("")
| run.py | 3,088 | Benchmark of handling PDB files comparing multiple libraries.
/usr/bin/env python3 | 84 | en | 0.580107 |
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import io
import itertools
import json
import xml.etree.ElementTree
from yt_dlp.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_HTMLParseError,
compat_os_name,
compat_setenv,
)
from yt_dlp.utils import (
Config,
DateRange,
ExtractorError,
InAdvancePagedList,
LazyList,
OnDemandPagedList,
age_restricted,
args_to_str,
base_url,
caesar,
clean_html,
clean_podcast_url,
cli_bool_option,
cli_option,
cli_valueless_option,
date_from_str,
datetime_from_str,
detect_exe_version,
determine_ext,
dfxp2srt,
dict_get,
encode_base_n,
encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
expand_path,
extract_attributes,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
format_bytes,
get_element_by_attribute,
get_element_by_class,
get_element_html_by_attribute,
get_element_html_by_class,
get_element_text_and_html_by_tag,
get_elements_by_attribute,
get_elements_by_class,
get_elements_html_by_attribute,
get_elements_html_by_class,
get_elements_text_and_html_by_attribute,
int_or_none,
intlist_to_bytes,
iri_to_uri,
is_html,
js_to_json,
limit_length,
locked_file,
lowercase_escape,
match_str,
merge_dicts,
mimetype2ext,
month_by_name,
multipart_encode,
ohdave_rsa_encrypt,
orderedSet,
parse_age_limit,
parse_bitrate,
parse_codecs,
parse_count,
parse_dfxp_time_expr,
parse_duration,
parse_filesize,
parse_iso8601,
parse_qs,
parse_resolution,
pkcs1pad,
prepend_extension,
read_batch_urls,
remove_end,
remove_quotes,
remove_start,
render_table,
replace_extension,
rot47,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
update_url_query,
uppercase_escape,
url_basename,
url_or_none,
urlencode_postdata,
urljoin,
urshift,
version_tuple,
xpath_attr,
xpath_element,
xpath_text,
xpath_with_ns,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
    """Default (non-restricted) sanitize_filename: path separators and
    Windows-forbidden characters are replaced while Unicode is preserved."""
    self.assertEqual(sanitize_filename(''), '')
    self.assertEqual(sanitize_filename('abc'), 'abc')
    self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
    self.assertEqual(sanitize_filename('123'), '123')
    self.assertEqual('abc_de', sanitize_filename('abc/de'))
    # assertNotIn reports the offending string on failure
    self.assertNotIn('/', sanitize_filename('abc/de///'))
    self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
    self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
    self.assertEqual('yes no', sanitize_filename('yes? no'))
    self.assertEqual('this - that', sanitize_filename('this: that'))
    self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
    aumlaut = 'ä'
    self.assertEqual(sanitize_filename(aumlaut), aumlaut)
    tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
    self.assertEqual(sanitize_filename(tests), tests)
    self.assertEqual(
        sanitize_filename('New World record at 0:12:34'),
        'New World record at 0_12_34')
    self.assertEqual(sanitize_filename('--gasdgf'), '--gasdgf')
    self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
    self.assertEqual(sanitize_filename('--gasdgf', is_id=False), '_-gasdgf')
    self.assertEqual(sanitize_filename('.gasdgf'), '.gasdgf')
    self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
    self.assertEqual(sanitize_filename('.gasdgf', is_id=False), 'gasdgf')
    forbidden = '"\0\\/'
    for fc in forbidden:
        for fbc in forbidden:
            # no forbidden character may survive sanitization
            self.assertNotIn(fbc, sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
    """Restricted sanitize_filename: shell-unsafe characters are replaced
    and accented characters are transliterated to ASCII."""
    self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
    self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
    self.assertEqual(sanitize_filename('123', restricted=True), '123')
    self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
    self.assertNotIn('/', sanitize_filename('abc/de///', restricted=True))
    self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
    self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
    self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
    self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
    tests = 'aäb\u4e2d\u56fd\u7684c'
    self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
    self.assertNotEqual(sanitize_filename('\xf6', restricted=True), '')  # No empty filename
    forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
    for fc in forbidden:
        for fbc in forbidden:
            self.assertNotIn(fbc, sanitize_filename(fc, restricted=True))
    # Handle a common case more neatly
    self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
    self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
    # .. but make sure the file name is never empty
    self.assertNotEqual(sanitize_filename('-', restricted=True), '')
    self.assertNotEqual(sanitize_filename(':', restricted=True), '')
    self.assertEqual(sanitize_filename(
        'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
        'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')
def test_sanitize_ids(self):
    """Video IDs with leading/inner underscores pass through is_id mode unchanged."""
    for video_id in ('_n_cd26wFpw', '_BD_eEpuzXw', 'N0Y__7-UOdI'):
        self.assertEqual(sanitize_filename(video_id, is_id=True), video_id)
def test_sanitize_path(self):
    """sanitize_path (Windows semantics): separators normalized, reserved
    characters replaced with '#', drive letters and UNC prefixes preserved."""
    if sys.platform != 'win32':
        # Report as an explicit skip instead of silently passing on
        # non-Windows platforms, so test tallies stay honest.
        self.skipTest('sanitize_path tests Windows-specific behaviour')
    self.assertEqual(sanitize_path('abc'), 'abc')
    self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
    self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
    self.assertEqual(sanitize_path('abc|def'), 'abc#def')
    self.assertEqual(sanitize_path('<>:"|?*'), '#######')
    self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
    self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
    self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
    self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
    self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
    self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
    self.assertEqual(
        sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
        'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
    self.assertEqual(
        sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
        'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
    self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
    self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
    self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
    self.assertEqual(sanitize_path('../abc'), '..\\abc')
    self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
    self.assertEqual(sanitize_path('./abc'), 'abc')
    self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_sanitize_url(self):
    """sanitize_url fixes common scheme typos and protocol-relative URLs."""
    for url, expected in (
        ('//foo.bar', 'http://foo.bar'),
        ('httpss://foo.bar', 'https://foo.bar'),
        ('rmtps://foo.bar', 'rtmps://foo.bar'),
        ('https://foo.bar', 'https://foo.bar'),
        ('foo bar', 'foo bar'),
    ):
        self.assertEqual(sanitize_url(url), expected)
def test_extract_basic_auth(self):
    """Credentials embedded in a URL become a Basic Authorization header."""
    def auth_header(url):
        # PEP 8 (E731): prefer a def over assigning a lambda to a name
        return sanitized_Request(url).get_header('Authorization')
    self.assertFalse(auth_header('http://foo.bar'))
    self.assertFalse(auth_header('http://:foo.bar'))
    self.assertEqual(auth_header('http://@foo.bar'), 'Basic Og==')
    self.assertEqual(auth_header('http://:pass@foo.bar'), 'Basic OnBhc3M=')
    self.assertEqual(auth_header('http://user:@foo.bar'), 'Basic dXNlcjo=')
    self.assertEqual(auth_header('http://user:pass@foo.bar'), 'Basic dXNlcjpwYXNz')
def test_expand_path(self):
    """expand_path resolves '~' and environment-variable references."""
    def env(var):
        return f'%{var}%' if sys.platform == 'win32' else f'${var}'
    compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
    home = compat_getenv('HOME')
    self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
    self.assertEqual(expand_path(env('HOME')), home)
    self.assertEqual(expand_path('~'), home)
    self.assertEqual(
        expand_path('~/' + env('yt_dlp_EXPATH_PATH')),
        home + '/expanded')
def test_prepend_extension(self):
    """prepend_extension inserts ext before the (expected) final extension."""
    cases = (
        (('abc.ext', 'temp'), 'abc.temp.ext'),
        (('abc.ext', 'temp', 'ext'), 'abc.temp.ext'),
        # unexpected trailing extension: appended instead of inserted
        (('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp'),
        (('abc', 'temp'), 'abc.temp'),
        (('.abc', 'temp'), '.abc.temp'),
        (('.abc.ext', 'temp'), '.abc.temp.ext'),
    )
    for args, expected in cases:
        self.assertEqual(prepend_extension(*args), expected)
def test_replace_extension(self):
    """replace_extension swaps the final extension for a new one."""
    cases = (
        (('abc.ext', 'temp'), 'abc.temp'),
        (('abc.ext', 'temp', 'ext'), 'abc.temp'),
        # unexpected trailing extension: appended instead of replaced
        (('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp'),
        (('abc', 'temp'), 'abc.temp'),
        (('.abc', 'temp'), '.abc.temp'),
        (('.abc.ext', 'temp'), '.abc.temp'),
    )
    for args, expected in cases:
        self.assertEqual(replace_extension(*args), expected)
def test_subtitles_filename(self):
    """subtitles_filename inserts the language code before the new extension."""
    cases = (
        (('abc.ext', 'en', 'vtt'), 'abc.en.vtt'),
        (('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt'),
        (('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt'),
    )
    for args, expected in cases:
        self.assertEqual(subtitles_filename(*args), expected)
def test_remove_start(self):
    """remove_start strips a prefix; None passes through untouched."""
    self.assertIsNone(remove_start(None, 'A - '))
    self.assertEqual(remove_start('A - B', 'A - '), 'B')
    self.assertEqual(remove_start('B - A', 'A - '), 'B - A')
def test_remove_end(self):
    """remove_end strips a suffix; None passes through untouched."""
    self.assertIsNone(remove_end(None, ' - B'))
    self.assertEqual(remove_end('A - B', ' - B'), 'A')
    self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
def test_remove_quotes(self):
    """remove_quotes strips matching surrounding quotes only."""
    self.assertIsNone(remove_quotes(None))
    self.assertEqual(remove_quotes('"'), '"')
    self.assertEqual(remove_quotes("'"), "'")
    self.assertEqual(remove_quotes(';'), ';')
    self.assertEqual(remove_quotes('";'), '";')
    self.assertEqual(remove_quotes('""'), '')
    self.assertEqual(remove_quotes('";"'), ';')
def test_ordered_set(self):
    """orderedSet removes duplicates while keeping first-seen order."""
    cases = (
        ([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5], [1, 2, 3, 4, 5, 6, 7]),
        ([], []),
        ([1], [1]),
        ([135, 1, 1, 1], [135, 1]),  # order preserved, not sorted
    )
    for given, expected in cases:
        self.assertEqual(orderedSet(given), expected)
def test_unescape_html(self):
    """unescapeHTML decodes numeric and named HTML entities (incl. HTML5).

    NOTE(review): the entity escapes in the original literals appear to have
    been decoded by a text-extraction step (leaving identity assertions and a
    syntactically broken line); restored to entity-escaped inputs.
    """
    self.assertEqual(unescapeHTML('%20;'), '%20;')
    self.assertEqual(unescapeHTML('&#x2F;'), '/')
    self.assertEqual(unescapeHTML('&#47;'), '/')
    self.assertEqual(unescapeHTML('&eacute;'), 'é')
    # out-of-range numeric reference decodes to the replacement character
    self.assertEqual(unescapeHTML('&#2013266066;'), '\ufffd')
    self.assertEqual(unescapeHTML('&a&quot;'), '&a"')
    # HTML5 entities
    self.assertEqual(unescapeHTML('&period;&apos;'), '.\'')
def test_date_from_str(self):
    """Relative date expressions are equivalent to their day-based forms."""
    equivalents = (
        ('yesterday', 'now-1day'),
        ('now+7day', 'now+1week'),
        ('now+14day', 'now+2week'),
        ('20200229+365day', '20200229+1year'),
        ('20210131+28day', '20210131+1month'),
    )
    for lhs, rhs in equivalents:
        self.assertEqual(date_from_str(lhs), date_from_str(rhs))
def test_datetime_from_str(self):
    """datetime_from_str handles week/month/year units and 'auto' precision."""
    equivalents = (
        ('yesterday', 'now-1day', 'day'),
        ('now+7day', 'now+1week', 'day'),
        ('now+14day', 'now+2week', 'day'),
        ('20200229+365day', '20200229+1year', 'day'),
        ('20210131+28day', '20210131+1month', 'day'),
        ('20210131+59day', '20210131+2month', 'day'),
        ('now+1day', 'now+24hours', 'hour'),
        ('now+23hours', 'now+23hours', 'hour'),
    )
    for lhs, rhs, precision in equivalents:
        self.assertEqual(
            datetime_from_str(lhs, precision=precision),
            datetime_from_str(rhs, precision='auto'))
def test_daterange(self):
    """DateRange membership with closed, open-start and open-end ranges."""
    _20century = DateRange("19000101", "20000101")
    self.assertNotIn("17890714", _20century)
    _ac = DateRange("00010101")  # open-ended range starting at year 1
    self.assertIn("19690721", _ac)
    _firstmilenium = DateRange(end="10000101")  # open start
    self.assertIn("07110427", _firstmilenium)
def test_unified_dates(self):
    """unified_strdate normalizes many date formats to YYYYMMDD."""
    self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
    self.assertEqual(unified_strdate('8/7/2009'), '20090708')
    self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
    self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
    self.assertEqual(unified_strdate('1968 12 10'), '19681210')
    self.assertEqual(unified_strdate('1968-12-10'), '19681210')
    self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
    self.assertEqual(
        unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
        '20141126')
    self.assertEqual(
        unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
        '20150202')
    self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
    self.assertEqual(unified_strdate('25-09-2014'), '20140925')
    self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
    # unparseable input yields None rather than raising
    self.assertIsNone(unified_strdate('UNKNOWN DATE FORMAT'))
    self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207')
    self.assertEqual(unified_strdate('July 15th, 2013'), '20130715')
    self.assertEqual(unified_strdate('September 1st, 2013'), '20130901')
    self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902')
    self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103')
    self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023')
def test_unified_timestamps(self):
    """unified_timestamp converts many date formats to a Unix timestamp."""
    self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
    self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
    self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
    self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
    # pre-epoch dates produce negative timestamps
    self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
    self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
    self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
    self.assertEqual(
        unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
        1417001400)
    self.assertEqual(
        unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
        1422902860)
    self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
    self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
    self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
    self.assertIsNone(unified_timestamp('UNKNOWN DATE FORMAT'))
    self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500)
    self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
    self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
    self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
    self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
    self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
def test_determine_ext(self):
    """determine_ext extracts a known file extension from a URL, else the
    default (second argument)."""
    self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
    self.assertIsNone(determine_ext('http://example.com/foo/bar/?download', None))
    self.assertIsNone(determine_ext('http://example.com/foo/bar.nonext/?download', None))
    self.assertIsNone(determine_ext('http://example.com/foo/bar/mp4?download', None))
    self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
    self.assertIsNone(determine_ext('foobar', None))
def test_find_xpath_attr(self):
    """find_xpath_attr returns the first element carrying an attribute
    (optionally with a specific value), or None when absent."""
    testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
    doc = compat_etree_fromstring(testxml)
    self.assertIsNone(find_xpath_attr(doc, './/fourohfour', 'n'))
    self.assertIsNone(find_xpath_attr(doc, './/fourohfour', 'n', 'v'))
    self.assertIsNone(find_xpath_attr(doc, './/node', 'n'))
    self.assertIsNone(find_xpath_attr(doc, './/node', 'n', 'v'))
    self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
    self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
    # empty attribute value still counts as "present"
    self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
    """xpath_with_ns expands 'prefix:tag' steps using a namespace map."""
    testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
    doc = compat_etree_fromstring(testxml)
    def find(path):
        # PEP 8 (E731): prefer a def over assigning a lambda to a name
        return doc.find(xpath_with_ns(path, {'media': 'http://example.com/'}))
    self.assertIsNotNone(find('media:song'))
    self.assertEqual(find('media:song/media:author').text, 'The Author')
    self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
    """xpath_element: first matching path wins; default/fatal control what
    happens when nothing matches."""
    doc = xml.etree.ElementTree.Element('root')
    div = xml.etree.ElementTree.SubElement(doc, 'div')
    p = xml.etree.ElementTree.SubElement(div, 'p')
    p.text = 'Foo'
    self.assertEqual(xpath_element(doc, 'div/p'), p)
    self.assertEqual(xpath_element(doc, ['div/p']), p)
    self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
    self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
    self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
    self.assertIsNone(xpath_element(doc, 'div/bar'))
    self.assertIsNone(xpath_element(doc, ['div/bar']))
    self.assertIsNone(xpath_element(doc, ['div/bar'], 'div/baz'))
    self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
    self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
    self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
def test_xpath_text(self):
    """xpath_text returns an element's text, a default, None, or raises."""
    testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
    doc = compat_etree_fromstring(testxml)
    self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
    self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
    self.assertIsNone(xpath_text(doc, 'div/bar'))
    self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
    """xpath_attr reads an attribute of a matched element, with default and
    fatal behaviour for missing elements or attributes."""
    testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
    doc = compat_etree_fromstring(testxml)
    self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
    self.assertIsNone(xpath_attr(doc, 'div/bar', 'x'))
    self.assertIsNone(xpath_attr(doc, 'div/p', 'y'))
    self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
    self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
    self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
    self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
    """smuggle_url/unsmuggle_url round-trip data through a URL; repeated
    smuggling merges the payloads."""
    data = {"ö": "ö", "abc": [3]}
    url = 'https://foo.bar/baz?x=y#a'
    smug_url = smuggle_url(url, data)
    unsmug_url, unsmug_data = unsmuggle_url(smug_url)
    self.assertEqual(url, unsmug_url)
    self.assertEqual(data, unsmug_data)
    # a plain URL unsmuggles to itself with no data
    res_url, res_data = unsmuggle_url(url)
    self.assertEqual(res_url, url)
    self.assertIsNone(res_data)
    smug_url = smuggle_url(url, {'a': 'b'})
    smug_smug_url = smuggle_url(smug_url, {'c': 'd'})
    res_url, res_data = unsmuggle_url(smug_smug_url)
    self.assertEqual(res_url, url)
    self.assertEqual(res_data, {'a': 'b', 'c': 'd'})
def test_shell_quote(self):
    """shell_quote escapes arguments POSIX-style except on Windows ('nt')."""
    args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
    if compat_os_name != 'nt':
        expected = """ffmpeg -i 'ñ€ß'"'"'.mp4'"""
    else:
        expected = '''ffmpeg -i "ñ€ß'.mp4"'''
    self.assertEqual(shell_quote(args), expected)
def test_float_or_none(self):
    """float_or_none parses floats; unparseable input yields None."""
    self.assertEqual(float_or_none('42.42'), 42.42)
    self.assertEqual(float_or_none('42'), 42.0)
    self.assertIsNone(float_or_none(''))
    self.assertIsNone(float_or_none(None))
    self.assertIsNone(float_or_none([]))
    self.assertIsNone(float_or_none(set()))
def test_int_or_none(self):
    """int_or_none parses integers; unparseable input yields None."""
    self.assertEqual(int_or_none('42'), 42)
    self.assertIsNone(int_or_none(''))
    self.assertIsNone(int_or_none(None))
    self.assertIsNone(int_or_none([]))
    self.assertIsNone(int_or_none(set()))
def test_str_to_int(self):
    """str_to_int strips thousands separators; non-numbers yield None."""
    self.assertEqual(str_to_int('123,456'), 123456)
    self.assertEqual(str_to_int('123.456'), 123456)
    self.assertEqual(str_to_int(523), 523)
    self.assertIsNone(str_to_int('noninteger'))
    self.assertIsNone(str_to_int([]))
def test_url_basename(self):
    """url_basename returns the last path segment, ignoring query/fragment."""
    cases = (
        ('http://foo.de/', ''),
        ('http://foo.de/bar/baz', 'baz'),
        ('http://foo.de/bar/baz?x=y', 'baz'),
        ('http://foo.de/bar/baz#x=y', 'baz'),
        ('http://foo.de/bar/baz/', 'baz'),
        ('http://media.w3.org/2010/05/sintel/trailer.mp4', 'trailer.mp4'),
    )
    for url, expected in cases:
        self.assertEqual(url_basename(url), expected)
def test_base_url(self):
    """base_url strips everything after the last path separator."""
    cases = (
        ('http://foo.de/', 'http://foo.de/'),
        ('http://foo.de/bar', 'http://foo.de/'),
        ('http://foo.de/bar/', 'http://foo.de/bar/'),
        ('http://foo.de/bar/baz', 'http://foo.de/bar/'),
        ('http://foo.de/bar/baz?x=z/x/c', 'http://foo.de/bar/'),
    )
    for url, expected in cases:
        self.assertEqual(base_url(url), expected)
def test_urljoin(self):
    """urljoin tolerates bytes, None and non-string inputs; returns None
    when no usable absolute URL can be formed."""
    self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
    self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
    self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
    # invalid path component -> no result
    self.assertIsNone(urljoin('http://foo.de/', None))
    self.assertIsNone(urljoin('http://foo.de/', ''))
    self.assertIsNone(urljoin('http://foo.de/', ['foobar']))
    self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt')
    self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de')
    self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de')
def test_url_or_none(self):
    """url_or_none validates that a value looks like a URL with a known
    scheme (or is protocol-relative), else returns None."""
    self.assertIsNone(url_or_none(None))
    self.assertIsNone(url_or_none(''))
    self.assertIsNone(url_or_none('foo'))
    self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
    self.assertEqual(url_or_none('https://foo.de'), 'https://foo.de')
    self.assertIsNone(url_or_none('http$://foo.de'))
    self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
    self.assertEqual(url_or_none('//foo.de'), '//foo.de')
    self.assertIsNone(url_or_none('s3://foo.de'))
    self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
    self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
    self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
    self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')
def test_parse_age_limit(self):
    """parse_age_limit maps ints, '18+' strings and TV/MPAA ratings to ages."""
    self.assertIsNone(parse_age_limit(None))
    self.assertIsNone(parse_age_limit(False))
    self.assertIsNone(parse_age_limit('invalid'))
    self.assertEqual(parse_age_limit(0), 0)
    self.assertEqual(parse_age_limit(18), 18)
    self.assertEqual(parse_age_limit(21), 21)
    # out-of-range plain numbers are rejected
    self.assertIsNone(parse_age_limit(22))
    self.assertEqual(parse_age_limit('18'), 18)
    self.assertEqual(parse_age_limit('18+'), 18)
    self.assertEqual(parse_age_limit('PG-13'), 13)
    self.assertEqual(parse_age_limit('TV-14'), 14)
    self.assertEqual(parse_age_limit('TV-MA'), 17)
    self.assertEqual(parse_age_limit('TV14'), 14)
    self.assertEqual(parse_age_limit('TV_G'), 0)
def test_parse_duration(self):
    """parse_duration accepts colon, unit-word and ISO 8601 duration forms,
    returning seconds (float when fractional); None for invalid input."""
    self.assertIsNone(parse_duration(None))
    self.assertIsNone(parse_duration(False))
    self.assertIsNone(parse_duration('invalid'))
    self.assertEqual(parse_duration('1'), 1)
    self.assertEqual(parse_duration('1337:12'), 80232)
    self.assertEqual(parse_duration('9:12:43'), 33163)
    self.assertEqual(parse_duration('12:00'), 720)
    self.assertEqual(parse_duration('00:01:01'), 61)
    self.assertIsNone(parse_duration('x:y'))
    self.assertEqual(parse_duration('3h11m53s'), 11513)
    self.assertEqual(parse_duration('3h 11m 53s'), 11513)
    self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
    self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
    self.assertEqual(parse_duration('3 hours, 11 minutes, 53 seconds'), 11513)
    self.assertEqual(parse_duration('3 hours, 11 mins, 53 secs'), 11513)
    self.assertEqual(parse_duration('62m45s'), 3765)
    self.assertEqual(parse_duration('6m59s'), 419)
    self.assertEqual(parse_duration('49s'), 49)
    self.assertEqual(parse_duration('0h0m0s'), 0)
    self.assertEqual(parse_duration('0m0s'), 0)
    self.assertEqual(parse_duration('0s'), 0)
    self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
    self.assertEqual(parse_duration('T30M38S'), 1838)
    self.assertEqual(parse_duration('5 s'), 5)
    self.assertEqual(parse_duration('3 min'), 180)
    self.assertEqual(parse_duration('2.5 hours'), 9000)
    self.assertEqual(parse_duration('02:03:04'), 7384)
    self.assertEqual(parse_duration('01:02:03:04'), 93784)
    self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
    self.assertEqual(parse_duration('87 Min.'), 5220)
    self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
    self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
    self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
    # trailing field of 3 digits is treated as milliseconds
    self.assertEqual(parse_duration('01:02:03:050'), 3723.05)
    self.assertEqual(parse_duration('103:050'), 103.05)
def test_fix_xml_ampersands(self):
    """fix_xml_ampersands escapes bare '&' without touching valid entities.

    NOTE(review): the '&amp;'-style escapes in the original literals appear
    to have been decoded by a text-extraction step (leaving identity
    assertions and a syntactically broken line); restored to escaped form.
    """
    self.assertEqual(
        fix_xml_ampersands('"&x=y&z=a'), '"&amp;x=y&amp;z=a')
    self.assertEqual(
        fix_xml_ampersands('"&amp;x=y&wrong;&z=a'),
        '"&amp;x=y&amp;wrong;&amp;z=a')
    self.assertEqual(
        fix_xml_ampersands('&amp;&apos;&gt;&lt;&quot;'),
        '&amp;&apos;&gt;&lt;&quot;')
    self.assertEqual(
        fix_xml_ampersands('&#1234;&#x1abC;'), '&#1234;&#x1abC;')
    self.assertEqual(fix_xml_ampersands('&#&#'), '&amp;#&amp;#')
def test_paged_list(self):
    """OnDemandPagedList and InAdvancePagedList produce identical slices."""
    def check(size, pagesize, sliceargs, expected):
        def fetch_page(pagenum):
            start = pagenum * pagesize
            end = min(size, start + pagesize)
            yield from range(start, end)
        on_demand = OnDemandPagedList(fetch_page, pagesize)
        self.assertEqual(on_demand.getslice(*sliceargs), expected)
        in_advance = InAdvancePagedList(fetch_page, size // pagesize + 1, pagesize)
        self.assertEqual(in_advance.getslice(*sliceargs), expected)
    check(5, 2, (), [0, 1, 2, 3, 4])
    check(5, 2, (1,), [1, 2, 3, 4])
    check(5, 2, (2,), [2, 3, 4])
    check(5, 2, (4,), [4])
    check(5, 2, (0, 3), [0, 1, 2])
    check(5, 2, (1, 4), [1, 2, 3])
    check(5, 2, (2, 99), [2, 3, 4])  # end clamped to size
    check(5, 2, (20, 99), [])        # start beyond size
def test_read_batch_urls(self):
    # read_batch_urls: strips the (mojibake-decoded) UTF-8 BOM and
    # surrounding whitespace, drops '#'/';' comment lines, and tolerates
    # \r\n line endings — only the four bare URLs survive.
    f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
    self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
    """urlencode_postdata returns URL-encoded bytes suitable for a POST body."""
    data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
    # assertIsInstance gives a clearer failure than assertTrue(isinstance(...))
    self.assertIsInstance(data, bytes)
def test_update_url_query(self):
    """update_url_query merges/replaces the query parameters of a URL."""
    def query_dict(url, query):
        # compare via parse_qs so parameter order does not matter
        return parse_qs(update_url_query(url, query))
    self.assertEqual(
        query_dict('http://example.com/path', {'quality': ['HD'], 'format': ['mp4']}),
        parse_qs('http://example.com/path?quality=HD&format=mp4'))
    self.assertEqual(
        query_dict('http://example.com/path', {'system': ['LINUX', 'WINDOWS']}),
        parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
    self.assertEqual(
        query_dict('http://example.com/path', {'fields': 'id,formats,subtitles'}),
        parse_qs('http://example.com/path?fields=id,formats,subtitles'))
    self.assertEqual(
        query_dict('http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')}),
        parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
    self.assertEqual(
        query_dict('http://example.com/path?manifest=f4m', {'manifest': []}),
        parse_qs('http://example.com/path'))
    self.assertEqual(
        query_dict('http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'}),
        parse_qs('http://example.com/path?system=LINUX'))
    self.assertEqual(
        query_dict('http://example.com/path', {'fields': b'id,formats,subtitles'}),
        parse_qs('http://example.com/path?fields=id,formats,subtitles'))
    self.assertEqual(
        query_dict('http://example.com/path', {'width': 1080, 'height': 720}),
        parse_qs('http://example.com/path?width=1080&height=720'))
    self.assertEqual(
        query_dict('http://example.com/path', {'bitrate': 5020.43}),
        parse_qs('http://example.com/path?bitrate=5020.43'))
    self.assertEqual(
        query_dict('http://example.com/path', {'test': '第二行тест'}),
        parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
def test_multipart_encode(self):
    """multipart_encode builds multipart/form-data bodies; a boundary that
    occurs in the data raises ValueError."""
    body = multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0]
    self.assertEqual(
        body,
        b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
    body = multipart_encode({'欄位'.encode(): '值'.encode()}, boundary='AAAAAA')[0]
    self.assertEqual(
        body,
        b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
    self.assertRaises(
        ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
def test_dict_get(self):
    """dict_get tries keys in order, skipping falsy values unless
    skip_false_values=False."""
    FALSE_VALUES = {
        'none': None,
        'false': False,
        'zero': 0,
        'empty_string': '',
        'empty_list': [],
    }
    d = FALSE_VALUES.copy()
    d['a'] = 42
    self.assertEqual(dict_get(d, 'a'), 42)
    self.assertIsNone(dict_get(d, 'b'))
    self.assertEqual(dict_get(d, 'b', 42), 42)
    self.assertEqual(dict_get(d, ('a', )), 42)
    self.assertEqual(dict_get(d, ('b', 'a', )), 42)
    self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
    self.assertIsNone(dict_get(d, ('b', 'c', )))
    self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
    for key, false_value in FALSE_VALUES.items():
        # falsy values are skipped by default ...
        self.assertIsNone(dict_get(d, ('b', 'c', key, )))
        # ... but returned verbatim with skip_false_values=False
        self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
def test_merge_dicts(self):
    """merge_dicts keeps the first truthy (or first non-empty) value per key."""
    cases = (
        (({'a': 1}, {'b': 2}), {'a': 1, 'b': 2}),
        (({'a': 1}, {'a': 2}), {'a': 1}),
        (({'a': 1}, {'a': None}), {'a': 1}),
        (({'a': 1}, {'a': ''}), {'a': 1}),
        (({'a': 1}, {}), {'a': 1}),
        (({'a': None}, {'a': 1}), {'a': 1}),
        (({'a': ''}, {'a': 1}), {'a': ''}),
        (({'a': ''}, {'a': 'abc'}), {'a': 'abc'}),
        (({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'}),
    )
    for dicts, expected in cases:
        self.assertEqual(merge_dicts(*dicts), expected)
def test_encode_compat_str(self):
    """encode_compat_str decodes bytes and passes str through unchanged."""
    expected = 'тест'
    self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), expected)
    self.assertEqual(encode_compat_str('тест', 'utf-8'), expected)
def test_parse_iso8601(self):
    """parse_iso8601 handles offsets, 'Z', and fractional seconds."""
    self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
    self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
    self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
    self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
    self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
    # malformed separator in the time part -> None
    self.assertIsNone(parse_iso8601('2015-09-29T08-27-31.727'))
def test_strip_jsonp(self):
    """strip_jsonp unwraps JSONP callback wrappers down to plain JSON."""
    def stripped_json(payload):
        return json.loads(strip_jsonp(payload))
    self.assertEqual(
        stripped_json('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);'),
        [{"id": "532cb", "x": 3}])
    self.assertEqual(
        stripped_json('parseMetadata({"STATUS":"OK"})\n\n\n//epc'),
        {'STATUS': 'OK'})
    self.assertEqual(
        stripped_json('ps.embedHandler({"status": "success"});'),
        {'status': 'success'})
    self.assertEqual(
        stripped_json('window.cb && window.cb({"status": "success"});'),
        {'status': 'success'})
    self.assertEqual(
        stripped_json('window.cb && cb({"status": "success"});'),
        {'status': 'success'})
    self.assertEqual(
        stripped_json('({"status": "success"});'),
        {'status': 'success'})
def test_strip_or_none(self):
    """strip_or_none strips whitespace from str input; non-str becomes None."""
    self.assertEqual(strip_or_none(' abc'), 'abc')
    self.assertEqual(strip_or_none('abc '), 'abc')
    self.assertEqual(strip_or_none(' abc '), 'abc')
    self.assertEqual(strip_or_none('\tabc\t'), 'abc')
    self.assertEqual(strip_or_none('\n\tabc\n\t'), 'abc')
    self.assertEqual(strip_or_none('abc'), 'abc')
    self.assertEqual(strip_or_none(''), '')
    self.assertIsNone(strip_or_none(None))
    self.assertIsNone(strip_or_none(42))
    self.assertIsNone(strip_or_none([]))
def test_uppercase_escape(self):
    """uppercase_escape expands \\UXXXXXXXX escapes; plain text is untouched."""
    for given, expected in (('aä', 'aä'), ('\\U0001d550', '𝕐')):
        self.assertEqual(uppercase_escape(given), expected)
def test_lowercase_escape(self):
    """lowercase_escape expands \\uXXXX escapes; plain text is untouched."""
    for given, expected in (('aä', 'aä'), ('\\u0026', '&')):
        self.assertEqual(lowercase_escape(given), expected)
def test_limit_length(self):
    """limit_length truncates long strings with an ellipsis; None passes
    through untouched."""
    self.assertIsNone(limit_length(None, 12))
    self.assertEqual(limit_length('foo', 12), 'foo')
    self.assertTrue(
        limit_length('foo bar baz asd', 12).startswith('foo bar'))
    self.assertIn('...', limit_length('foo bar baz asd', 12))
def test_mimetype2ext(self):
    """mimetype2ext maps a MIME type (parameters ignored) to a file extension."""
    for mimetype, ext in [
        (None, None),
        ('video/x-flv', 'flv'),
        ('application/x-mpegURL', 'm3u8'),
        ('text/vtt', 'vtt'),
        ('text/vtt;charset=utf-8', 'vtt'),
        ('text/html; charset=utf-8', 'html'),
        ('audio/x-wav', 'wav'),
        ('audio/x-wav;codec=pcm', 'wav'),
    ]:
        self.assertEqual(mimetype2ext(mimetype), ext)
def test_month_by_name(self):
    """month_by_name resolves a month name to its number, optionally per-language."""
    for args, expected in [
        ((None,), None),
        (('December', 'en'), 12),
        (('décembre', 'fr'), 12),
        (('December',), 12),          # English is the default language
        (('décembre',), None),        # non-English name without the language hint
        (('Unknown', 'unknown'), None),
    ]:
        self.assertEqual(month_by_name(*args), expected)
def test_parse_codecs(self):
    """parse_codecs splits a codecs string into vcodec/acodec/dynamic_range."""
    for codecs, expected in [
        ('', {}),
        ('avc1.77.30, mp4a.40.2',
         {'vcodec': 'avc1.77.30', 'acodec': 'mp4a.40.2', 'dynamic_range': None}),
        ('mp4a.40.2',
         {'vcodec': 'none', 'acodec': 'mp4a.40.2', 'dynamic_range': None}),
        # Order of audio/video entries must not matter.
        ('mp4a.40.5,avc1.42001e',
         {'vcodec': 'avc1.42001e', 'acodec': 'mp4a.40.5', 'dynamic_range': None}),
        ('avc3.640028',
         {'vcodec': 'avc3.640028', 'acodec': 'none', 'dynamic_range': None}),
        # Empty entries and unknown codec names are skipped.
        (', h264,,newcodec,aac',
         {'vcodec': 'h264', 'acodec': 'aac', 'dynamic_range': None}),
        ('av01.0.05M.08',
         {'vcodec': 'av01.0.05M.08', 'acodec': 'none', 'dynamic_range': None}),
        # HDR detection from codec profile strings.
        ('vp9.2',
         {'vcodec': 'vp9.2', 'acodec': 'none', 'dynamic_range': 'HDR10'}),
        ('av01.0.12M.10.0.110.09.16.09.0',
         {'vcodec': 'av01.0.12M.10', 'acodec': 'none', 'dynamic_range': 'HDR10'}),
        ('dvhe',
         {'vcodec': 'dvhe', 'acodec': 'none', 'dynamic_range': 'DV'}),
        ('theora, vorbis',
         {'vcodec': 'theora', 'acodec': 'vorbis', 'dynamic_range': None}),
        # A lone audio+video pair of unrecognized names is still split.
        ('unknownvcodec, unknownacodec',
         {'vcodec': 'unknownvcodec', 'acodec': 'unknownacodec'}),
        ('unknown', {}),
    ]:
        self.assertEqual(parse_codecs(codecs), expected)
def test_escape_rfc3986(self):
    """escape_rfc3986 percent-encodes only what RFC 3986 requires, idempotently."""
    reserved = "!*'();:@&=+$,/?#[]"
    unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
    for raw, escaped in [
        (reserved, reserved),          # reserved characters stay as-is
        (unreserved, unreserved),      # unreserved characters stay as-is
        ('тест', '%D1%82%D0%B5%D1%81%D1%82'),
        ('%D1%82%D0%B5%D1%81%D1%82', '%D1%82%D0%B5%D1%81%D1%82'),  # already-escaped input untouched
        ('foo bar', 'foo%20bar'),
        ('foo%20bar', 'foo%20bar'),
    ]:
        self.assertEqual(escape_rfc3986(raw), escaped)
def test_escape_url(self):
    """escape_url percent-encodes non-ASCII URL parts and IDNA-encodes the host."""
    for url, escaped in [
        ('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4',
         'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'),
        ('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290',
         'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
        ('http://тест.рф/фрагмент',
         'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'),
        ('http://тест.рф/абв?абв=абв#абв',
         'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'),
        # Pure-ASCII URL must come back unchanged.
        ('http://vimeo.com/56015672#at=0', 'http://vimeo.com/56015672#at=0'),
    ]:
        self.assertEqual(escape_url(url), escaped)
def test_js_to_json_realworld(self):
    """js_to_json converts real-world JavaScript object literals into parseable JSON."""
    # Single-quoted keys/values become double-quoted.
    inp = '''{
'clip':{'provider':'pseudo'}
}'''
    self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
    json.loads(js_to_json(inp))
    # null and nested arrays must survive untouched.
    inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
    self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
    # Escaped single quotes inside a double-quoted string are unescaped.
    inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
    self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
    # \n and \u escapes inside the string must round-trip through json.loads.
    inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
    json_code = js_to_json(inp)
    self.assertEqual(json.loads(json_code), json.loads(inp))
    # Numeric keys are quoted and the trailing comma is dropped.
    inp = '''{
0:{src:'skipped', type: 'application/dash+xml'},
1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
}'''
    self.assertEqual(js_to_json(inp), '''{
"0":{"src":"skipped", "type": "application/dash+xml"},
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
}''')
    # Already-valid JSON passes through unchanged.
    inp = '''{"foo":101}'''
    self.assertEqual(js_to_json(inp), '''{"foo":101}''')
    # A quoted time-like string must not be treated as a number.
    inp = '''{"duration": "00:01:07"}'''
    self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
    # Scientific notation must not be mistaken for a bare identifier.
    inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
    self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
def test_js_to_json_edgecases(self):
    """Edge cases of js_to_json: escape handling, comments, trailing commas, and
    hex/octal numeric literals."""
    # Mixed escaped quotes and backslashes inside a single-quoted value.
    on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
    self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
    on = js_to_json('{"abc": true}')
    self.assertEqual(json.loads(on), {'abc': True})
    # Ignore JavaScript code as well
    on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
    d = json.loads(on)
    self.assertEqual(d['x'], 1)
    self.assertEqual(d['y'], 'a')
    # Just drop ! prefix for now though this results in a wrong value
    on = js_to_json('''{
a: !0,
b: !1,
c: !!0,
d: !!42.42,
e: !!![],
f: !"abc",
g: !"",
!42: 42
}''')
    self.assertEqual(json.loads(on), {
        'a': 0,
        'b': 1,
        'c': 0,
        'd': 42.42,
        'e': [],
        'f': "abc",
        'g': "",
        '42': 42
    })
    # Trailing commas and /* */ or // comments must be stripped.
    on = js_to_json('["abc", "def",]')
    self.assertEqual(json.loads(on), ['abc', 'def'])
    on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
    self.assertEqual(json.loads(on), ['abc', 'def'])
    on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
    self.assertEqual(json.loads(on), ['abc', 'def'])
    on = js_to_json('{"abc": "def",}')
    self.assertEqual(json.loads(on), {'abc': 'def'})
    on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
    self.assertEqual(json.loads(on), {'abc': 'def'})
    # Comment-like and delimiter-like text inside strings must be preserved.
    on = js_to_json('{ 0: /* " \n */ ",]" , }')
    self.assertEqual(json.loads(on), {'0': ',]'})
    on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
    self.assertEqual(json.loads(on), {'0': ',]'})
    on = js_to_json('{ 0: // comment\n1 }')
    self.assertEqual(json.loads(on), {'0': 1})
    # HTML-style escaped slashes and \x escapes are normalized.
    on = js_to_json(r'["<p>x<\/p>"]')
    self.assertEqual(json.loads(on), ['<p>x</p>'])
    on = js_to_json(r'["\xaa"]')
    self.assertEqual(json.loads(on), ['\u00aa'])
    # Backslash line continuations inside strings are removed.
    on = js_to_json("['a\\\nb']")
    self.assertEqual(json.loads(on), ['ab'])
    on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
    self.assertEqual(json.loads(on), ['ab'])
    # Bare hex/octal literals are converted to decimal; quoted ones untouched.
    on = js_to_json('{0xff:0xff}')
    self.assertEqual(json.loads(on), {'255': 255})
    on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
    self.assertEqual(json.loads(on), {'255': 255})
    on = js_to_json('{077:077}')
    self.assertEqual(json.loads(on), {'63': 63})
    on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
    self.assertEqual(json.loads(on), {'63': 63})
    on = js_to_json('{42:42}')
    self.assertEqual(json.loads(on), {'42': 42})
    on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
    self.assertEqual(json.loads(on), {'42': 42})
    on = js_to_json('{42:4.2e1}')
    self.assertEqual(json.loads(on), {'42': 42.0})
    on = js_to_json('{ "0x40": "0x40" }')
    self.assertEqual(json.loads(on), {'0x40': '0x40'})
    on = js_to_json('{ "040": "040" }')
    self.assertEqual(json.loads(on), {'040': '040'})
    # A // comment that itself contains braces must not confuse the parser.
    on = js_to_json('[1,//{},\n2]')
    self.assertEqual(json.loads(on), [1, 2])
def test_js_to_json_malformed(self):
    """js_to_json quotes bare identifiers even in malformed number-prefixed input."""
    for malformed, expected in [('42a1', '42"a1"'), ('42a-1', '42"a"-1')]:
        self.assertEqual(js_to_json(malformed), expected)
def test_extract_attributes(self):
    """extract_attributes parses the attribute dict of a single HTML start tag."""
    # Quoting styles: double, single, and bare attribute values.
    self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
    self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
    self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
    # Numeric and hex character references are decoded.
    self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'})  # XML
    self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
    self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'})  # HTML 3.2
    self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'})  # HTML 4.0
    # An unterminated/unknown entity is kept verbatim.
    self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
    self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
    self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
    # Valueless attributes map to None.
    self.assertEqual(extract_attributes('<e x >'), {'x': None})
    self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
    self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
    # A repeated attribute keeps the last occurrence.
    self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
    # Newlines around '=' are tolerated; newlines inside quotes are preserved.
    self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
    self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
    self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
    self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
    self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'})  # Names lowercased
    self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
    self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
    self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
    # Non-ASCII content, including combining characters, is preserved as-is.
    self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
    self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
    # "Narrow" Python builds don't support unicode code points outside BMP.
    try:
        compat_chr(0x10000)
        supports_outside_bmp = True
    except ValueError:
        supports_outside_bmp = False
    if supports_outside_bmp:
        self.assertEqual(extract_attributes('<e x="Smile 😀!">'), {'x': 'Smile \U0001f600!'})
    # Malformed HTML should not break attributes extraction on older Python
    self.assertEqual(extract_attributes('<mal"formed/>'), {})
def test_clean_html(self):
    """clean_html collapses whitespace and maps <br>/non-breaking space sensibly."""
    for markup, cleaned in [
        ('a:\nb', 'a: b'),
        ('a:\n "b"', 'a: "b"'),
        ('a<br>\xa0b', 'a\nb'),
    ]:
        self.assertEqual(clean_html(markup), cleaned)
def test_intlist_to_bytes(self):
    """intlist_to_bytes packs a list of 0-255 values into a bytes object."""
    byte_values = [0, 1, 127, 128, 255]
    self.assertEqual(intlist_to_bytes(byte_values), b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
    """args_to_str quotes a command line per-platform (POSIX vs. Windows)."""
    if compat_os_name != 'nt':
        expected = 'foo ba/r -baz \'2 be\' \'\''
    else:
        expected = 'foo ba/r -baz "2 be" ""'
    self.assertEqual(args_to_str(['foo', 'ba/r', '-baz', '2 be', '']), expected)
def test_parse_filesize(self):
    """parse_filesize understands SI/binary suffixes; invalid input yields None."""
    for text, size in [
        (None, None),
        ('', None),
        ('91 B', 91),
        ('foobar', None),
        ('2 MiB', 2097152),          # binary (1024-based) suffix
        ('5 GB', 5000000000),        # decimal (1000-based) suffix
        ('1.2Tb', 1200000000000),
        ('1.2tb', 1200000000000),
        ('1,24 KB', 1240),           # comma as decimal separator
        ('1,24 kb', 1240),
        ('8.5 megabytes', 8500000),
    ]:
        self.assertEqual(parse_filesize(text), size)
def test_parse_count(self):
    """parse_count parses human-readable counters (1.1k, 10M, '100 views', …)."""
    for text, count in [
        (None, None),
        ('', None),
        ('0', 0),
        ('1000', 1000),
        ('1.000', 1000),
        ('1.1k', 1100),
        ('1.1 k', 1100),
        ('1,1 k', 1100),
        ('1.1kk', 1100000),
        ('1.1kk ', 1100000),
        ('1,1kk', 1100000),
        ('100 views', 100),
        ('1,100 views', 1100),
        ('1.1kk views', 1100000),
        ('10M views', 10000000),
        ('has 10M views', 10000000),
    ]:
        self.assertEqual(parse_count(text), count)
def test_parse_resolution(self):
    """parse_resolution extracts width/height from WxH, Np and NK notations."""
    for text, resolution in [
        (None, {}),
        ('', {}),
        (' 1920x1080', {'width': 1920, 'height': 1080}),
        ('1920×1080 ', {'width': 1920, 'height': 1080}),  # Unicode multiplication sign
        ('1920 x 1080', {'width': 1920, 'height': 1080}),
        ('720p', {'height': 720}),
        ('4k', {'height': 2160}),
        ('8K', {'height': 4320}),
        ('pre_1920x1080_post', {'width': 1920, 'height': 1080}),
        ('ep1x2', {}),  # must not misread an episode marker as a resolution
        ('1920, 1080', {'width': 1920, 'height': 1080}),
    ]:
        self.assertEqual(parse_resolution(text), resolution)
def test_parse_bitrate(self):
    """parse_bitrate extracts the numeric kbps value from a bitrate string."""
    for text, bitrate in [
        (None, None),
        ('', None),
        ('300kbps', 300),
        ('1500kbps', 1500),
        ('300 kbps', 300),
    ]:
        self.assertEqual(parse_bitrate(text), bitrate)
def test_version_tuple(self):
    """version_tuple splits a dotted (or dashed) version string into an int tuple."""
    for version, expected in [
        ('1', (1,)),
        ('10.23.344', (10, 23, 344)),
        ('10.1-6', (10, 1, 6)),  # avconv style
    ]:
        self.assertEqual(version_tuple(version), expected)
def test_detect_exe_version(self):
    """detect_exe_version pulls the version token out of `ffmpeg -version`-style banners."""
    self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
    # Git snapshot builds carry an N-<rev>-g<hash> pseudo-version.
    self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
    # Unrelated output before the banner must be skipped.
    self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
    """age_restricted(content_limit, age) is True only when the viewer age is
    strictly below the content's limit; None on either side means unrestricted."""
    self.assertFalse(age_restricted(None, 10))  # unrestricted content
    self.assertFalse(age_restricted(1, None))  # unrestricted policy
    self.assertFalse(age_restricted(8, 10))
    self.assertTrue(age_restricted(18, 14))
    # Exactly meeting the limit is allowed.
    self.assertFalse(age_restricted(18, 18))
def test_is_html(self):
    """is_html sniffs HTML from leading bytes, honouring UTF-8/16/32 BOMs."""
    # Arbitrary bytes before the tag mean it is not an HTML document.
    self.assertFalse(is_html(b'\x49\x44\x43<html'))
    self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
    self.assertTrue(is_html(  # UTF-8 with BOM
        b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
    self.assertTrue(is_html(  # UTF-16-LE
        b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
    ))
    self.assertTrue(is_html(  # UTF-16-BE
        b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
    ))
    self.assertTrue(is_html(  # UTF-32-BE
        b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
    self.assertTrue(is_html(  # UTF-32-LE
        b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'empty', 'bcd'],
[[123, '', 4], [9999, '', 51]]),
'a empty bcd\n'
'123 4\n'
'9999 51')
self.assertEqual(
render_table(
['a', 'empty', 'bcd'],
[[123, '', 4], [9999, '', 51]],
hide_empty=True),
'a bcd\n'
'123 4\n'
'9999 51')
self.assertEqual(
render_table(
['\ta', 'bcd'],
[['1\t23', 4], ['\t9999', 51]]),
' a bcd\n'
'1 23 4\n'
'9999 51')
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]],
delim='-'),
'a bcd\n'
'--------\n'
'123 4\n'
'9999 51')
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]],
delim='-', extra_gap=2),
'a bcd\n'
'----------\n'
'123 4\n'
'9999 51')
def test_match_str(self):
    """match_str evaluates the --match-filter expression language against a
    metadata dict; the optional third argument toggles incomplete-data mode."""
    # Unary
    self.assertFalse(match_str('xy', {'x': 1200}))
    self.assertTrue(match_str('!xy', {'x': 1200}))
    self.assertTrue(match_str('x', {'x': 1200}))
    self.assertFalse(match_str('!x', {'x': 1200}))
    # A present falsy value still counts as "present".
    self.assertTrue(match_str('x', {'x': 0}))
    self.assertTrue(match_str('is_live', {'is_live': True}))
    self.assertFalse(match_str('is_live', {'is_live': False}))
    self.assertFalse(match_str('is_live', {'is_live': None}))
    self.assertFalse(match_str('is_live', {}))
    self.assertFalse(match_str('!is_live', {'is_live': True}))
    self.assertTrue(match_str('!is_live', {'is_live': False}))
    self.assertTrue(match_str('!is_live', {'is_live': None}))
    self.assertTrue(match_str('!is_live', {}))
    self.assertTrue(match_str('title', {'title': 'abc'}))
    self.assertTrue(match_str('title', {'title': ''}))
    self.assertFalse(match_str('!title', {'title': 'abc'}))
    self.assertFalse(match_str('!title', {'title': ''}))
    # Numeric
    self.assertFalse(match_str('x>0', {'x': 0}))
    self.assertFalse(match_str('x>0', {}))
    # The '?' suffix makes a comparison succeed when the field is missing.
    self.assertTrue(match_str('x>?0', {}))
    # K/M suffixes and H:M:S durations are understood on the right-hand side.
    self.assertTrue(match_str('x>1K', {'x': 1200}))
    self.assertFalse(match_str('x>2K', {'x': 1200}))
    self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
    self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
    self.assertTrue(match_str('x > 1:0:0', {'x': 3700}))
    # String
    self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
    self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
    self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
    self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
    # ^= startswith, $= endswith, *= contains (each negatable with '!').
    self.assertTrue(match_str('y^=foo', {'y': 'foobar42'}))
    self.assertFalse(match_str('y!^=foo', {'y': 'foobar42'}))
    self.assertFalse(match_str('y^=bar', {'y': 'foobar42'}))
    self.assertTrue(match_str('y!^=bar', {'y': 'foobar42'}))
    # String operators on a numeric field are a usage error.
    self.assertRaises(ValueError, match_str, 'x^=42', {'x': 42})
    self.assertTrue(match_str('y*=bar', {'y': 'foobar42'}))
    self.assertFalse(match_str('y!*=bar', {'y': 'foobar42'}))
    self.assertFalse(match_str('y*=baz', {'y': 'foobar42'}))
    self.assertTrue(match_str('y!*=baz', {'y': 'foobar42'}))
    self.assertTrue(match_str('y$=42', {'y': 'foobar42'}))
    self.assertFalse(match_str('y$=43', {'y': 'foobar42'}))
    # And
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 90, 'description': 'foo'}))
    self.assertTrue(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'description': 'foo'}))
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
    self.assertFalse(match_str(
        'like_count > 100 & dislike_count <? 50 & description',
        {'like_count': 190, 'dislike_count': 10}))
    # Regex
    self.assertTrue(match_str(r'x~=\bbar', {'x': 'foo bar'}))
    self.assertFalse(match_str(r'x~=\bbar.+', {'x': 'foo bar'}))
    self.assertFalse(match_str(r'x~=^FOO', {'x': 'foo bar'}))
    self.assertTrue(match_str(r'x~=(?i)^FOO', {'x': 'foo bar'}))
    # Quotes
    self.assertTrue(match_str(r'x^="foo"', {'x': 'foo "bar"'}))
    self.assertFalse(match_str(r'x^="foo "', {'x': 'foo "bar"'}))
    self.assertFalse(match_str(r'x$="bar"', {'x': 'foo "bar"'}))
    self.assertTrue(match_str(r'x$=" \"bar\""', {'x': 'foo "bar"'}))
    # Escaping &
    self.assertFalse(match_str(r'x=foo & bar', {'x': 'foo & bar'}))
    self.assertTrue(match_str(r'x=foo \& bar', {'x': 'foo & bar'}))
    self.assertTrue(match_str(r'x=foo \& bar & x^=foo', {'x': 'foo & bar'}))
    self.assertTrue(match_str(r'x="foo \& bar" & x^=foo', {'x': 'foo & bar'}))
    # Example from docs
    self.assertTrue(match_str(
        r"!is_live & like_count>?100 & description~='(?i)\bcats \& dogs\b'",
        {'description': 'Raining Cats & Dogs'}))
    # Incomplete
    self.assertFalse(match_str('id!=foo', {'id': 'foo'}, True))
    self.assertTrue(match_str('x', {'id': 'foo'}, True))
    self.assertTrue(match_str('!x', {'id': 'foo'}, True))
    self.assertFalse(match_str('x', {'id': 'foo'}, False))
def test_parse_dfxp_time_expr(self):
    """parse_dfxp_time_expr converts TTML time expressions to seconds."""
    for expr, seconds in [
        (None, None),
        ('', None),
        ('0.1', 0.1),
        ('0.1s', 0.1),
        ('00:00:01', 1.0),
        ('00:00:01.100', 1.1),
        ('00:00:01:100', 1.1),  # frame-style separator before the fraction
    ]:
        self.assertEqual(parse_dfxp_time_expr(expr), seconds)
def test_dfxp2srt(self):
    """dfxp2srt converts TTML/DFXP subtitle documents to SRT text.

    NOTE(review): the expected SRT blocks below appear to have lost the blank
    separator lines between cues in transit (SRT cues are normally separated
    by a blank line) — confirm against actual dfxp2srt output.
    """
    # Basic conversion; cues with invalid/negative timestamps are dropped.
    dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
<p begin="-1" end="-1">Ignore, two</p>
<p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''.encode()
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
    self.assertEqual(dfxp2srt(dfxp_data), srt_data)
    # A document without the default TTML namespace must still convert.
    dfxp_data_no_default_namespace = b'''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
    self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
    # TTML styling (color/weight/style/decoration) maps to <font>/<b>/<i>/<u>.
    dfxp_data_with_style = b'''<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
<head>
<styling>
<style id="s2" style="s0" tts:color="cyan" tts:fontWeight="bold" />
<style id="s1" style="s0" tts:color="yellow" tts:fontStyle="italic" />
<style id="s3" style="s0" tts:color="lime" tts:textDecoration="underline" />
<style id="s0" tts:backgroundColor="black" tts:fontStyle="normal" tts:fontSize="16" tts:fontFamily="sansSerif" tts:color="white" />
</styling>
</head>
<body tts:textAlign="center" style="s0">
<div>
<p begin="00:00:02.08" id="p0" end="00:00:05.84">default style<span tts:color="red">custom style</span></p>
<p style="s2" begin="00:00:02.08" id="p0" end="00:00:05.84"><span tts:color="lime">part 1<br /></span><span tts:color="cyan">part 2</span></p>
<p style="s3" begin="00:00:05.84" id="p1" end="00:00:09.56">line 3<br />part 3</p>
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
</div>
</body>
</tt>'''
    srt_data = '''1
00:00:02,080 --> 00:00:05,840
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
2
00:00:02,080 --> 00:00:05,840
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
</font>part 2</font></b>
3
00:00:05,840 --> 00:00:09,560
<u><font color="lime">line 3
part 3</font></u>
4
00:00:09,560 --> 00:00:12,360
<i><u><font color="yellow"><font color="lime">inner
</font>style</font></u></i>
'''
    self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
    # Non-UTF-8 input (here UTF-16, per its XML declaration) is decoded too.
    dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">Line 1</p>
<p begin="1" end="2">第二行</p>
</div>
</body>
</tt>'''.encode('utf-16')
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
Line 1
2
00:00:01,000 --> 00:00:02,000
第二行
'''
    self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
def test_cli_option(self):
    """cli_option emits ['--flag', str(value)] when the param is set, else []."""
    for args, expected in [
        (({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128']),
        (({'proxy': None}, '--proxy', 'proxy'), []),
        (({}, '--proxy', 'proxy'), []),
        (({'retries': 10}, '--retries', 'retries'), ['--retries', '10']),
    ]:
        self.assertEqual(cli_option(*args), expected)
def test_cli_valueless_option(self):
    """cli_valueless_option emits a bare flag when the param equals the expected value."""
    for args, expected in [
        (({'downloader': 'external'}, '--external-downloader', 'downloader', 'external'),
         ['--external-downloader']),
        (({'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'),
         []),
        (({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
         ['--no-check-certificate']),
        (({'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'),
         []),
        # expected_value=False inverts the match.
        (({'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False),
         []),
        (({'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False),
         ['--no-check-certificate']),
    ]:
        self.assertEqual(cli_valueless_option(*args), expected)
def test_cli_bool_option(self):
    """cli_bool_option renders a boolean param as flag + true/false value,
    with optional custom value words and separator."""
    for args, kwargs, expected in [
        ((({'nocheckcertificate': True}), '--no-check-certificate', 'nocheckcertificate'),
         {}, ['--no-check-certificate', 'true']),
        ((({'nocheckcertificate': True}), '--no-check-certificate', 'nocheckcertificate'),
         {'separator': '='}, ['--no-check-certificate=true']),
        ((({'nocheckcertificate': True}), '--check-certificate', 'nocheckcertificate', 'false', 'true'),
         {}, ['--check-certificate', 'false']),
        ((({'nocheckcertificate': True}), '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
         {}, ['--check-certificate=false']),
        ((({'nocheckcertificate': False}), '--check-certificate', 'nocheckcertificate', 'false', 'true'),
         {}, ['--check-certificate', 'true']),
        ((({'nocheckcertificate': False}), '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
         {}, ['--check-certificate=true']),
        # A missing param produces no arguments at all.
        (({}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
         {}, []),
    ]:
        self.assertEqual(cli_bool_option(*args, **kwargs), expected)
def test_ohdave_rsa_encrypt(self):
    """ohdave_rsa_encrypt encrypts a byte string with RSA public key (e, N)
    and returns the result as a lowercase hex string."""
    # Public RSA modulus used for the fixture.
    N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
    e = 65537  # the standard public exponent F4
    self.assertEqual(
        ohdave_rsa_encrypt(b'aa111222', e, N),
        '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
def test_pkcs1pad(self):
    """pkcs1pad pads an int-list to the target length (0x00 0x02 ... 0x00 data);
    a target too small for the payload must raise ValueError."""
    payload = [1, 2, 3]
    padded = pkcs1pad(payload, 32)
    self.assertEqual(padded[:2], [0, 2])
    self.assertEqual(padded[28:], [0, 1, 2, 3])
    self.assertRaises(ValueError, pkcs1pad, payload, 8)
def test_encode_base_n(self):
    """encode_base_n converts an int to the given base, with an optional custom alphabet."""
    custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
    for args, encoded in [
        ((0, 30), '0'),
        ((80, 30), '2k'),
        ((0, 30, custom_table), '9'),
        ((80, 30, custom_table), '7P'),
    ]:
        self.assertEqual(encode_base_n(*args), encoded)
    # A base larger than the (custom) alphabet must be rejected.
    self.assertRaises(ValueError, encode_base_n, 0, 70)
    self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
def test_caesar(self):
    """caesar shifts characters within the given alphabet; others pass through."""
    for args, expected in [
        (('ace', 'abcdef', 2), 'cea'),
        (('cea', 'abcdef', -2), 'ace'),   # negative shift inverts
        (('ace', 'abcdef', -2), 'eac'),
        (('eac', 'abcdef', 2), 'ace'),
        (('ace', 'abcdef', 0), 'ace'),    # zero shift is the identity
        (('xyz', 'abcdef', 2), 'xyz'),    # characters outside the alphabet untouched
        (('abc', 'acegik', 2), 'ebg'),
        (('ebg', 'acegik', -2), 'abc'),
    ]:
        self.assertEqual(caesar(*args), expected)
def test_rot47(self):
    """rot47 applies the ROT47 substitution over the printable ASCII range."""
    for plain, encoded in [('yt-dlp', r'JE\5=A'), ('YT-DLP', r'*%\s{!')]:
        self.assertEqual(rot47(plain), encoded)
def test_urshift(self):
    """urshift emulates JavaScript's unsigned right shift (>>>) on 32-bit ints."""
    for value, expected in [(3, 1), (-3, 2147483646)]:
        self.assertEqual(urshift(value, 1), expected)
# Shared fixture: a single <span> carrying two classes ("foo" and "bar"),
# used by the get_element(_html)_by_class/attribute tests below.
GET_ELEMENT_BY_CLASS_TEST_STRING = '''
<span class="foo bar">nice</span>
'''
def test_get_element_by_class(self):
    """get_element_by_class returns the first matching element's inner text, or None."""
    markup = self.GET_ELEMENT_BY_CLASS_TEST_STRING
    self.assertEqual(get_element_by_class('foo', markup), 'nice')
    self.assertEqual(get_element_by_class('no-such-class', markup), None)
def test_get_element_html_by_class(self):
    """get_element_html_by_class returns the matching element's outer HTML, or None.

    Fix: the missing-class assertion previously called get_element_by_class
    (a copy-paste from test_get_element_by_class), so the HTML variant's
    None path was never exercised.
    """
    html = self.GET_ELEMENT_BY_CLASS_TEST_STRING
    self.assertEqual(get_element_html_by_class('foo', html), html.strip())
    self.assertEqual(get_element_html_by_class('no-such-class', html), None)
# Shared fixture: an element matched by a non-class attribute (itemprop),
# including a valueless boolean attribute (itemscope).
GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING = '''
<div itemprop="author" itemscope>foo</div>
'''
def test_get_element_by_attribute(self):
    """get_element_by_attribute matches the full attribute value exactly."""
    markup = self.GET_ELEMENT_BY_CLASS_TEST_STRING
    for value, expected in [('foo bar', 'nice'), ('foo', None), ('no-such-foo', None)]:
        self.assertEqual(get_element_by_attribute('class', value, markup), expected)
    markup = self.GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING
    self.assertEqual(get_element_by_attribute('itemprop', 'author', markup), 'foo')
def test_get_element_html_by_attribute(self):
    """get_element_html_by_attribute returns the matching element's outer HTML."""
    markup = self.GET_ELEMENT_BY_CLASS_TEST_STRING
    self.assertEqual(get_element_html_by_attribute('class', 'foo bar', markup), markup.strip())
    # Partial or unknown attribute values must not match.
    for value in ('foo', 'no-such-foo'):
        self.assertEqual(get_element_html_by_attribute('class', value, markup), None)
    markup = self.GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING
    self.assertEqual(get_element_html_by_attribute('itemprop', 'author', markup), markup.strip())
# Shared fixture with TWO sibling matches, for the plural get_elements_* tests.
GET_ELEMENTS_BY_CLASS_TEST_STRING = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
# The expected outer-HTML results corresponding to the fixture above.
GET_ELEMENTS_BY_CLASS_RES = ['<span class="foo bar">nice</span>', '<span class="foo bar">also nice</span>']
def test_get_elements_by_class(self):
    """get_elements_by_class returns the inner text of every match, [] when none."""
    markup = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
    self.assertEqual(get_elements_by_class('foo', markup), ['nice', 'also nice'])
    self.assertEqual(get_elements_by_class('no-such-class', markup), [])
def test_get_elements_html_by_class(self):
    """get_elements_html_by_class returns the outer HTML of every match, [] when none."""
    markup = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
    self.assertEqual(get_elements_html_by_class('foo', markup), self.GET_ELEMENTS_BY_CLASS_RES)
    self.assertEqual(get_elements_html_by_class('no-such-class', markup), [])
def test_get_elements_by_attribute(self):
    """get_elements_by_attribute collects inner texts of exact attribute matches."""
    markup = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
    self.assertEqual(get_elements_by_attribute('class', 'foo bar', markup), ['nice', 'also nice'])
    # Partial or unknown attribute values must not match.
    for value in ('foo', 'no-such-foo'):
        self.assertEqual(get_elements_by_attribute('class', value, markup), [])
def test_get_elements_html_by_attribute(self):
    """get_elements_html_by_attribute collects outer HTML of exact attribute matches."""
    markup = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
    self.assertEqual(get_elements_html_by_attribute('class', 'foo bar', markup), self.GET_ELEMENTS_BY_CLASS_RES)
    for value in ('foo', 'no-such-foo'):
        self.assertEqual(get_elements_html_by_attribute('class', value, markup), [])
def test_get_elements_text_and_html_by_attribute(self):
    """get_elements_text_and_html_by_attribute yields (text, outer_html) pairs."""
    markup = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
    expected_pairs = list(zip(['nice', 'also nice'], self.GET_ELEMENTS_BY_CLASS_RES))
    self.assertEqual(
        list(get_elements_text_and_html_by_attribute('class', 'foo bar', markup)),
        expected_pairs)
    # Partial or unknown attribute values must produce no pairs.
    for value in ('foo', 'no-such-foo'):
        self.assertEqual(list(get_elements_text_and_html_by_attribute('class', value, markup)), [])
# Fixture for tag-based extraction: nested <div>s plus an inner <span>, a
# stray closing </p>, and trailing text outside any matched tag.
GET_ELEMENT_BY_TAG_TEST_STRING = '''
random text lorem ipsum</p>
<div>
this should be returned
<span>this should also be returned</span>
<div>
this should also be returned
</div>
closing tag above should not trick, so this should also be returned
</div>
but this text should not be returned
'''
# Expected results are carved out of the fixture by hard-coded character
# offsets. NOTE(review): the slice bounds assume the exact whitespace of the
# fixture above — re-derive them if the fixture ever changes.
GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML = GET_ELEMENT_BY_TAG_TEST_STRING.strip()[32:276]
GET_ELEMENT_BY_TAG_RES_OUTERDIV_TEXT = GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML[5:-6]  # strip '<div>' ... '</div>'
GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML = GET_ELEMENT_BY_TAG_TEST_STRING.strip()[78:119]
GET_ELEMENT_BY_TAG_RES_INNERSPAN_TEXT = GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML[6:-7]  # strip '<span>' ... '</span>'
def test_get_element_text_and_html_by_tag(self):
    """get_element_text_and_html_by_tag returns (inner_text, outer_html) of the
    first matching tag, handling nested same-name tags correctly."""
    html = self.GET_ELEMENT_BY_TAG_TEST_STRING
    # The OUTER div is returned even though it contains a nested <div>.
    self.assertEqual(
        get_element_text_and_html_by_tag('div', html),
        (self.GET_ELEMENT_BY_TAG_RES_OUTERDIV_TEXT, self.GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML))
    self.assertEqual(
        get_element_text_and_html_by_tag('span', html),
        (self.GET_ELEMENT_BY_TAG_RES_INNERSPAN_TEXT, self.GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML))
    # A tag that never appears must raise rather than return None.
    self.assertRaises(compat_HTMLParseError, get_element_text_and_html_by_tag, 'article', html)
def test_iri_to_uri(self):
    """iri_to_uri: percent-encode non-ASCII path/query characters and
    punycode international host names; valid URIs pass through unchanged."""
    self.assertEqual(
        iri_to_uri('https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b'),
        'https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b')  # Same
    self.assertEqual(
        iri_to_uri('https://www.google.com/search?q=Käsesoßenrührlöffel'),  # German for cheese sauce stirring spoon
        'https://www.google.com/search?q=K%C3%A4seso%C3%9Fenr%C3%BChrl%C3%B6ffel')
    # Already-escaped sequences and URI delimiters must be preserved.
    self.assertEqual(
        iri_to_uri('https://www.google.com/search?q=lt<+gt>+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~#trash=?&garbage=#'),
        'https://www.google.com/search?q=lt%3C+gt%3E+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~#trash=?&garbage=#')
    # International domain names become punycode (IDNA).
    self.assertEqual(
        iri_to_uri('http://правозащита38.рф/category/news/'),
        'http://xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
    self.assertEqual(
        iri_to_uri('http://www.правозащита38.рф/category/news/'),
        'http://www.xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
    self.assertEqual(
        iri_to_uri('https://i❤.ws/emojidomain/👍👏🤝💪'),
        'https://xn--i-7iq.ws/emojidomain/%F0%9F%91%8D%F0%9F%91%8F%F0%9F%A4%9D%F0%9F%92%AA')
    self.assertEqual(
        iri_to_uri('http://日本語.jp/'),
        'http://xn--wgv71a119e.jp/')
    self.assertEqual(
        iri_to_uri('http://导航.中国/'),
        'http://xn--fet810g.xn--fiqs8s/')
def test_clean_podcast_url(self):
    """clean_podcast_url strips known tracking/redirect prefixes
    (podtrac, chtbl) down to the real media URL."""
    self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
    self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
def test_LazyList(self):
    """LazyList must behave like the underlying list for indexing, slicing,
    truthiness, len/repr/str and reversal."""
    it = list(range(10))

    self.assertEqual(list(LazyList(it)), it)
    self.assertEqual(LazyList(it).exhaust(), it)
    self.assertEqual(LazyList(it)[5], it[5])

    # All slice shapes, including negative steps.
    self.assertEqual(LazyList(it)[5:], it[5:])
    self.assertEqual(LazyList(it)[:5], it[:5])
    self.assertEqual(LazyList(it)[::2], it[::2])
    self.assertEqual(LazyList(it)[1::2], it[1::2])
    self.assertEqual(LazyList(it)[5::-1], it[5::-1])
    self.assertEqual(LazyList(it)[6:2:-2], it[6:2:-2])
    self.assertEqual(LazyList(it)[::-1], it[::-1])

    self.assertTrue(LazyList(it))
    self.assertFalse(LazyList(range(0)))
    self.assertEqual(len(LazyList(it)), len(it))
    self.assertEqual(repr(LazyList(it)), repr(it))
    self.assertEqual(str(LazyList(it)), str(it))

    # reverse=True and reversed() both iterate back-to-front.
    self.assertEqual(list(LazyList(it, reverse=True)), it[::-1])
    self.assertEqual(list(reversed(LazyList(it))[::-1]), it)
    self.assertEqual(list(reversed(LazyList(it))[1:3:7]), it[::-1][1:3:7])
def test_LazyList_laziness(self):
    """LazyList must consume only as much of the iterable as indexing requires."""

    def test(ll, idx, val, cache):
        # Check both the value and exactly how much of the source was cached.
        self.assertEqual(ll[idx], val)
        self.assertEqual(getattr(ll, '_LazyList__cache'), list(cache))

    ll = LazyList(range(10))
    test(ll, 0, 0, range(1))
    test(ll, 5, 5, range(6))
    test(ll, -3, 7, range(10))  # negative index forces full exhaustion

    ll = LazyList(range(10), reverse=True)
    test(ll, -1, 0, range(1))
    test(ll, 3, 6, range(10))

    # Works on infinite iterators as long as indexing stays bounded.
    ll = LazyList(itertools.count())
    test(ll, 10, 10, range(11))
    ll = reversed(ll)
    test(ll, -15, 14, range(15))
def test_format_bytes(self):
    """format_bytes renders binary (1024-based) units from B up to YiB."""
    self.assertEqual(format_bytes(0), '0.00B')
    self.assertEqual(format_bytes(1000), '1000.00B')  # still below 1 KiB
    self.assertEqual(format_bytes(1024), '1.00KiB')
    self.assertEqual(format_bytes(1024**2), '1.00MiB')
    self.assertEqual(format_bytes(1024**3), '1.00GiB')
    self.assertEqual(format_bytes(1024**4), '1.00TiB')
    self.assertEqual(format_bytes(1024**5), '1.00PiB')
    self.assertEqual(format_bytes(1024**6), '1.00EiB')
    self.assertEqual(format_bytes(1024**7), '1.00ZiB')
    self.assertEqual(format_bytes(1024**8), '1.00YiB')
    # Beyond the largest unit the YiB count keeps growing.
    self.assertEqual(format_bytes(1024**9), '1024.00YiB')
def test_hide_login_info(self):
    """Config.hide_login_info masks credential option values with PRIVATE,
    for both '-u value' and '--username=value' forms."""
    self.assertEqual(Config.hide_login_info(['-u', 'foo', '-p', 'bar']),
                     ['-u', 'PRIVATE', '-p', 'PRIVATE'])
    # A trailing flag with no value is left alone.
    self.assertEqual(Config.hide_login_info(['-u']), ['-u'])
    self.assertEqual(Config.hide_login_info(['-u', 'foo', '-u', 'bar']),
                     ['-u', 'PRIVATE', '-u', 'PRIVATE'])
    self.assertEqual(Config.hide_login_info(['--username=foo']),
                     ['--username=PRIVATE'])
def test_locked_file(self):
    """Exercise locked_file: while one mode holds the lock, writers must be
    blocked; readers should coexist (known platform issue noted below)."""
    TEXT = 'test_locked_file\n'
    FILE = 'test_locked_file.ytdl'
    MODES = 'war'  # Order is important: write, then append, then read back
    try:
        for lock_mode in MODES:
            with locked_file(FILE, lock_mode, False) as f:
                if lock_mode == 'r':
                    # 'w' then 'a' above should have written TEXT twice.
                    self.assertEqual(f.read(), TEXT * 2, 'Wrong file content')
                else:
                    f.write(TEXT)
                # While still holding lock_mode, probe every other mode.
                for test_mode in MODES:
                    testing_write = test_mode != 'r'
                    try:
                        with locked_file(FILE, test_mode, False):
                            pass
                    except (BlockingIOError, PermissionError):
                        if not testing_write:  # FIXME: readers should not be blocked
                            print(f'Known issue: Exclusive lock ({lock_mode}) blocks read access ({test_mode})')
                            continue
                        self.assertTrue(testing_write, f'{test_mode} is blocked by {lock_mode}')
                    else:
                        self.assertFalse(testing_write, f'{test_mode} is not blocked by {lock_mode}')
    finally:
        # Best-effort cleanup of the scratch file.
        try:
            os.remove(FILE)
        except Exception:
            pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| test/test_utils.py | 84,977 | !/usr/bin/env python3 Allow direct execution Various small unit tests No empty filename Handle a common case more neatly .. but make sure the file name is never empty keep the list ordered HTML5 entities Ignore JavaScript code as well Just drop ! prefix for now though this results in a wrong value XML HTML 3.2 HTML 4.0 Names lowercased "Narrow" Python builds don't support unicode code points outside BMP. Malformed HTML should not break attributes extraction on older Python avconv style unrestricted content unrestricted policy UTF-8 with BOM UTF-16-LE UTF-16-BE UTF-32-BE UTF-32-LE Unary Numeric String And Regex Quotes Escaping & Example from docs Incomplete Same German for cheese sauce stirring spoon Order is important FIXME | 733 | en | 0.818625 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#pylint: skip-file
from nose.tools import assert_equal
from iot_message.cryptor.plain import Cryptor
from iot_message.message import Message
__author__ = 'Bartosz Kościów'
import iot_message.factory as factory
class TestCryptorPlain(object):
    """Tests for the plain (pass-through) cryptor: encoding and decoding
    must leave message fields unchanged."""

    def setUp(self):
        # Reset Message class-level state so tests do not leak into each other.
        Message.chip_id = 'pc'
        Message.node_name = 'Turkusik'
        Message.drop_unencrypted = False
        Message.encoders = []
        Message.decoders = {}

    def test_encode_message(self):
        """Encrypting with the plain cryptor keeps every field intact."""
        Message.add_encoder(Cryptor())
        msg = factory.MessageFactory.create()
        inp = {"event": "channel.on", "parameters": {"channel": 0}, "response": "", "targets": ["node-north"]}
        msg.set(inp)
        msg.encrypt()
        assert_equal(inp["event"], msg.data["event"])
        assert_equal(inp["parameters"], msg.data["parameters"])
        assert_equal(inp["targets"], msg.data["targets"])

    def test_decrypt_message(self):
        """Decoding a raw JSON message yields its fields unchanged."""
        Message.add_decoder(Cryptor())
        inp = """{"protocol": "iot:1", "node": "Turkusik", "chip_id": "pc", "event": "message.plain", "parameters": ["a"], "response": "", "targets": ["Turkusik"]}"""
        msg = factory.MessageFactory.create(inp)
        assert_equal(msg.data["event"], "message.plain")
        assert_equal(msg.data["parameters"], ["a"])
        assert_equal(msg.data["targets"], ['Turkusik'])
| iot_message/tests/test_plain_cryptor.py | 1,394 | !/usr/bin/python3 -*- coding: utf-8 -*-pylint: skip-file | 56 | en | 0.452885 |
#! /usr/bin/env python
# Conda-recipe smoke test: instantiate the FrostNumberGeoModel pymt component
# in a scratch directory and print its default parameters.
import os

os.mkdir('_testing')
os.chdir('_testing')
os.environ['MPLBACKEND'] = 'Agg'  # headless matplotlib backend (no display)

from pymt.components import FrostNumberGeoModel as Model

model = Model()
# Each default is (name, (value, units)).
for default in model.defaults:
    print('{name}: {val} {units}'.format(
        name=default[0], val=default[1][0], units=default[1][1]))
| recipe/run_test.py | 324 | ! /usr/bin/env python | 21 | fr | 0.299742 |
"""
This module stores constants used during the operations of the UI.
"""
# Application info.
CM_NAME = "CovertMark"
CM_VER = "0.1"
CM_RELEASE = "alpha"
CM_AUTHOR = "C Shi"
CM_LINK = "https://github.com/chongyangshi"
CM_LICENSE = "Please see LICENSE.md for terms of usage of this program."
CM_TITLE = """\
_____ _ ___ ___ _
/ __ \ | | | \/ | | |
| / \/ _____ _____ _ __| |_| . . | __ _ _ __| | __
| | / _ \ \ / / _ | '__| __| |\/| |/ _` | '__| |/ /
| \__/| (_) \ V | __| | | |_| | | | (_| | | | <
\____/\___/ \_/ \___|_| \__\_| |_/\__,_|_| |_|\_\\
"""
DIVIDER = "-" * 40
PROCEDURE_RUN_FIELDS = ["strategy", "run_order", "user_params", "pt_pcap",
"pt_filters", "pt_collection", "neg_pcap", "neg_filters", "neg_collection",
"user_defined_name"]
# UI colours.
class colours:
    """ANSI escape sequences used to colour and style terminal output."""

    # Foreground colours.
    RED = '\033[91m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    PURPLE = '\033[95m'
    GRAY = '\033[90m'

    # Text effects.
    BOLD = '\033[1m'
    BGC = "\033[;7m"   # reverse video (swap foreground/background)
    ENDC = '\033[0m'   # reset all attributes
# Covertness verdicts keyed by (low, high) accuracy-percentage bands:
# each maps to a display colour and an explanatory sentence.
RATINGS = {
    (0, 75.0): (colours.GREEN, "This strategy is not very effective in identifying this obfuscation protocol."),
    (75.0, 90.0): (colours.PURPLE, "This strategy is reasonably effective in identifying this obfuscation protocol, and can be deployed by a state censor with some difficulties."),
    (90.0, 100.0): (colours.RED, "This strategy is very effective in identifying this obfuscation protocol, and can be easily deployed by a state censor.")
}

# Short band labels for the same accuracy ranges as RATINGS.
RATING_BANDS = {
    (0, 75.0): "Good Covertness",
    (75.0, 90.0): "Reasonable Covertness",
    (90.0, 100.0): "Bad Covertness"
}
| CovertMark/constants.py | 1,635 | This module stores constants used during the operations of the UI.
Application info. UI colours. | 98 | en | 0.818942 |
import matplotlib.pyplot as plt
import numpy as np
def gen_data(n, start=0, end=10):
    """Sample ``n`` evenly spaced points of sin(10t) - t**2 on [start, end]."""
    t = np.linspace(start, end, n)
    return np.sin(10 * t) - t * t
def gen_data_osc(n):
    """Return ``n`` samples of 1024 + (-2)**(-i/100).

    NOTE(review): for i not a multiple of 100 the negative base raised to a
    fractional power yields a complex value in Python 3, so for n > 1 the
    returned array has complex dtype — confirm this is the intended fixture.
    """
    samples = [1024 + (-2) ** (-k / 100) for k in range(n)]
    return np.array(samples)
def gen_data_rand(n):
    """``n`` Gaussian noise samples around the upward trend 0.3 * linspace(0, 10, n)."""
    trend = 0.3 * np.linspace(0, 10, n)
    return np.random.randn(n) + trend
def calc_cov(X, Y):
    """Sample covariance of X and Y (Bessel-corrected: divisor n - 1)."""
    dev_x = X - np.average(X)
    dev_y = Y - np.average(Y)
    return np.sum(dev_x * dev_y) / (X.shape[0] - 1)
def angular_coef(X, Y):
    """Least-squares slope of Y against X: cov(X, Y) / var(X)."""
    covariance = calc_cov(X, Y)
    variance = calc_cov(X, X)
    return covariance / variance
def linear_coef(a, X, Y):
    """Least-squares intercept for a line of slope ``a``: mean(Y) - a * mean(X)."""
    return np.average(Y) - a * np.average(X)
# Incremental least-squares demo: at each step i, fit a line to the first i
# samples and extrapolate one sampling interval ahead.
count = 100
end = 100
time = np.linspace(0, end, count)
data = gen_data(count)
delta = end / count  # sampling interval
preds = []
kg_preds = []        # NOTE(review): never populated — leftover from a Kalman-gain variant?
kg_prediction = 0    # NOTE(review): unused
for i in range(1, count):
    a = angular_coef(time[:i], data[:i])
    b = linear_coef(a, time[:i], data[:i])
    prediction = (time[i]+delta)*a + b  # one-step-ahead extrapolation
    preds.append(prediction)
    avg_X = np.average(time[:i])  # NOTE(review): avg_X/avg_Y/cov are computed but never used
    avg_Y = np.average(data[:i])
    cov = calc_cov(time[:i], data[:i])
# Final fit over the whole series uses the last computed a, b.
estimate = time*a + b
plt.scatter(time, data, label="Medições", color="#FF5850")
plt.scatter(time[1:], preds, label="Est. Min. Quad.", color="#62B21C")
plt.plot(time, estimate, label="Min. Quad. Final", color="#36A1FF")
plt.xlabel("Tempo")
plt.ylabel("Temperatura")
plt.title("Aproximação Por Minimos Quadrados")
# Show the legend for the three plotted series.
plt.legend()
plt.show() | Data Fusion Test/Minimos Quadrados Puro.py | 1,446 | Place a legend to the right of this smaller subplot. | 52 | en | 0.683234 |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SteamScrapeSpiderMiddleware(object):
    """Scrapy spider middleware (project-template defaults, no custom logic yet).

    Methods that are not defined are treated by Scrapy as pass-through.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the instance and subscribe to spider_opened.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response entering the spider; returning None
        # lets processing continue.
        return None

    def process_spider_output(self, response, result, spider):
        # Pass every Request/Item produced by the spider through unchanged.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # No special handling: returning None defers to other middleware.
        pass

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for start requests (no response);
        # must yield only requests.
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class SteamScrapeDownloaderMiddleware(object):
    """Scrapy downloader middleware (project-template defaults, no custom logic yet).

    Methods that are not defined are treated by Scrapy as pass-through.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the instance and subscribe to spider_opened.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each outgoing request; returning None continues the
        # normal download (other options: Response, Request, IgnoreRequest).
        return None

    def process_response(self, request, response, spider):
        # Called with the downloaded response; pass it through unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or process_request raises;
        # returning None continues exception processing.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| steam-scrapy/steam_scrape/middlewares.py | 3,607 | -*- coding: utf-8 -*- Define here the models for your spider middleware See documentation in: https://docs.scrapy.org/en/latest/topics/spider-middleware.html Not all methods need to be defined. If a method is not defined, scrapy acts as if the spider middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each response that goes through the spider middleware and into the spider. Should return None or raise an exception. Called with the results returned from the Spider, after it has processed the response. Must return an iterable of Request, dict or Item objects. Called when a spider or process_spider_input() method (from other spider middleware) raises an exception. Should return either None or an iterable of Request, dict or Item objects. Called with the start requests of the spider, and works similarly to the process_spider_output() method, except that it doesn’t have a response associated. Must return only requests (not items). Not all methods need to be defined. If a method is not defined, scrapy acts as if the downloader middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each request that goes through the downloader middleware. Must either: - return None: continue processing this request - or return a Response object - or return a Request object - or raise IgnoreRequest: process_exception() methods of installed downloader middleware will be called Called with the response returned from the downloader. Must either; - return a Response object - return a Request object - or raise IgnoreRequest Called when a download handler or a process_request() (from other downloader middleware) raises an exception. 
Must either: - return None: continue processing this exception - return a Response object: stops process_exception() chain - return a Request object: stops process_exception() chain | 1,931 | en | 0.871625 |
"""The tests for the Canary sensor platform."""
import copy
import unittest
from unittest.mock import Mock
from homeassistant.components.canary import DATA_CANARY
from homeassistant.components.sensor import canary
from homeassistant.components.sensor.canary import CanarySensor, \
SENSOR_TYPES, ATTR_AIR_QUALITY, STATE_AIR_QUALITY_NORMAL, \
STATE_AIR_QUALITY_ABNORMAL, STATE_AIR_QUALITY_VERY_ABNORMAL
from tests.common import (get_test_home_assistant)
from tests.components.test_canary import mock_device, mock_location
# Minimal canary component configuration shared by the tests below
# (each test deep-copies it in setUp).
VALID_CONFIG = {
    "canary": {
        "username": "foo@bar.org",
        "password": "bar",
    }
}
class TestCanarySensorSetup(unittest.TestCase):
    """Test the Canary platform."""

    # Entities collected by the mocked add_entities callback (class-level).
    DEVICES = []

    def add_entities(self, devices, action):
        """Mock add devices: record every created sensor."""
        for device in devices:
            self.DEVICES.append(device)

    def setUp(self):
        """Initialize values for this testcase class."""
        self.hass = get_test_home_assistant()
        self.config = copy.deepcopy(VALID_CONFIG)

    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    def test_setup_sensors(self):
        """Test the sensor setup."""
        online_device_at_home = mock_device(20, "Dining Room", True, "Canary")
        offline_device_at_home = mock_device(21, "Front Yard", False, "Canary")
        online_device_at_work = mock_device(22, "Office", True, "Canary")

        self.hass.data[DATA_CANARY] = Mock()
        self.hass.data[DATA_CANARY].locations = [
            mock_location("Home", True, devices=[online_device_at_home,
                                                 offline_device_at_home]),
            mock_location("Work", True, devices=[online_device_at_work]),
        ]

        canary.setup_platform(self.hass, self.config, self.add_entities, None)

        # NOTE(review): presumably 3 sensors per online device (2 online
        # devices here) — confirm against the platform's sensor list.
        assert 6 == len(self.DEVICES)

    def test_temperature_sensor(self):
        """Test the temperature sensor (value rounded, reported in °C)."""
        device = mock_device(10, "Family Room", "Canary")
        location = mock_location("Home", False)

        data = Mock()
        data.get_reading.return_value = 21.1234

        sensor = CanarySensor(data, SENSOR_TYPES[0], location, device)
        sensor.update()

        assert "Home Family Room Temperature" == sensor.name
        assert "°C" == sensor.unit_of_measurement
        assert 21.12 == sensor.state  # rounded to 2 decimals
        assert "mdi:thermometer" == sensor.icon

    def test_temperature_sensor_with_none_sensor_value(self):
        """Temperature sensor state is None when there is no reading."""
        device = mock_device(10, "Family Room", "Canary")
        location = mock_location("Home", False)

        data = Mock()
        data.get_reading.return_value = None

        sensor = CanarySensor(data, SENSOR_TYPES[0], location, device)
        sensor.update()

        assert sensor.state is None

    def test_humidity_sensor(self):
        """Test humidity sensor."""
        device = mock_device(10, "Family Room", "Canary")
        location = mock_location("Home")

        data = Mock()
        data.get_reading.return_value = 50.4567

        sensor = CanarySensor(data, SENSOR_TYPES[1], location, device)
        sensor.update()

        assert "Home Family Room Humidity" == sensor.name
        assert "%" == sensor.unit_of_measurement
        assert 50.46 == sensor.state
        assert "mdi:water-percent" == sensor.icon

    def test_air_quality_sensor_with_very_abnormal_reading(self):
        """Air quality readings below the abnormal threshold report VERY_ABNORMAL."""
        device = mock_device(10, "Family Room", "Canary")
        location = mock_location("Home")

        data = Mock()
        data.get_reading.return_value = 0.4

        sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
        sensor.update()

        assert "Home Family Room Air Quality" == sensor.name
        assert sensor.unit_of_measurement is None
        assert 0.4 == sensor.state
        assert "mdi:weather-windy" == sensor.icon

        air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY]
        assert STATE_AIR_QUALITY_VERY_ABNORMAL == air_quality

    def test_air_quality_sensor_with_abnormal_reading(self):
        """Mid-range air quality readings report ABNORMAL."""
        device = mock_device(10, "Family Room", "Canary")
        location = mock_location("Home")

        data = Mock()
        data.get_reading.return_value = 0.59

        sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
        sensor.update()

        assert "Home Family Room Air Quality" == sensor.name
        assert sensor.unit_of_measurement is None
        assert 0.59 == sensor.state
        assert "mdi:weather-windy" == sensor.icon

        air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY]
        assert STATE_AIR_QUALITY_ABNORMAL == air_quality

    def test_air_quality_sensor_with_normal_reading(self):
        """A reading of 1.0 reports NORMAL air quality."""
        device = mock_device(10, "Family Room", "Canary")
        location = mock_location("Home")

        data = Mock()
        data.get_reading.return_value = 1.0

        sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
        sensor.update()

        assert "Home Family Room Air Quality" == sensor.name
        assert sensor.unit_of_measurement is None
        assert 1.0 == sensor.state
        assert "mdi:weather-windy" == sensor.icon

        air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY]
        assert STATE_AIR_QUALITY_NORMAL == air_quality

    def test_air_quality_sensor_with_none_sensor_value(self):
        """With no reading, state and attributes are both None."""
        device = mock_device(10, "Family Room", "Canary")
        location = mock_location("Home")

        data = Mock()
        data.get_reading.return_value = None

        sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
        sensor.update()

        assert sensor.state is None
        assert sensor.device_state_attributes is None

    def test_battery_sensor(self):
        """Test battery sensor."""
        device = mock_device(10, "Family Room", "Canary Flex")
        location = mock_location("Home")

        data = Mock()
        data.get_reading.return_value = 70.4567

        sensor = CanarySensor(data, SENSOR_TYPES[4], location, device)
        sensor.update()

        assert "Home Family Room Battery" == sensor.name
        assert "%" == sensor.unit_of_measurement
        assert 70.46 == sensor.state
        assert "mdi:battery-70" == sensor.icon

    def test_wifi_sensor(self):
        """Test wifi signal strength sensor."""
        device = mock_device(10, "Family Room", "Canary Flex")
        location = mock_location("Home")

        data = Mock()
        data.get_reading.return_value = -57

        sensor = CanarySensor(data, SENSOR_TYPES[3], location, device)
        sensor.update()

        assert "Home Family Room Wifi" == sensor.name
        assert "dBm" == sensor.unit_of_measurement
        assert -57 == sensor.state
        assert "mdi:wifi" == sensor.icon
| tests/components/sensor/test_canary.py | 7,016 | Test the Canary platform.
Mock add devices.
Initialize values for this testcase class.
Stop everything that was started.
Test air quality sensor.
Test air quality sensor.
Test air quality sensor.
Test air quality sensor.
Test battery sensor.
Test humidity sensor.
Test the sensor setup.
Test temperature sensor with fahrenheit.
Test temperature sensor with fahrenheit.
Test battery sensor.
The tests for the Canary sensor platform. | 431 | en | 0.734232 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-07 21:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make UserProfile.picture_path optional (allow blank and NULL)."""

    dependencies = [
        ('users', '0002_auto_20160706_2232'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='picture_path',
            field=models.CharField(blank=True, max_length=128, null=True),
        ),
    ]
| market_place/users/migrations/0003_auto_20160708_0036.py | 483 | -*- coding: utf-8 -*- Generated by Django 1.9.6 on 2016-07-07 21:36 | 67 | en | 0.708277 |
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from collections import defaultdict
import json
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
class LockCache(object):
    """In-memory index of the StateLocks held by incomplete Jobs.

    Locks are indexed by job id, by locked item, and by read/write kind so
    callers can cheaply query lock state.  Registered lock_change_receivers
    are notified of every add/remove (e.g. for long polling).
    """

    # Lock change receivers are called whenever a change occurs to the locks.
    # Each receiver is invoked as receiver(lock, LOCK_ADD | LOCK_REMOVE).
    lock_change_receivers = []
    LOCK_ADD = 1
    LOCK_REMOVE = 2

    def __init__(self):
        """Populate the cache from every incomplete Job's serialized locks."""
        from chroma_core.models import Job, StateLock

        self.write_locks = []
        self.write_by_item = defaultdict(list)
        self.read_locks = []
        self.read_by_item = defaultdict(list)
        self.all_by_job = defaultdict(list)
        self.all_by_item = defaultdict(list)

        for job in Job.objects.filter(~Q(state="complete")):
            if job.locks_json:
                locks = json.loads(job.locks_json)
                for lock in locks:
                    self._add(StateLock.from_dict(job, lock))

    def call_receivers(self, lock, add_remove):
        """Notify every registered receiver of a lock change."""
        for lock_change_receiver in self.lock_change_receivers:
            lock_change_receiver(lock, add_remove)

    def remove_job(self, job):
        """Drop all locks held by ``job``; return how many were removed."""
        locks = list(self.all_by_job[job.id])
        n = len(locks)
        for lock in locks:
            if lock.write:
                self.write_locks.remove(lock)
                self.write_by_item[lock.locked_item].remove(lock)
            else:
                self.read_locks.remove(lock)
                self.read_by_item[lock.locked_item].remove(lock)

            self.all_by_job[job.id].remove(lock)
            self.all_by_item[lock.locked_item].remove(lock)
            self.call_receivers(lock, self.LOCK_REMOVE)
        return n

    def add(self, lock):
        """Register a lock; its job must already have been assigned an id."""
        self._add(lock)

    def _add(self, lock):
        # A lock can only be indexed once its job has a database id.
        assert lock.job.id is not None

        if lock.write:
            self.write_locks.append(lock)
            self.write_by_item[lock.locked_item].append(lock)
        else:
            self.read_locks.append(lock)
            self.read_by_item[lock.locked_item].append(lock)

        self.all_by_job[lock.job.id].append(lock)
        self.all_by_item[lock.locked_item].append(lock)
        self.call_receivers(lock, self.LOCK_ADD)

    def get_by_job(self, job):
        """Return all locks held by ``job``."""
        return self.all_by_job[job.id]

    def get_all(self, locked_item):
        """Return all locks (read and write) on ``locked_item``."""
        return self.all_by_item[locked_item]

    def get_latest_write(self, locked_item, not_job=None):
        """Return the write lock on ``locked_item`` with the highest job id,
        optionally ignoring locks held by ``not_job``; None if there is none.

        Fixed for Python 3: the previous implementation passed a cmp
        function positionally to sorted() and used the removed cmp()
        builtin, both of which fail at runtime on Python 3.
        """
        candidates = self.write_by_item[locked_item]
        if not_job is not None:
            candidates = [l for l in candidates if l.job != not_job]
        if not candidates:
            return None
        return max(candidates, key=lambda lock: lock.job.id)

    def get_read_locks(self, locked_item, after, not_job):
        """Read locks on ``locked_item`` from jobs with id >= ``after``,
        excluding those held by ``not_job``."""
        return [x for x in self.read_by_item[locked_item] if after <= x.job.id and x.job != not_job]

    def get_write(self, locked_item):
        """Return all write locks on ``locked_item``."""
        return self.write_by_item[locked_item]

    def get_by_locked_item(self, item):
        """Return all locks (read and write) on ``item``."""
        return self.all_by_item[item]

    def get_write_by_locked_item(self):
        """Map each write-locked item to its latest (highest job id) write lock.

        Uses max(key=...) instead of the Python-2 sorted(cmp) idiom (see
        get_latest_write).
        """
        result = {}
        for locked_item, locks in self.write_by_item.items():
            if locks:
                result[locked_item] = max(locks, key=lambda lock: lock.job.id)
        return result
def lock_change_receiver():
    """Decorator factory registering a callback for lock add/remove events.

    Usage::

        @lock_change_receiver()
        def receiver(lock, add_remove):
            ...

    The callback is invoked with the lock and LockCache.LOCK_ADD or
    LockCache.LOCK_REMOVE.
    """

    def register(func):
        LockCache.lock_change_receivers.append(func)
        return func

    return register
def to_lock_json(lock, add_remove=LockCache.LOCK_ADD):
    """Serialize a lock (plus whether it is being added or removed) into a
    JSON-compatible dict for sending to clients."""
    # Downcast to the concrete model subclass when available, so the
    # content type below refers to the most specific model.
    if getattr(lock.locked_item, "downcast", None) and callable(lock.locked_item.downcast):
        item = lock.locked_item.downcast()
    else:
        item = lock.locked_item

    return {
        "job_id": lock.job.id,
        "content_type_id": ContentType.objects.get_for_model(item).id,
        "item_id": lock.locked_item.id,
        "uuid": lock.uuid,
        "description": lock.job.description(),
        "lock_type": "write" if lock.write else "read",
        "action": "add" if add_remove == LockCache.LOCK_ADD else "remove",
    }
| chroma_core/services/job_scheduler/lock_cache.py | 4,624 | A decorator for connecting receivers to signals that a lock has change.
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
Copyright (c) 2020 DDN. All rights reserved. Use of this source code is governed by a MIT-style license that can be found in the LICENSE file. Lock change receivers are called whenever a change occurs to the locks. It allows something to respond to changes. An example would be long polling. The receivers are called with the lock being removed and LOCK_ADD or LOCK_REMOVE as the paramter. | 560 | en | 0.957696 |
"""
Support for interfacing with the XBMC/Kodi JSON-RPC API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.kodi/
"""
import asyncio
import logging
import urllib
import aiohttp
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_STOP,
SUPPORT_TURN_OFF, SUPPORT_PLAY, SUPPORT_VOLUME_STEP, MediaPlayerDevice,
PLATFORM_SCHEMA)
from homeassistant.const import (
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME,
CONF_PORT, CONF_USERNAME, CONF_PASSWORD)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['jsonrpc-async==0.2']

_LOGGER = logging.getLogger(__name__)

CONF_TURN_OFF_ACTION = 'turn_off_action'

DEFAULT_NAME = 'Kodi'
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 5  # seconds per JSON-RPC request

# Accepted values for the turn_off_action option (None = no turn-off support).
TURN_OFF_ACTION = [None, 'quit', 'hibernate', 'suspend', 'reboot', 'shutdown']

# Feature flags supported on every Kodi instance; SUPPORT_TURN_OFF is added
# dynamically based on the configured turn_off_action.
SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
    SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
    SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_PLAY | SUPPORT_VOLUME_STEP

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_TURN_OFF_ACTION, default=None): vol.In(TURN_OFF_ACTION),
    vol.Inclusive(CONF_USERNAME, 'auth'): cv.string,
    vol.Inclusive(CONF_PASSWORD, 'auth'): cv.string,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
                         discovery_info=None):
    """Setup the Kodi platform."""
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)

    # Strip a scheme prefix if the user supplied one.  The previous code used
    # str.lstrip('http://'), which removes any leading run of the characters
    # h/t/p/s/:/'/' rather than the prefix itself, mangling host names such
    # as 'phone.local' -> 'one.local'.
    for prefix in ('http://', 'https://'):
        if host.startswith(prefix):
            host = host[len(prefix):]
            _LOGGER.warning(
                "Kodi host name should no longer conatin http:// See updated "
                "definitions here: "
                "https://home-assistant.io/components/media_player.kodi/")
            break

    entity = KodiDevice(
        hass,
        name=config.get(CONF_NAME),
        host=host, port=port,
        username=config.get(CONF_USERNAME),
        password=config.get(CONF_PASSWORD),
        turn_off_action=config.get(CONF_TURN_OFF_ACTION))

    yield from async_add_entities([entity], update_before_add=True)
class KodiDevice(MediaPlayerDevice):
"""Representation of a XBMC/Kodi device."""
def __init__(self, hass, name, host, port, username=None, password=None,
             turn_off_action=None):
    """Initialize the Kodi device."""
    import jsonrpc_async
    self.hass = hass
    self._name = name

    kwargs = {
        'timeout': DEFAULT_TIMEOUT,
        'session': async_get_clientsession(hass),
    }

    # HTTP basic auth applies to both the JSON-RPC endpoint and image URLs.
    if username is not None:
        kwargs['auth'] = aiohttp.BasicAuth(username, password)
        image_auth_string = "{}:{}@".format(username, password)
    else:
        image_auth_string = ""

    self._http_url = 'http://{}:{}/jsonrpc'.format(host, port)
    self._image_url = 'http://{}{}:{}/image'.format(
        image_auth_string, host, port)

    self._server = jsonrpc_async.Server(self._http_url, **kwargs)

    self._turn_off_action = turn_off_action
    # State cached by async_update; _players is None when Kodi is unreachable.
    self._players = list()
    self._properties = None
    self._item = None
    self._app_properties = None
@property
def name(self):
    """Return the configured name of this Kodi instance."""
    return self._name
@asyncio.coroutine
def _get_players(self):
    """Return the active player objects or None."""
    import jsonrpc_async
    try:
        return (yield from self._server.Player.GetActivePlayers())
    except jsonrpc_async.jsonrpc.TransportError:
        # Kodi unreachable: log at INFO (traceback at DEBUG) only on the
        # first failure, i.e. while the previous update still succeeded.
        if self._players is not None:
            _LOGGER.info('Unable to fetch kodi data')
            _LOGGER.debug('Unable to fetch kodi data', exc_info=True)
        return None
@property
def state(self):
    """Return the state of the device."""
    if self._players is None:
        return STATE_OFF  # last update could not reach Kodi

    if len(self._players) == 0:
        return STATE_IDLE  # reachable, but nothing playing

    # Speed 0 means paused, except for live streams which cannot pause.
    if self._properties['speed'] == 0 and not self._properties['live']:
        return STATE_PAUSED
    else:
        return STATE_PLAYING
@asyncio.coroutine
def async_update(self):
    """Retrieve latest state."""
    self._players = yield from self._get_players()

    if self._players is not None and len(self._players) > 0:
        # Only the first active player is tracked.
        player_id = self._players[0]['playerid']
        assert isinstance(player_id, int)

        self._properties = yield from self._server.Player.GetProperties(
            player_id,
            ['time', 'totaltime', 'speed', 'live']
        )

        self._item = (yield from self._server.Player.GetItem(
            player_id,
            ['title', 'file', 'uniqueid', 'thumbnail', 'artist']
        ))['item']

        self._app_properties = \
            yield from self._server.Application.GetProperties(
                ['volume', 'muted']
            )
    else:
        # Nothing playing (or Kodi unreachable): clear cached media details.
        self._properties = None
        self._item = None
        self._app_properties = None
@property
def volume_level(self):
    """Volume level of the media player (0..1)."""
    if self._app_properties is not None:
        # Kodi reports volume on a 0..100 scale.
        return self._app_properties['volume'] / 100.0
@property
def is_volume_muted(self):
    """Boolean if volume is currently muted; None when state is unknown."""
    if self._app_properties is not None:
        return self._app_properties['muted']
@property
def media_content_id(self):
    """Content ID of current playing media, if Kodi provides one."""
    if self._item is not None:
        return self._item.get('uniqueid', None)
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._players is not None and len(self._players) > 0:
return self._players[0]['type']
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._properties is not None and not self._properties['live']:
total_time = self._properties['totaltime']
return (
total_time['hours'] * 3600 +
total_time['minutes'] * 60 +
total_time['seconds'])
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._item is None:
return None
url_components = urllib.parse.urlparse(self._item['thumbnail'])
if url_components.scheme == 'image':
return '{}/{}'.format(
self._image_url,
urllib.parse.quote_plus(self._item['thumbnail']))
@property
def media_title(self):
"""Title of current playing media."""
# find a string we can use as a title
if self._item is not None:
return self._item.get(
'title',
self._item.get('label', self._item.get('file', 'unknown')))
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
supported_media_commands = SUPPORT_KODI
if self._turn_off_action in TURN_OFF_ACTION:
supported_media_commands |= SUPPORT_TURN_OFF
return supported_media_commands
    @asyncio.coroutine
    def async_turn_off(self):
        """Execute turn_off_action to turn off media player."""
        # Each configured action maps to the matching Kodi JSON-RPC call.
        if self._turn_off_action == 'quit':
            yield from self._server.Application.Quit()
        elif self._turn_off_action == 'hibernate':
            yield from self._server.System.Hibernate()
        elif self._turn_off_action == 'suspend':
            yield from self._server.System.Suspend()
        elif self._turn_off_action == 'reboot':
            yield from self._server.System.Reboot()
        elif self._turn_off_action == 'shutdown':
            yield from self._server.System.Shutdown()
        else:
            # No (or an unrecognised) action configured; nothing to do.
            _LOGGER.warning('turn_off requested but turn_off_action is none')
    @asyncio.coroutine
    def async_volume_up(self):
        """Volume up the media player."""
        # NOTE(review): `assert` is stripped under `python -O`; the RPC call
        # still runs then, only the 'OK' result check disappears.
        assert (
            yield from self._server.Input.ExecuteAction('volumeup')) == 'OK'
    @asyncio.coroutine
    def async_volume_down(self):
        """Volume down the media player."""
        # NOTE(review): `assert` is stripped under `python -O`; the RPC call
        # still runs then, only the 'OK' result check disappears.
        assert (
            yield from self._server.Input.ExecuteAction('volumedown')) == 'OK'
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1.
This method must be run in the event loop and returns a coroutine.
"""
return self._server.Application.SetVolume(int(volume * 100))
def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) media player.
This method must be run in the event loop and returns a coroutine.
"""
return self._server.Application.SetMute(mute)
@asyncio.coroutine
def async_set_play_state(self, state):
"""Helper method for play/pause/toggle."""
players = yield from self._get_players()
if len(players) != 0:
yield from self._server.Player.PlayPause(
players[0]['playerid'], state)
def async_media_play_pause(self):
"""Pause media on media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state('toggle')
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(True)
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(False)
@asyncio.coroutine
def async_media_stop(self):
"""Stop the media player."""
players = yield from self._get_players()
if len(players) != 0:
yield from self._server.Player.Stop(players[0]['playerid'])
@asyncio.coroutine
def _goto(self, direction):
"""Helper method used for previous/next track."""
players = yield from self._get_players()
if len(players) != 0:
if direction == 'previous':
# first seek to position 0. Kodi goes to the beginning of the
# current track if the current track is not at the beginning.
yield from self._server.Player.Seek(players[0]['playerid'], 0)
yield from self._server.Player.GoTo(
players[0]['playerid'], direction)
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('next')
def async_media_previous_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('previous')
@asyncio.coroutine
def async_media_seek(self, position):
"""Send seek command."""
players = yield from self._get_players()
time = {}
time['milliseconds'] = int((position % 1) * 1000)
position = int(position)
time['seconds'] = int(position % 60)
position /= 60
time['minutes'] = int(position % 60)
position /= 60
time['hours'] = int(position)
if len(players) != 0:
yield from self._server.Player.Seek(players[0]['playerid'], time)
def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player.
This method must be run in the event loop and returns a coroutine.
"""
if media_type == "CHANNEL":
return self._server.Player.Open(
{"item": {"channelid": int(media_id)}})
else:
return self._server.Player.Open(
{"item": {"file": str(media_id)}})
| homeassistant/components/media_player/kodi.py | 12,525 | Representation of a XBMC/Kodi device.
Initialize the Kodi device.
Return the active player objects or None.
Helper method used for previous/next track.
Send next track command.
This method must be run in the event loop and returns a coroutine.
Pause the media player.
This method must be run in the event loop and returns a coroutine.
Play media.
This method must be run in the event loop and returns a coroutine.
Pause media on media player.
This method must be run in the event loop and returns a coroutine.
Send next track command.
This method must be run in the event loop and returns a coroutine.
Send seek command.
Stop the media player.
Mute (true) or unmute (false) media player.
This method must be run in the event loop and returns a coroutine.
Send the play_media command to the media player.
This method must be run in the event loop and returns a coroutine.
Helper method for play/pause/toggle.
Set volume level, range 0..1.
This method must be run in the event loop and returns a coroutine.
Setup the Kodi platform.
Execute turn_off_action to turn off media player.
Retrieve latest state.
Volume down the media player.
Volume up the media player.
Boolean if volume is currently muted.
Content ID of current playing media.
Content type of current playing media.
Duration of current playing media in seconds.
Image url of current playing media.
Title of current playing media.
Return the name of the device.
Return the state of the device.
Flag of media commands that are supported.
Volume level of the media player (0..1).
Support for interfacing with the XBMC/Kodi JSON-RPC API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.kodi/
find a string we can use as a title first seek to position 0. Kodi goes to the beginning of the current track if the current track is not at the beginning. | 1,890 | en | 0.880453 |
## DQN Tutorial
## Implementation from https://github.com/FitMachineLearning
import torch
import gym
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dataclasses import dataclass
from typing import Any
from random import random
@dataclass
class sars:
    # One RL transition (state, action, reward, next_state) plus training
    # bookkeeping: terminal flag, target Q value and (optional) advantage.
    state: Any
    action: Any
    reward: float
    next_state: Any
    done: bool
    qval: float
    advantage: float = 0.0
class DQNAgent:
    """Pairs an actor network (action selection) with a critic (Q values)."""
    def __init__(self,actor_model,critic_model):
        self.actor_model = actor_model
        self.critic_model = critic_model
    def get_actions(self, observations):
        # import ipdb; ipdb.set_trace()
        # Run the actor on the observations, on the actor's own device.
        guessed_actions = self.actor_model(torch.Tensor(observations).to(self.actor_model.device))
        return guessed_actions
    def get_predicted_Q_values(self,observation_and_action):
        guessed_Qs = self.critic_model(torch.Tensor(observation_and_action))
        # NOTE(review): `guessed_Qs(-1)` calls the tensor, which raises
        # TypeError at runtime; presumably `guessed_Qs.max(-1)[1]` was
        # intended — confirm before relying on this method.
        return guessed_Qs(-1)[1]
    def update_target_model(self):
        # NOTE(review): `self.targetModel` / `self.model` are never set on
        # this class (only actor_model/critic_model exist), so calling this
        # raises AttributeError — confirm the intended attributes.
        self.targetModel.load_state_dict(self.model.state_dict())
class ActorModel(nn.Module):
    """MLP policy network mapping a flat observation to an action vector.

    Args:
        obs_shape: 1-tuple holding the flat observation dimension.
        action_shape: 1-tuple holding the action dimension.
        lr: learning rate for the attached Adam optimizer (self.opt).
    """
    def __init__(self, obs_shape, action_shape, lr):
        super(ActorModel, self).__init__()
        assert len(obs_shape) == 1, "This network only works on flat observations"
        self.obs_shape = obs_shape
        self.action_shape = action_shape
        self.net = torch.nn.Sequential(
            torch.nn.Linear(obs_shape[0], 512),
            torch.nn.ReLU(),
            torch.nn.Linear(512, action_shape[0]),
        )
        self.opt = optim.Adam(self.net.parameters(), lr=lr)
        # Bug fix: the original only assigned self.device inside the CUDA
        # branch (with a nonsensical `else 'cuda:1'` fallback), so CPU-only
        # hosts never got a .device attribute, breaking
        # DQNAgent.get_actions which reads actor_model.device.
        if torch.cuda.is_available():
            print("Using CUDA")
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')
        self.to(self.device)
    def forward(self, x):
        """Run the policy network on observation tensor x."""
        return self.net(x)
class CriticModel(nn.Module):
    """MLP Q-network mapping a concatenated (observation, action) vector
    to a single predicted Q value.

    Args:
        obs_shape: 1-tuple holding the flat observation dimension.
        action_shape: 1-tuple holding the action dimension.
        lr: learning rate for the attached Adam optimizer (self.opt).
    """
    def __init__(self, obs_shape, action_shape, lr):
        super(CriticModel, self).__init__()
        assert len(obs_shape) == 1, "This network only works on flat observations"
        self.obs_shape = obs_shape
        self.action_shape = action_shape
        self.net = torch.nn.Sequential(
            torch.nn.Linear(obs_shape[0] + action_shape[0], 512),
            torch.nn.ReLU(),
            torch.nn.Linear(512, 1),  # one output: the predicted Q value
        )
        self.opt = optim.Adam(self.net.parameters(), lr=lr)
        # Bug fix: the original only assigned self.device inside the CUDA
        # branch (with a nonsensical `else 'cuda:1'` fallback), so CPU-only
        # hosts never got a .device attribute.
        if torch.cuda.is_available():
            print("Using CUDA")
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')
        self.to(self.device)
    def forward(self, x):
        """Run the Q-network on a concatenated (obs, action) tensor x."""
        return self.net(x)
class ReplayBuffer:
    """Fixed-size ring buffer of transitions with uniform sampling."""
    def __init__(self, buffer_size=1000):
        self.buffer_size = buffer_size
        # Pre-allocated object array used as a ring buffer; slots past the
        # write position stay None until the buffer has filled once.
        self.buffer = np.empty((buffer_size), dtype=object)
        # Total number of insertions ever made (not clamped to buffer_size).
        self.index = 0
    def insert(self, sars):
        """Store one transition, overwriting the oldest entry once full."""
        self.buffer[self.index % self.buffer_size] = sars
        self.index += 1
    def sample(self, num_samples, current_episode_steps):
        """Uniformly sample up to num_samples stored transitions.

        current_episode_steps is kept for interface compatibility and is
        unused. Returns an empty list when nothing has been inserted yet.
        """
        # Bug fix: the original guarded on len(self.buffer), which is
        # always buffer_size for the pre-allocated array, so an empty
        # buffer would have been sampled (yielding None entries). Guard on
        # the insertion count instead.
        if self.index == 0:
            return []
        filled = self.buffer[0:min(self.index, self.buffer_size)]
        return np.random.choice(filled, min(num_samples, self.index))
| Pytorch/ActorCritic/agent_and_model.py | 4,409 | DQN Tutorial Implementation from https://github.com/FitMachineLearning import ipdb; ipdb.set_trace() import ipdb; ipdb.set_trace() torch.nn.Linear(1024,256), torch.nn.ReLU(), torch.nn.Linear(2048,512), torch.nn.ReLU(), one out put because we are predicting Q values self.buffer_size = buffer_size self.buffer = [] self.buffer.append(sars) print("inserting index ", self.index, "@",self.index%self.buffer_size) import ipdb; ipdb.set_trace() if(self.index > self.buffer_size and self.index%self.buffer_size==0): print("first 10 ",self.buffer[0:10]) print("last 10 ",self.buffer[-10:]) print("") import ipdb; ipdb.set_trace() self.buffer.append(sars) if(len(self.buffer)>self.buffer_size): self.buffer = self.buffer[1:] print("Clipping Buffer at size", len(self.buffer)) assert num_samples < min(len(self.buffer),self.index) if num_samples>self.index: print("sampling n ",min(num_samples,self.index)) | 922 | en | 0.368402 |
""" library to take autodiff and execute a computation graph """
from __future__ import absolute_import
import numpy as np
from .Node import Op
from .. import ndarray
from ..stream import *
import ctypes
import os
from pynvml import *
FLAG_SHOW_GRAPH = False  # debug switch used by the (commented-out) graph dumps
G_NODE_ID = 0  # NOTE(review): unused in this chunk; presumably a node counter
NAME_RULE = 1  # NOTE(review): unused in this chunk; presumably a naming scheme
def communicate_init(worker_num, worker_id, source_ip, target_ip):
    """Load lib_communication.so and open this worker's communication links.

    Stores the loaded library in the module-level ``lib_communicate``
    global so the ops (and communicate_finish) can reuse it.
    """
    global lib_communicate
    # lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002")
    # lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001")
    # Resolve the shared library relative to this source file's location.
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    lib_path = os.path.join(curr_path, '../../build/lib/')
    path_to_so_file = os.path.join(lib_path, "lib_communication.so")
    lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file)
    lib_communicate.DL_Connect_Init(
        worker_num, worker_id, source_ip, target_ip)
def communicate_finish():
    """Close the connections opened by communicate_init()."""
    lib_communicate.DL_Communicate_Close()
class Distributed_CommunicateOp(Op):
    """Graph op that reduces a gradient across workers through the native
    lib_communication library (communicate_init must run first)."""
    def __call__(self, nodeA):
        # Wrap nodeA: the new node communicates nodeA's value when computed.
        new_node = Op.__call__(self)
        new_node.inputs = [nodeA]
        new_node.name = "Distributed_Communicate(%s)" % (nodeA.name)
        # print nodeA.name
        return new_node
    def compute(self, node, input_vals, output_val, use_numpy=True):
        # The native library works on CPU buffers, so the gradient is
        # staged through host memory in both branches.
        after_reduce_gradient_cpu = ndarray.empty(
            shape=output_val.shape, ctx=ndarray.cpu(0))
        if use_numpy:
            gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0))
        else:
            gradient_val_cpu = ndarray.array(
                input_vals[0].asnumpy(), ctx=ndarray.cpu(0))
        # print gradient_val_cpu.asnumpy()
        lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle)
        lib_communicate.DL_Communicate(
            gradient_val_cpu.handle, after_reduce_gradient_cpu.handle)
        # print after_reduce_gradient_cpu.asnumpy()
        # Copy the reduced result back into the caller-provided output.
        if use_numpy:
            output_val[:] = after_reduce_gradient_cpu.asnumpy()
        else:
            after_reduce_gradient_cpu.copyto(output_val)
    def gradient(self, node, output_grad):
        # Differentiating through the communication op is not supported.
        raise NotImplementedError
    def infer_shape(self, node, input_shapes):
        # Reduction preserves the gradient's shape.
        return input_shapes[0]
distributed_communicate_op = Distributed_CommunicateOp()
class StreamExecutor(object):
    """Executor computes values for given set of nodes in computation graph."""
    def __init__(self, eval_node_list, ctx = None, stream = None, policy = None):
        """
        Parameters
        ----------
        eval_node_list: list of nodes whose values need to be computed.
        ctx: runtime DLContext, default is None which means np.ndarray on cpu
        stream: device stream to issue ops on; one is created when None
        policy: optional memory policy ('swap' or 'vdnn')
        topo_order: list of nodes in topological order
        node_to_shape_map: dict from node to shape of the node
        node_to_arr_map: dict from node to ndarray.NDArray allocated for node
        feed_shapes: shapes of feed_dict from last run(...)
        """
        self.eval_node_list = eval_node_list
        self.ctx = ctx
        # Reuse the caller's stream or create one for this context.
        if stream is None:
            self.stream = create_stream_handle(ctx)
        else:
            self.stream = stream
        self.stream.sync()
        self.topo_order = find_topo_sort(self.eval_node_list)
        # Lazily populated on the first run() (and when feed shapes change).
        self.node_to_shape_map = None
        self.node_to_arr_map = None
        self.feed_shapes = None
        self.policy = policy
        if self.policy == 'swap':
            self.swap_queue = []
    def infer_shape(self, feed_shapes):
        """Given shapes of feed_dict nodes, infer shape for all nodes in graph.

        Implementation note:
        Iteratively calls node.op.infer_shape to infer shapes.
        Node shapes stored in self.node_to_shape_map.

        Parameters
        ----------
        feed_shapes: node->shapes mapping for feed_dict nodes.
        """
        """TODO: Your code here"""
        self.node_to_shape_map = {}
        # topo_order guarantees every input's shape is known before its
        # consumer is visited.
        for node in self.topo_order:
            if node in feed_shapes:
                self.node_to_shape_map[node] = feed_shapes[node]
            else:
                # print(node.name)
                input_shapes = [self.node_to_shape_map[n] for n in node.inputs]
                self.node_to_shape_map[node] = node.op.infer_shape(
                    node, input_shapes)
    def memory_plan(self, feed_shapes):
        """Allocates ndarray.NDArray for every node except feed_dict nodes.

        Implementation note:
        Option 1: Alloc a ndarray.NDArray per node that persists across run()
        Option 2: Implement a memory pool to reuse memory for nodes of same
        shapes. More details see Lecture 7.

        For both options, self.node_to_arr_map stores node->NDArray mapping to
        allow mapping to persist across multiple executor.run().

        Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.

        Parameters
        ----------
        feed_shapes: node->shapes mapping for feed_dict nodes.
        """
        """TODO: Your code here"""
        assert (self.ctx is not None)
        # self.infer_shape(feed_shapes)
        self.node_to_arr_map = {}
        for node, shape in self.node_to_shape_map.items():
            if self.policy == 'swap':
                # Swapped nodes get no persistent device buffer here.
                if not node.swap:
                    self.node_to_arr_map[node] = ndarray.empty(
                        shape, ctx=self.ctx)
            elif self.policy == 'vdnn':
                # vdnn keeps values in host memory and moves them on demand.
                self.node_to_arr_map[node] = np.empty(shape)
            else:
                self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)
    def run(self, feed_dict, convert_to_numpy_ret_vals=False):
        """
        Parameters
        ----------
        feed_dict: a dictionary of node->np.ndarray supplied by user.
        convert_to_numpy_ret_vals: whether to convert ret vals to np.array

        Returns
        -------
        A list of values for nodes in eval_node_list. NDArray or np.ndarray.
        """
        def are_feed_shapes_equal(sa, sb):
            # True when both are dicts with exactly the same node->shape items.
            if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
                return False
            unmatched_item = set(sa.items()) ^ set(sb.items())
            return len(unmatched_item) == 0
        # Assume self.ctx is None implies numpy array and numpy ops.
        use_numpy = self.ctx is None
        node_to_val_map = {}
        for node, value in feed_dict.items():
            if use_numpy:
                # all values passed in feed_dict must be np.ndarray
                assert isinstance(value, np.ndarray)
                node_to_val_map[node] = value
            else:
                # convert values to ndarray.NDArray if necessary
                if isinstance(value, np.ndarray):
                    node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
                elif isinstance(value, ndarray.NDArray):
                    node_to_val_map[node] = value
                else:
                    assert False, "feed_dict value type not supported"
        # collect shapes for all placeholders
        # infer shape if feed_shapes changed since last run
        # e.g. call run() on test data after training
        feed_shapes = {}
        for node in node_to_val_map:
            feed_shapes[node] = node_to_val_map[node].shape
        if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):
            # Shapes changed: redo shape inference and (on device) re-plan
            # the persistent buffers before executing.
            self.infer_shape(feed_shapes)
            self.feed_shapes = feed_shapes
            if (not use_numpy):
                self.memory_plan(self.feed_shapes)
        # Traverse in topological order; inputs are always computed first.
        for node in self.topo_order:
            if node in node_to_val_map:
                # Placeholder: value already provided by feed_dict.
                continue
            input_vals = [node_to_val_map[n] for n in node.inputs]
            if use_numpy:
                node_val = np.empty(shape=self.node_to_shape_map[node])
            else:
                node_val = self.node_to_arr_map[node]
            # node_val is filled in place by the op, on self.stream.
            node.op.compute(node, input_vals, node_val, use_numpy, self.stream)
            node_to_val_map[node] = node_val
        # Wait for all queued device work before returning results.
        self.stream.sync()
        if not use_numpy and convert_to_numpy_ret_vals:
            return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]
        return [node_to_val_map[n] for n in self.eval_node_list]
# def run(self, feed_dict, convert_to_numpy_ret_vals=False):
# """
# Parameters
# ----------
# feed_dict: a dictionary of node->np.ndarray supplied by user.
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array
# Returns
# -------
# A list of values for nodes in eval_node_list. NDArray or np.ndarray.
# """
# def are_feed_shapes_equal(sa, sb):
# if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
# return False
# unmatched_item = set(sa.items()) ^ set(sb.items())
# return len(unmatched_item) == 0
# # Assume self.ctx is None implies numpy array and numpy ops.
# use_numpy = self.ctx is None
# node_to_val_map = {}
# for node, value in feed_dict.items():
# if self.policy == 'vdnn':
# assert isinstance(value, np.ndarray)
# node_to_val_map[node] = value
# else:
# if use_numpy:
# # all values passed in feed_dict must be np.ndarray
# assert isinstance(value, np.ndarray)
# node_to_val_map[node] = value
# else:
# # convert values to ndarray.NDArray if necessary
# if isinstance(value, np.ndarray):
# if self.policy == 'swap':
# if node.swap == True:
# node_to_val_map[node] = value
# else:
# node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
# else:
# node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
# elif isinstance(value, ndarray.NDArray):
# node_to_val_map[node] = value
# else:
# assert False, "feed_dict value type not supported"
# # collect shapes for all placeholders
# feed_shapes = {}
# for node in node_to_val_map:
# feed_shapes[node] = node_to_val_map[node].shape
# # infer shape if feed_shapes changed since last run
# # e.g. call run() on test data after trainng
# # print feed_shapes
# if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):
# self.infer_shape(feed_shapes)
# self.feed_shapes = feed_shapes
# if not self.policy == 'vdnn':
# # plan memory if using GPU
# if (not use_numpy):
# self.memory_plan(feed_shapes)
# # Traverse graph in topo order and compute values for all nodes.
# global FLAG_SHOW_GRAPH
# if self.policy == 'swap':
# # generate swap queue
# if not use_numpy:
# for node in self.topo_order:
# if node not in node_to_val_map:
# # variable in placeholder
# for input_node in node.inputs:
# if input_node.swap == True:
# self.swap_queue.append(input_node)
# # variable grad
# if node.swap == True:
# self.swap_queue.append(node)
# node_in_GPU = None
# if FLAG_SHOW_GRAPH:
# print "Show swap queue:"
# for node in self.swap_queue:
# print node
# elif self.policy == 'vdnn':
# # TODO traverse graph to select in-gpu window
# window = [0,0]
# if not use_numpy:
# nvmlInit()
# handle = nvmlDeviceGetHandleByIndex(0)
# info = nvmlDeviceGetMemoryInfo(handle)
# gpu_mem = info.free
# nvmlShutdown()
# loss_node = self.eval_node_list[0]
# window[1] = self.topo_order.index(loss_node)+1
# window[0] = self.topo_order.index(loss_node)+1
# for node in reversed(self.topo_order[:window[1]+1]):
# node_size = 4 # float32
# #print node, self.node_to_shape_map[node]
# for shape in self.node_to_shape_map[node]:
# node_size = node_size * shape
# if gpu_mem > node_size:
# gpu_mem = gpu_mem - node_size
# window[0] = window[0] - 1
# #print "gpu_mem:",gpu_mem
# # Traverse graph in topo order and compute values for all nodes.
# if FLAG_SHOW_GRAPH:
# print "run topo_order"
# # Show graph dependency
# if FLAG_SHOW_GRAPH:
# print "node:",node
# print "node.desc:",node.desc
# for node in self.topo_order:
# if self.policy == 'vdnn':
# # Skip placeholder nodes
# if node in node_to_val_map:
# continue
# # H2D before compute
# ## Collect inputs
# input_vals = []
# for n in node.inputs:
# if not use_numpy:
# if isinstance(node_to_val_map[n], np.ndarray):
# node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx)
# input_vals.append(node_to_val_map[n])
# ## Alloc node space
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx)
# # Compute
# # node_val is modified in-place whether np.ndarray or NDArray
# node.op.compute(node, input_vals, node_val, use_numpy)
# # D2H after compute
# if use_numpy:
# node_to_val_map[node] = node_val
# else:
# node_index = self.topo_order.index(node)
# if node_index > window[0] and node_index < window[1]:
# node_to_val_map[node] = node_val
# continue
# node_to_val_map[node] = node_val.asnumpy()
# del node_val
# for n in node.inputs:
# if isinstance(node_to_val_map[n], ndarray.NDArray):
# tmp_val = node_to_val_map[n].asnumpy()
# del node_to_val_map[n]
# node_to_val_map[n] = tmp_val
# elif self.policy == 'swap':
# # Switch in GPU
# if not use_numpy:
# if self.swap_queue and (node_in_GPU==None):
# swap_node = self.swap_queue[0]
# if swap_node in node_to_val_map:
# node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx)
# else:
# self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx)
# node_in_GPU = swap_node.id
# if node in node_to_val_map:
# # Skip placeholder nodes. Values already provided by feed_dict.
# continue
# # Compute
# input_vals = [node_to_val_map[n] for n in node.inputs]
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = self.node_to_arr_map[node]
# # node_val is modified in-place whether np.ndarray or NDArray
# node.op.compute(node, input_vals, node_val, use_numpy)
# if node.swap == True:
# node_to_val_map[node] = node_val.asnumpy()
# del node_val
# del self.node_to_arr_map[node]
# del self.swap_queue[0]
# node_in_GPU = None
# else:
# node_to_val_map[node] = node_val
# # Switch out GPU
# if not use_numpy:
# if self.swap_queue:
# if self.swap_queue[0] in node.inputs:
# out_node = self.swap_queue.pop(0)
# if self.swap_queue:
# if not self.swap_queue[0].id == node_in_GPU:
# tmp_array = node_to_val_map[out_node].asnumpy()
# del node_to_val_map[out_node]
# node_to_val_map[out_node] = tmp_array
# node_in_GPU = None
# else:
# if node in node_to_val_map:
# # Skip placeholder nodes. Values already provided by feed_dict.
# continue
# input_vals = [node_to_val_map[n] for n in node.inputs]
# # print self.node_to_shape_map[node]
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = self.node_to_arr_map[node]
# # node_val is modified in-place whether np.ndarray or NDArray
# # if (len(node.inputs) == 1):
# # print "computs",node.inputs[0].name
# # else:
# # print "computs",node.inputs[0].name,node.inputs[1].name
# # print node.name
# # print node_val.shape
# # print "xxx"
# # print node.name
# node.op.compute(node, input_vals, node_val, use_numpy)
# # print "xxx"
# node_to_val_map[node] = node_val
# # print "xxx"
# if FLAG_SHOW_GRAPH:
# FLAG_SHOW_GRAPH = False
# # Collect node values.
# if not use_numpy and convert_to_numpy_ret_vals:
# if self.policy == 'swap':
# node_values = []
# for n in self.eval_node_list:
# if n.swap == True:
# node_values.append(node_to_val_map[n])
# else:
# node_values.append(node_to_val_map[n].asnumpy())
# return node_values
# elif self.policy == 'vdnn':
# return [node_to_val_map[n] for n in self.eval_node_list]
# else:
# return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]
# return [node_to_val_map[n] for n in self.eval_node_list]
def gradients(output_node, node_list, scheduler_policy=None):
    """Take gradient of output node with respect to each node in node_list.

    Parameters
    ----------
    output_node: output node that we are taking derivative of.
    node_list: list of nodes that we are taking derivative wrt.

    Returns
    -------
    A list of gradient values, one for each node in node_list respectively.
    """
    from . import OnesLike
    # Partial adjoints contributed to each node by its consumers.
    partial_adjoints = {output_node: [OnesLike.oneslike_op(output_node)]}
    # Final (summed) adjoint per node.
    adjoints = {}
    # Walk the forward graph backwards so every consumer is processed
    # before any of its inputs.
    for node in reversed(find_topo_sort([output_node])):
        grad = sum_node_list(partial_adjoints[node])
        adjoints[node] = grad
        input_grads = node.op.gradient(node, grad)
        for i, inp in enumerate(node.inputs):
            # Accumulate this node's contribution to each input's adjoint.
            partial_adjoints.setdefault(inp, []).append(input_grads[i])
    if scheduler_policy == 'swap':
        # Propagate the swap flag from a variable to its gradient node.
        for node in node_list:
            if node.swap:
                adjoints[node].swap = True
    return [adjoints[node] for node in node_list]
def distributed_gradients(output_node, node_list, scheduler_policy=None):
    """Take gradient of output node with respect to each node in node_list.

    Like gradients(), but every returned gradient is wrapped in a
    Distributed_CommunicateOp so workers reduce it before use.

    Parameters
    ----------
    output_node: output node that we are taking derivative of.
    node_list: list of nodes that we are taking derivative wrt.

    Returns
    -------
    A list of gradient values, one for each node in node_list respectively.
    """
    from .OnesLike import oneslike_op
    # Partial adjoints contributed to each node by its consumers.
    partial_adjoints = {output_node: [oneslike_op(output_node)]}
    # Final (summed) adjoint per node.
    adjoints = {}
    # Walk the forward graph backwards so every consumer is processed
    # before any of its inputs.
    for node in reversed(find_topo_sort([output_node])):
        grad = sum_node_list(partial_adjoints[node])
        adjoints[node] = grad
        input_grads = node.op.gradient(node, grad)
        for i, inp in enumerate(node.inputs):
            # Accumulate this node's contribution to each input's adjoint.
            partial_adjoints.setdefault(inp, []).append(input_grads[i])
    if scheduler_policy == 'swap':
        # Propagate the swap flag from a variable to its gradient node.
        for node in node_list:
            if node.swap:
                adjoints[node].swap = True
    # Wrap every gradient in a communicate op so workers stay in sync.
    return [distributed_communicate_op(
        adjoints[node]) for node in node_list]
##################
# Helper Methods #
##################
def find_topo_sort(node_list):
    """Return a topological ordering of all ancestors of node_list.

    Runs a post-order DFS from each requested node: a node is appended
    only after all of its inputs, which yields a valid topological sort
    ending in the requested nodes.
    """
    seen = set()
    ordering = []
    for root in node_list:
        topo_sort_dfs(root, seen, ordering)
    return ordering
def topo_sort_dfs(node, visited, topo_order):
    """Post-order DFS helper: append node after all of its inputs."""
    if node in visited:
        return
    visited.add(node)
    for child in node.inputs:
        topo_sort_dfs(child, visited, topo_order)
    topo_order.append(node)
def sum_node_list(node_list):
    """Sum nodes pairwise with `+` only.

    Built-in sum() starts from 0, which would introduce a redundant node
    into the graph; a left fold over the list avoids that.
    """
    from functools import reduce
    return reduce(lambda left, right: left + right, node_list)
def broadcast_rule(shape_a, shape_b):
    """Return output shape of broadcasting shape_a with shape_b.

    e.g. broadcast_rule((3, 2), (4, 3, 2)) returns (4, 3, 2).
    Check out explanations and more examples at
    https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html
    http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/
    """
    assert(isinstance(shape_a, tuple))
    assert(isinstance(shape_b, tuple))
    if len(shape_a) > len(shape_b):
        longer, shorter = shape_a, shape_b
    else:
        longer, shorter = shape_b, shape_a
    # Left-pad the shorter shape with 1s so both ranks match.
    shorter = (1,) * (len(longer) - len(shorter)) + shorter
    result = []
    for dim_s, dim_l in zip(shorter, longer):
        # Dimensions must agree, or one of them must be 1.
        assert (dim_s == dim_l) or (dim_s == 1) or (dim_l == 1)
        result.append(max(dim_s, dim_l))
    return tuple(result)
| python/athena/gpu_ops/StreamExecutor.py | 23,975 | Executor computes values for given set of nodes in computation graph.
Parameters
----------
eval_node_list: list of nodes whose values need to be computed.
ctx: runtime DLContext, default is None which means np.ndarray on cpu
topo_order: list of nodes in topological order
node_to_shape_map: dict from node to shape of the node
node_to_arr_map: dict from node to ndarray.NDArray allocated for node
feed_shapes: shapes of feed_dict from last run(...)
Return output shape of broadcast shape_a, shape_b.
e.g. broadcast_rule((3,2), (4,3,2))
returns output_shape = (4,3,2)
Check out explanations and more examples at
https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html
http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/
Take gradient of output node with respect to each node in node_list.
Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.
Returns
-------
A list of gradient values, one for each node in node_list respectively.
Given a list of nodes, return a topo ordering of nodes ending in them.
A simple algorithm is to do a post-order DFS traversal on the given nodes,
going backwards based on input edges. Since a node is added to the ordering
after all its predecessors are traversed due to post-order DFS, we get a
topological sort.
Take gradient of output node with respect to each node in node_list.
Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.
Returns
-------
A list of gradient values, one for each node in node_list respectively.
Given shapes of feed_dict nodes, infer shape for all nodes in graph.
Implementation note:
Iteratively calls node.op.infer_shape to infer shapes.
Node shapes stored in self.node_to_shape_map.
Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
Allocates ndarray.NDArray for every node except feed_dict nodes.
Implementation note:
Option 1: Alloc a ndarray.NDArray per node that persists across run()
Option 2: Implement a memory pool to reuse memory for nodes of same
shapes. More details see Lecture 7.
For both options, self.node_to_arr_map stores node->NDArray mapping to
allow mapping to persist across multiple executor.run().
Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.
Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
Parameters
----------
feed_dict: a dictionary of node->np.ndarray supplied by user.
convert_to_numpy_ret_vals: whether to convert ret vals to np.array
Returns
-------
A list of values for nodes in eval_node_list. NDArray or np.ndarray.
Custom sum func to avoid creating redundant nodes in Python sum func.
Post-order DFS
library to take autodiff and execute a computation graph
lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002") lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001") print nodeA.name print gradient_val_cpu.asnumpy() print after_reduce_gradient_cpu.asnumpy() print(node.name) self.infer_shape(feed_shapes) Assume self.ctx is None implies numpy array and numpy ops. all values passed in feed_dict must be np.ndarray convert values to ndarray.NDArray if necessary print"xxxx" collect shapes for all placeholders infer shape if feed_shapes changed since last run e.g. call run() on test data after trainng print feed_shapes print(node.name) def run(self, feed_dict, convert_to_numpy_ret_vals=False): """ Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray. """ def are_feed_shapes_equal(sa, sb): if (not isinstance(sa, dict)) or (not isinstance(sb, dict)): return False unmatched_item = set(sa.items()) ^ set(sb.items()) return len(unmatched_item) == 0 Assume self.ctx is None implies numpy array and numpy ops. 
use_numpy = self.ctx is None node_to_val_map = {} for node, value in feed_dict.items(): if self.policy == 'vdnn': assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: if use_numpy: all values passed in feed_dict must be np.ndarray assert isinstance(value, np.ndarray) node_to_val_map[node] = value else: convert values to ndarray.NDArray if necessary if isinstance(value, np.ndarray): if self.policy == 'swap': if node.swap == True: node_to_val_map[node] = value else: node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) else: node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, "feed_dict value type not supported" collect shapes for all placeholders feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape infer shape if feed_shapes changed since last run e.g. call run() on test data after trainng print feed_shapes if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if not self.policy == 'vdnn': plan memory if using GPU if (not use_numpy): self.memory_plan(feed_shapes) Traverse graph in topo order and compute values for all nodes. 
global FLAG_SHOW_GRAPH if self.policy == 'swap': generate swap queue if not use_numpy: for node in self.topo_order: if node not in node_to_val_map: variable in placeholder for input_node in node.inputs: if input_node.swap == True: self.swap_queue.append(input_node) variable grad if node.swap == True: self.swap_queue.append(node) node_in_GPU = None if FLAG_SHOW_GRAPH: print "Show swap queue:" for node in self.swap_queue: print node elif self.policy == 'vdnn': TODO traverse graph to select in-gpu window window = [0,0] if not use_numpy: nvmlInit() handle = nvmlDeviceGetHandleByIndex(0) info = nvmlDeviceGetMemoryInfo(handle) gpu_mem = info.free nvmlShutdown() loss_node = self.eval_node_list[0] window[1] = self.topo_order.index(loss_node)+1 window[0] = self.topo_order.index(loss_node)+1 for node in reversed(self.topo_order[:window[1]+1]): node_size = 4 float32 print node, self.node_to_shape_map[node] for shape in self.node_to_shape_map[node]: node_size = node_size * shape if gpu_mem > node_size: gpu_mem = gpu_mem - node_size window[0] = window[0] - 1 print "gpu_mem:",gpu_mem Traverse graph in topo order and compute values for all nodes. 
if FLAG_SHOW_GRAPH: print "run topo_order" Show graph dependency if FLAG_SHOW_GRAPH: print "node:",node print "node.desc:",node.desc for node in self.topo_order: if self.policy == 'vdnn': Skip placeholder nodes if node in node_to_val_map: continue H2D before compute Collect inputs input_vals = [] for n in node.inputs: if not use_numpy: if isinstance(node_to_val_map[n], np.ndarray): node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx) input_vals.append(node_to_val_map[n]) Alloc node space if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx) Compute node_val is modified in-place whether np.ndarray or NDArray node.op.compute(node, input_vals, node_val, use_numpy) D2H after compute if use_numpy: node_to_val_map[node] = node_val else: node_index = self.topo_order.index(node) if node_index > window[0] and node_index < window[1]: node_to_val_map[node] = node_val continue node_to_val_map[node] = node_val.asnumpy() del node_val for n in node.inputs: if isinstance(node_to_val_map[n], ndarray.NDArray): tmp_val = node_to_val_map[n].asnumpy() del node_to_val_map[n] node_to_val_map[n] = tmp_val elif self.policy == 'swap': Switch in GPU if not use_numpy: if self.swap_queue and (node_in_GPU==None): swap_node = self.swap_queue[0] if swap_node in node_to_val_map: node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx) else: self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx) node_in_GPU = swap_node.id if node in node_to_val_map: Skip placeholder nodes. Values already provided by feed_dict. 
continue Compute input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] node_val is modified in-place whether np.ndarray or NDArray node.op.compute(node, input_vals, node_val, use_numpy) if node.swap == True: node_to_val_map[node] = node_val.asnumpy() del node_val del self.node_to_arr_map[node] del self.swap_queue[0] node_in_GPU = None else: node_to_val_map[node] = node_val Switch out GPU if not use_numpy: if self.swap_queue: if self.swap_queue[0] in node.inputs: out_node = self.swap_queue.pop(0) if self.swap_queue: if not self.swap_queue[0].id == node_in_GPU: tmp_array = node_to_val_map[out_node].asnumpy() del node_to_val_map[out_node] node_to_val_map[out_node] = tmp_array node_in_GPU = None else: if node in node_to_val_map: Skip placeholder nodes. Values already provided by feed_dict. continue input_vals = [node_to_val_map[n] for n in node.inputs] print self.node_to_shape_map[node] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] node_val is modified in-place whether np.ndarray or NDArray if (len(node.inputs) == 1): print "computs",node.inputs[0].name else: print "computs",node.inputs[0].name,node.inputs[1].name print node.name print node_val.shape print "xxx" print node.name node.op.compute(node, input_vals, node_val, use_numpy) print "xxx" node_to_val_map[node] = node_val print "xxx" if FLAG_SHOW_GRAPH: FLAG_SHOW_GRAPH = False Collect node values. 
if not use_numpy and convert_to_numpy_ret_vals: if self.policy == 'swap': node_values = [] for n in self.eval_node_list: if n.swap == True: node_values.append(node_to_val_map[n]) else: node_values.append(node_to_val_map[n].asnumpy()) return node_values elif self.policy == 'vdnn': return [node_to_val_map[n] for n in self.eval_node_list] else: return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list] Traverse forward graph in reverse topological orderprint len(node.name)print len(node.inputs)raw_input("\n\nPress the enter key to exit.") Calculate partial adjoint for input nodes. print node.name grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] Traverse forward graph in reverse topological orderprint len(node.name)print len(node.inputs)raw_input("\n\nPress the enter key to exit.") Calculate partial adjoint for input nodes. grad_node_list = [node_to_output_grad[node] for node in node_list] Helper Methods pad with leading 1s | 12,795 | en | 0.359744 |
"""
TickerHandler
This implements an efficient Ticker which uses a subscription
model to 'tick' subscribed objects at regular intervals.
The ticker mechanism is used by importing and accessing
the instantiated TICKER_HANDLER instance in this module. This
instance is run by the server; it will save its status across
server reloads and be started automatically on boot.
Example:
```python
from evennia.scripts.tickerhandler import TICKER_HANDLER
# call tick myobj.at_tick(*args, **kwargs) every 15 seconds
TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs)
```
You supply the interval to tick and a callable to call regularly
with any extra args/kwargs. The handler will transparently set
up and add new timers behind the scenes to tick at given intervals,
using a TickerPool - all callables with the same interval will share
the interval ticker.
To remove:
```python
TICKER_HANDLER.remove(15, myobj.at_tick)
```
Both interval and callable must be given since a single object can be subscribed
to many different tickers at the same time. You can also supply `idstring`
as an identifying string if you ever want to tick the callable at the same interval
but with different arguments (args/kwargs are not used for identifying the ticker). There
is also `persistent=False` if you don't want the ticker to survive a server shutdown.
If either or both `idstring` or `persistent` has been changed from their defaults, they
must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker
to remove.
The TickerHandler's functionality can be overloaded by modifying the
Ticker class and then changing TickerPool and TickerHandler to use the
custom classes
```python
class MyTicker(Ticker):
# [doing custom stuff]
class MyTickerPool(TickerPool):
ticker_class = MyTicker
class MyTickerHandler(TickerHandler):
ticker_pool_class = MyTickerPool
```
If one wants to duplicate TICKER_HANDLER's auto-saving feature in
a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to
call the handler's `save()` and `restore()` methods when the server reboots.
"""
import inspect
from builtins import object
from twisted.internet.defer import inlineCallbacks
from django.core.exceptions import ObjectDoesNotExist
from evennia.scripts.scripts import ExtendedLoopingCall
from evennia.server.models import ServerConfig
from evennia.utils.logger import log_trace, log_err
from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
from evennia.utils import variable_from_module
_GA = object.__getattribute__
_SA = object.__setattr__
_ERROR_ADD_TICKER = \
"""TickerHandler: Tried to add an invalid ticker:
{storekey}
Ticker was not added."""
class Ticker(object):
    """
    Represents a repeatedly running task that calls
    hooks repeatedly. Overload `_callback` to change the
    way it operates.
    """

    @inlineCallbacks
    def _callback(self):
        """
        This will be called repeatedly every `self.interval` seconds.
        `self.subscriptions` contain tuples of (obj, args, kwargs) for
        each subscribing object.

        If overloading, this callback is expected to handle all
        subscriptions when it is triggered. It should not return
        anything and should not traceback on poorly designed hooks.
        The callback should ideally work under @inlineCallbacks so it
        can yield appropriately.

        The _hook_key, which is passed down through the handler via
        kwargs is used here to identify which hook method to call.
        """
        self._to_add = []
        self._to_remove = []
        # flag makes add()/remove() defer their changes so the
        # subscription dict is never mutated while we loop over it
        self._is_ticking = True
        for store_key, (args, kwargs) in self.subscriptions.iteritems():
            callback = yield kwargs.pop("_callback", "at_tick")
            obj = yield kwargs.pop("_obj", None)
            try:
                if callable(callback):
                    # call directly
                    yield callback(*args, **kwargs)
                    continue
                # try object method
                if not obj or not obj.pk:
                    # object was deleted between calls
                    self._to_remove.append(store_key)
                    continue
                else:
                    yield _GA(obj, callback)(*args, **kwargs)
            except ObjectDoesNotExist:
                log_trace("Removing ticker.")
                self._to_remove.append(store_key)
            except Exception:
                # a poorly designed hook should not kill the ticker
                log_trace()
            finally:
                # make sure to re-store the popped callback/obj so the
                # subscription is intact for the next tick
                kwargs["_callback"] = callback
                kwargs["_obj"] = obj
        # cleanup - we do this here to avoid changing the subscription dict while it loops
        self._is_ticking = False
        for store_key in self._to_remove:
            self.remove(store_key)
        for store_key, (args, kwargs) in self._to_add:
            self.add(store_key, *args, **kwargs)
        self._to_remove = []
        self._to_add = []

    def __init__(self, interval):
        """
        Set up the ticker

        Args:
            interval (int): The stepping interval.

        """
        self.interval = interval
        # mapping {store_key: (args, kwargs)}
        self.subscriptions = {}
        self._is_ticking = False
        self._to_remove = []
        self._to_add = []
        # set up a twisted asynchronous repeat call
        self.task = ExtendedLoopingCall(self._callback)

    def validate(self, start_delay=None):
        """
        Start/stop the task depending on how many subscribers we have
        using it.

        Args:
            start_delay (int): Time to wait before starting.

        """
        subs = self.subscriptions
        if self.task.running:
            if not subs:
                self.task.stop()
        elif subs:
            self.task.start(self.interval, now=False, start_delay=start_delay)

    def add(self, store_key, *args, **kwargs):
        """
        Sign up a subscriber to this ticker.

        Args:
            store_key (str): Unique storage hash for this ticker subscription.
            args (any, optional): Arguments to call the hook method with.

        Kwargs:
            _start_delay (int): If set, this will be
                used to delay the start of the trigger instead of
                `interval`.

        """
        if self._is_ticking:
            # protects the subscription dict from
            # updating while it is looping.
            # BUGFIX: this used to append to `self._to_start`, an
            # attribute that is never defined - subscribing from
            # inside a tick raised AttributeError. `_callback`
            # processes the `self._to_add` list after the loop.
            self._to_add.append((store_key, (args, kwargs)))
        else:
            start_delay = kwargs.pop("_start_delay", None)
            self.subscriptions[store_key] = (args, kwargs)
            self.validate(start_delay=start_delay)

    def remove(self, store_key):
        """
        Unsubscribe object from this ticker

        Args:
            store_key (str): Unique store key.

        """
        if self._is_ticking:
            # this protects the subscription dict from
            # updating while it is looping
            self._to_remove.append(store_key)
        else:
            self.subscriptions.pop(store_key, False)
            self.validate()

    def stop(self):
        """
        Kill the Task, regardless of subscriptions.

        """
        self.subscriptions = {}
        self.validate()
class TickerPool(object):
    """
    This maintains a pool of
    `evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling
    subscribed objects at given times.
    """
    # overload this to use a custom Ticker implementation
    ticker_class = Ticker

    def __init__(self):
        """
        Initialize the pool.

        """
        # mapping {interval: Ticker} - all subscriptions with the same
        # interval share a single Ticker (and thus a single timer task)
        self.tickers = {}

    def add(self, store_key, *args, **kwargs):
        """
        Add new ticker subscriber.

        Args:
            store_key (tuple): Unique storage hash; its fourth element
                is the tick interval.
            args (any, optional): Arguments to send to the hook method.

        """
        _, _, _, interval, _, _ = store_key
        if not interval:
            log_err(_ERROR_ADD_TICKER.format(store_key=store_key))
            return

        if interval not in self.tickers:
            self.tickers[interval] = self.ticker_class(interval)
        self.tickers[interval].add(store_key, *args, **kwargs)

    def remove(self, store_key):
        """
        Remove subscription from pool.

        Args:
            store_key (tuple): Unique storage hash to remove

        """
        _, _, _, interval, _, _ = store_key
        if interval in self.tickers:
            self.tickers[interval].remove(store_key)
            # BUGFIX: this used to test the Ticker instance itself,
            # which is always truthy, so empty tickers were never
            # cleaned out of the pool. Test its subscriptions instead;
            # Ticker.remove already stopped the underlying task.
            if not self.tickers[interval].subscriptions:
                del self.tickers[interval]

    def stop(self, interval=None):
        """
        Stop all scripts in pool. This is done at server reload since
        restoring the pool will automatically re-populate the pool.

        Args:
            interval (int, optional): Only stop tickers with this
                interval.

        """
        if interval and interval in self.tickers:
            self.tickers[interval].stop()
        else:
            for ticker in self.tickers.values():
                ticker.stop()
class TickerHandler(object):
    """
    The Tickerhandler maintains a pool of tasks for subscribing
    objects to various tick rates. The pool maintains creation
    instructions and re-applies them at a server restart.
    """
    # overload this to use a custom TickerPool implementation
    ticker_pool_class = TickerPool

    def __init__(self, save_name="ticker_storage"):
        """
        Initialize handler

        save_name (str, optional): The name of the ServerConfig
            instance to store the handler state persistently.

        """
        # mapping {store_key: (args, kwargs)} - the persistent creation
        # instructions for every subscription
        self.ticker_storage = {}
        self.save_name = save_name
        self.ticker_pool = self.ticker_pool_class()

    def _get_callback(self, callback):
        """
        Analyze callback and determine its constituents

        Args:
            callback (function or method): This is either a stand-alone
                function or class method on a typeclassed entity (that is,
                an entity that can be saved to the database).

        Returns:
            ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,
                where `obj` is the database object the callback is defined on
                if it's a method (otherwise `None`) and vice-versa, `path` is
                the python-path to the stand-alone function (`None` if a method).
                The `callfunc` is either the name of the method to call or the
                callable function object itself.

        Raises:
            TypeError: If `callback` is neither a function nor a method.

        """
        outobj, outpath, outcallfunc = None, None, None
        if callable(callback):
            if inspect.ismethod(callback):
                # bound method - store the instance and the method name
                outobj = callback.im_self
                outcallfunc = callback.im_func.func_name
            elif inspect.isfunction(callback):
                # stand-alone function - store its python-path
                outpath = "%s.%s" % (callback.__module__, callback.func_name)
                outcallfunc = callback
        else:
            raise TypeError("%s is not a callable function or method." % callback)
        return outobj, outpath, outcallfunc

    def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True):
        """
        Tries to create a store_key for the object.

        Args:
            obj (Object, tuple or None): Subscribing object if any. If a tuple, this is
                a packed_obj tuple from dbserialize.
            path (str or None): Python-path to callable, if any.
            interval (int): Ticker interval.
            callfunc (callable or str): This is either the callable function or
                the name of the method to call. Note that the callable is never
                stored in the key; that is uniquely identified with the python-path.
            idstring (str, optional): Additional separator between
                different subscription types.
            persistent (bool, optional): If this ticker should survive a system
                shutdown or not.

        Returns:
            store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval,
                idstring, persistent)` that uniquely identifies the
                ticker. Here, `packed_obj` is the unique string representation of the
                object or `None`. The `methodname` is the string name of the method on
                `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is
                the Python-path to a non-method callable, or `None`. Finally, `interval`
                `idstring` and `persistent` are integers, strings and bools respectively.

        """
        interval = int(interval)
        persistent = bool(persistent)
        packed_obj = pack_dbobj(obj)
        methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None
        outpath = path if path and isinstance(path, basestring) else None
        return (packed_obj, methodname, outpath, interval, idstring, persistent)

    def save(self):
        """
        Save ticker_storage as a serialized string into a temporary
        ServerConf field. Whereas saving is done on the fly, if called
        by server when it shuts down, the current timer of each ticker
        will be saved so it can start over from that point.

        """
        if self.ticker_storage:
            # get the current times so the tickers can be restarted with a delay later
            start_delays = dict((interval, ticker.task.next_call_time())
                                for interval, ticker in self.ticker_pool.tickers.items())

            # remove any subscriptions that lost its object in the interim
            to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items()
                       if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and
                            hasattr(kwargs["_obj"], store_key[1])) or  # a valid method with existing obj
                           store_key[2])}  # a path given

            # update the timers for the tickers
            for store_key, (args, kwargs) in to_save.items():
                # the interval is the fourth element of the store_key
                # (BUGFIX: this used store_key[1], the method name, so the
                # start_delays lookup always missed and every restored
                # ticker started without its saved delay)
                interval = store_key[3]
                # this is a mutable, so it's updated in-place in ticker_storage
                kwargs["_start_delay"] = start_delays.get(interval, None)
            ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save))
        else:
            # make sure we have nothing lingering in the database
            ServerConfig.objects.conf(key=self.save_name, delete=True)

    def restore(self, server_reload=True):
        """
        Restore ticker_storage from database and re-initialize the
        handler from storage. This is triggered by the server at
        restart.

        Args:
            server_reload (bool, optional): If this is False, it means
                the server went through a cold reboot and all
                non-persistent tickers must be killed.

        """
        # load stored command instructions and use them to re-initialize handler
        restored_tickers = ServerConfig.objects.conf(key=self.save_name)
        if restored_tickers:
            # the dbunserialize will convert all serialized dbobjs to real objects
            restored_tickers = dbunserialize(restored_tickers)
            self.ticker_storage = {}
            for store_key, (args, kwargs) in restored_tickers.iteritems():
                try:
                    # at this point obj is the actual object (or None) due to how
                    # the dbunserialize works
                    obj, callfunc, path, interval, idstring, persistent = store_key
                    if not persistent and not server_reload:
                        # this ticker will not be restarted
                        continue
                    if isinstance(callfunc, basestring) and not obj:
                        # methods must have an existing object
                        continue
                    # we must rebuild the store_key here since obj must not be
                    # stored as the object itself for the store_key to be hashable.
                    store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)

                    if obj and callfunc:
                        kwargs["_callback"] = callfunc
                        kwargs["_obj"] = obj
                    elif path:
                        modname, varname = path.rsplit(".", 1)
                        callback = variable_from_module(modname, varname)
                        kwargs["_callback"] = callback
                        kwargs["_obj"] = None
                    else:
                        # Neither object nor path - discard this ticker
                        log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
                        continue
                except Exception:
                    # this suggests a malformed save or missing objects
                    log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
                    continue
                # if we get here we should create a new ticker
                self.ticker_storage[store_key] = (args, kwargs)
                self.ticker_pool.add(store_key, *args, **kwargs)

    def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs):
        """
        Add subscription to tickerhandler

        Args:
            interval (int, optional): Interval in seconds between calling
                `callable(*args, **kwargs)`
            callable (callable function or method, optional): This
                should either be a stand-alone function or a method on a
                typeclassed entity (that is, one that can be saved to the
                database).
            idstring (str, optional): Identifier for separating
                this ticker-subscription from others with the same
                interval. Allows for managing multiple calls with
                the same time interval and callback.
            persistent (bool, optional): A ticker will always survive
                a server reload. If this is unset, the ticker will be
                deleted by a server shutdown.
            args, kwargs (optional): These will be passed into the
                callback every time it is called.

        Notes:
            The callback will be identified by type and stored either as
            as combination of serialized database object + methodname or
            as a python-path to the module + funcname. These strings will
            be combined with `interval` and `idstring` to define a
            unique storage key for saving. These must thus all be supplied
            when wanting to modify/remove the ticker later.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.add has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        kwargs["_obj"] = obj
        kwargs["_callback"] = callfunc  # either method-name or callable
        self.ticker_storage[store_key] = (args, kwargs)
        self.ticker_pool.add(store_key, *args, **kwargs)
        self.save()

    def remove(self, interval=60, callback=None, idstring="", persistent=True):
        """
        Remove object from ticker or only remove it from tickers with
        a given interval.

        Args:
            interval (int, optional): Interval of ticker to remove.
            callback (callable function or method): Either a function or
                the method of a typeclassed object.
            idstring (str, optional): Identifier id of ticker to remove.

        """
        if isinstance(callback, int):
            raise RuntimeError("TICKER_HANDLER.remove has changed: "
                               "the interval is now the first argument, callback the second.")
        obj, path, callfunc = self._get_callback(callback)
        store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
        to_remove = self.ticker_storage.pop(store_key, None)
        if to_remove:
            self.ticker_pool.remove(store_key)
            self.save()

    def clear(self, interval=None):
        """
        Stop/remove tickers from handler.

        Args:
            interval (int): Only stop tickers with this interval.

        Notes:
            This is the only supported way to kill tickers related to
            non-db objects.

        """
        self.ticker_pool.stop(interval)
        if interval:
            # keep only subscriptions with a different interval.
            # (BUGFIX: this used store_key[1] - the method name - as the
            # interval, and mapped each store_key to itself, discarding
            # the stored (args, kwargs) subscription data)
            self.ticker_storage = dict((store_key, subscription)
                                       for store_key, subscription in self.ticker_storage.items()
                                       if store_key[3] != interval)
        else:
            self.ticker_storage = {}
        self.save()

    def all(self, interval=None):
        """
        Get all subscriptions.

        Args:
            interval (int): Limit match to tickers with this interval.

        Returns:
            tickers (list): If `interval` was given, this is a list of
                tickers using that interval (or `None` if no ticker uses
                that interval).
            tickerpool_layout (dict): If `interval` was *not* given,
                this is a dict {interval1: [ticker1, ticker2, ...], ...}

        """
        if interval is None:
            # return dict of all, ordered by interval
            return dict((interval, ticker.subscriptions)
                        for interval, ticker in self.ticker_pool.tickers.iteritems())
        else:
            # get individual interval
            ticker = self.ticker_pool.tickers.get(interval, None)
            if ticker:
                return {interval: ticker.subscriptions}

    def all_display(self):
        """
        Get all tickers on an easily displayable form.

        Returns:
            tickers (dict): A list of all storekeys

        """
        store_keys = []
        for ticker in self.ticker_pool.tickers.itervalues():
            for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems():
                store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent))
        return store_keys
# main tickerhandler - the module-level singleton used by the server;
# import and use this instance rather than instantiating TickerHandler.
TICKER_HANDLER = TickerHandler()
| evennia/scripts/tickerhandler.py | 22,370 | Represents a repeatedly running task that calls
hooks repeatedly. Overload `_callback` to change the
way it operates.
The Tickerhandler maintains a pool of tasks for subscribing
objects to various tick rates. The pool maintains creation
instructions and re-applies them at a server restart.
This maintains a pool of
`evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling
subscribed objects at given times.
Set up the ticker
Args:
interval (int): The stepping interval.
Initialize the pool.
Initialize handler
save_name (str, optional): The name of the ServerConfig
instance to store the handler state persistently.
This will be called repeatedly every `self.interval` seconds.
`self.subscriptions` contain tuples of (obj, args, kwargs) for
each subscribing object.
If overloading, this callback is expected to handle all
subscriptions when it is triggered. It should not return
anything and should not traceback on poorly designed hooks.
The callback should ideally work under @inlineCallbacks so it
can yield appropriately.
The _hook_key, which is passed down through the handler via
kwargs is used here to identify which hook method to call.
Analyze callback and determine its constituents
Args:
callback (function or method): This is either a stand-alone
function or class method on a typeclassed entity (that is,
an entity that can be saved to the database).
Returns:
ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,
where `obj` is the database object the callback is defined on
if it's a method (otherwise `None`) and vice-versa, `path` is
the python-path to the stand-alone function (`None` if a method).
The `callfunc` is either the name of the method to call or the
callable function object itself.
Tries to create a store_key for the object.
Args:
obj (Object, tuple or None): Subscribing object if any. If a tuple, this is
a packed_obj tuple from dbserialize.
path (str or None): Python-path to callable, if any.
interval (int): Ticker interval.
callfunc (callable or str): This is either the callable function or
the name of the method to call. Note that the callable is never
stored in the key; that is uniquely identified with the python-path.
idstring (str, optional): Additional separator between
different subscription types.
persistent (bool, optional): If this ticker should survive a system
shutdown or not.
Returns:
store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval,
idstring, persistent)` that uniquely identifies the
ticker. Here, `packed_obj` is the unique string representation of the
object or `None`. The `methodname` is the string name of the method on
`packed_obj` to call, or `None` if `packed_obj` is unset. `path` is
the Python-path to a non-method callable, or `None`. Finally, `interval`
`idstring` and `persistent` are integers, strings and bools respectively.
Sign up a subscriber to this ticker.
Args:
store_key (str): Unique storage hash for this ticker subscription.
args (any, optional): Arguments to call the hook method with.
Kwargs:
_start_delay (int): If set, this will be
used to delay the start of the trigger instead of
`interval`.
Add new ticker subscriber.
Args:
store_key (str): Unique storage hash.
args (any, optional): Arguments to send to the hook method.
Add subscription to tickerhandler
Args:
interval (int, optional): Interval in seconds between calling
`callable(*args, **kwargs)`
callable (callable function or method, optional): This
should either be a stand-alone function or a method on a
typeclassed entity (that is, one that can be saved to the
database).
idstring (str, optional): Identifier for separating
this ticker-subscription from others with the same
interval. Allows for managing multiple calls with
the same time interval and callback.
persistent (bool, optional): A ticker will always survive
a server reload. If this is unset, the ticker will be
deleted by a server shutdown.
args, kwargs (optional): These will be passed into the
callback every time it is called.
Notes:
The callback will be identified by type and stored either as
as combination of serialized database object + methodname or
as a python-path to the module + funcname. These strings will
be combined with `interval` and `idstring` to define a
unique storage key for saving. These must thus all be supplied
when wanting to modify/remove the ticker later.
Get all subscriptions.
Args:
interval (int): Limit match to tickers with this interval.
Returns:
tickers (list): If `interval` was given, this is a list of
tickers using that interval.
tickerpool_layout (dict): If `interval` was *not* given,
this is a dict {interval1: [ticker1, ticker2, ...], ...}
Get all tickers on an easily displayable form.
Returns:
tickers (dict): A list of all storekeys
Stop/remove tickers from handler.
Args:
interval (int): Only stop tickers with this interval.
Notes:
This is the only supported way to kill tickers related to
non-db objects.
Unsubscribe object from this ticker
Args:
store_key (str): Unique store key.
Remove subscription from pool.
Args:
store_key (str): Unique storage hash to remove
Remove object from ticker or only remove it from tickers with
a given interval.
Args:
interval (int, optional): Interval of ticker to remove.
callback (callable function or method): Either a function or
the method of a typeclassed object.
idstring (str, optional): Identifier id of ticker to remove.
Restore ticker_storage from database and re-initialize the
handler from storage. This is triggered by the server at
restart.
Args:
server_reload (bool, optional): If this is False, it means
the server went through a cold reboot and all
non-persistent tickers must be killed.
Save ticker_storage as a serialized string into a temporary
ServerConf field. Whereas saving is done on the fly, if called
by server when it shuts down, the current timer of each ticker
will be saved so it can start over from that point.
Kill the Task, regardless of subscriptions.
Stop all scripts in pool. This is done at server reload since
restoring the pool will automatically re-populate the pool.
Args:
interval (int, optional): Only stop tickers with this
interval.
Start/stop the task depending on how many subscribers we have
using it.
Args:
start_delay (int): Time to wait before starting.
TickerHandler
This implements an efficient Ticker which uses a subscription
model to 'tick' subscribed objects at regular intervals.
The ticker mechanism is used by importing and accessing
the instantiated TICKER_HANDLER instance in this module. This
instance is run by the server; it will save its status across
server reloads and be started automatically on boot.
Example:
```python
from evennia.scripts.tickerhandler import TICKER_HANDLER
# call tick myobj.at_tick(*args, **kwargs) every 15 seconds
TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs)
```
You supply the interval to tick and a callable to call regularly
with any extra args/kwargs. The handler will transparently set
up and add new timers behind the scenes to tick at given intervals,
using a TickerPool - all callables with the same interval will share
the interval ticker.
To remove:
```python
TICKER_HANDLER.remove(15, myobj.at_tick)
```
Both interval and callable must be given since a single object can be subscribed
to many different tickers at the same time. You can also supply `idstring`
as an identifying string if you ever want to tick the callable at the same interval
but with different arguments (args/kwargs are not used for identifying the ticker). There
is also `persistent=False` if you want to make a ticker that doesn't survive a reload.
If either or both `idstring` or `persistent` has been changed from their defaults, they
must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker
to remove.
The TickerHandler's functionality can be overloaded by modifying the
Ticker class and then changing TickerPool and TickerHandler to use the
custom classes
```python
class MyTicker(Ticker):
# [doing custom stuff]
class MyTickerPool(TickerPool):
ticker_class = MyTicker
class MyTickerHandler(TickerHandler):
ticker_pool_class = MyTickerPool
```
If one wants to duplicate TICKER_HANDLER's auto-saving feature in
a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to
call the handler's `save()` and `restore()` methods when the server reboots.
call directly try object method object was deleted between calls make sure to re-store cleanup - we do this here to avoid changing the subscription dict while it loops set up a twisted asynchronous repeat call protects the subscription dict from updating while it is looping this protects the subscription dict from updating while it is looping get the current times so the tickers can be restarted with a delay later remove any subscriptions that lost its object in the interim a valid method with existing obj a path given update the timers for the tickers this is a mutable, so it's updated in-place in ticker_storage make sure we have nothing lingering in the database load stored command instructions and use them to re-initialize handler the dbunserialize will convert all serialized dbobjs to real objects at this point obj is the actual object (or None) due to how the dbunserialize works this ticker will not be restarted methods must have an existing object we must rebuild the store_key here since obj must not be stored as the object itself for the store_key to be hashable. Neither object nor path - discard this ticker this suggests a malformed save or missing objects if we get here we should create a new ticker either method-name or callable return dict of all, ordered by interval get individual interval main tickerhandler | 10,194 | en | 0.810958 |
# ======================================================================
# The Stars Align
# Advent of Code 2018 Day 10 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# a o c _ 1 0 . p y
# ======================================================================
"Solve the puzzles for Advent of Code 2018 day 10"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import argparse
import sys
import lights
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# parse_commnd_line
# ----------------------------------------------------------------------
def parse_command_line():
    "Parse the command line options"
    # Build the argument parser for this day's puzzle
    parser = argparse.ArgumentParser(
        description='The Stars Align - Day 10 of Advent of Code 2018',
        epilog='sample: python aoc_10.py input.txt')
    # Optional flags: verbosity, puzzle part, and an execution limit
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        dest='verbose', help='Print status messages to stdout')
    parser.add_argument('-p', '--part', action='store', default=1, type=int,
                        dest='part', help='Puzzle Part (1 or 2)')
    parser.add_argument('-l', '--limit', action='store', default=0, type=int,
                        dest='limit',
                        help='Maximum limit (e.g., time, size, recursion) before stopping')
    # Required positional argument: the puzzle input file
    parser.add_argument('filepath', metavar='FILENAME', action='store', type=str,
                        help="Location of puzzle input")
    # Parse sys.argv and return the resulting namespace
    return parser.parse_args()
# ----------------------------------------------------------------------
# part_one
# ----------------------------------------------------------------------
def part_one(args, input_lines):
    "Process part one of the puzzle"
    # Build the solver from the puzzle text and run part one
    puzzle = lights.Lights(part2=False, text=input_lines)
    answer = puzzle.part_one(verbose=args.verbose, limit=args.limit)
    # Report the outcome to the user
    if answer is None:
        print("There is no solution")
    else:
        print("The solution for part one is %s" % (answer))
    # True when an answer was found
    return answer is not None
# ----------------------------------------------------------------------
# part_two
# ----------------------------------------------------------------------
def part_two(args, input_lines):
    "Process part two of the puzzle"
    # Build the solver from the puzzle text and run part two
    puzzle = lights.Lights(part2=True, text=input_lines)
    answer = puzzle.part_two(verbose=args.verbose, limit=args.limit)
    # Report the outcome to the user
    if answer is None:
        print("There is no solution")
    else:
        print("The solution for part two is %s" % (answer))
    # True when an answer was found
    return answer is not None
# ----------------------------------------------------------------------
# from_file
# ----------------------------------------------------------------------
def from_file(filepath):
    "Read the file"
    # Use a context manager so the file handle is always closed
    # (the original leaked the handle returned by open())
    with open(filepath) as reader:
        return from_text(reader.read())
# ----------------------------------------------------------------------
# from_text
# ----------------------------------------------------------------------
def from_text(text):
    "Break the text into trimmed, non-comment lines"
    # 1. Right-trim each line (spaces and carriage returns only)
    # 2. Keep every line that is non-blank and not a '!' comment
    return [clean
            for clean in (line.rstrip(' \r') for line in text.split('\n'))
            if clean and not clean.startswith('!')]
# ----------------------------------------------------------------------
# main
# ----------------------------------------------------------------------
def main():
    "Read the Advent of Code problem and solve it"
    # Gather the command line options and the puzzle input lines
    args = parse_command_line()
    input_lines = from_file(args.filepath)
    # Dispatch to the requested puzzle part
    if args.part == 1:
        success = part_one(args, input_lines)
    else:
        success = part_two(args, input_lines)
    # Exit status: 0 when a solution was found, 2 otherwise
    sys.exit(0 if success else 2)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
# Run the solver only when executed as a script (not when imported)
if __name__ == '__main__':
    main()
# ======================================================================
# end a o c _ 1 0 . p y end
# ======================================================================
| 2018/10_TheStarsAlign/aoc_10.py | 5,875 | Read the file
Break the text into trimed, non-comment lines
Read the Advent of Code problem and solve it
Parse the command line options
Process part one of the puzzle
Process part two of the puzzle
Solve the puzzles for Advent of Code 2018 day 10
====================================================================== The Stars Align Advent of Code 2018 Day 10 -- Eric Wastl -- https://adventofcode.com Python implementation by Dr. Dean Earl Wright III ====================================================================== ====================================================================== a o c _ 1 0 . p y ====================================================================== ---------------------------------------------------------------------- import ---------------------------------------------------------------------- ---------------------------------------------------------------------- constants ---------------------------------------------------------------------- ---------------------------------------------------------------------- parse_commnd_line ---------------------------------------------------------------------- 1. Create the command line parser 2. Get the options and arguments ---------------------------------------------------------------------- part_one ---------------------------------------------------------------------- 1. Create the puzzle solver 2. Determine the solution for part one 3. Return result ---------------------------------------------------------------------- part_two ---------------------------------------------------------------------- 1. Create the puzzle solver 2. Determine the solution for part two 3. Return result ---------------------------------------------------------------------- from_file ---------------------------------------------------------------------- ---------------------------------------------------------------------- from_text ---------------------------------------------------------------------- 1. We start with no lines 2. Loop for lines in the text 3. But ignore blank and non-claim lines 4. Add the line 5. 
Return a list of clean lines ---------------------------------------------------------------------- main ---------------------------------------------------------------------- 1. Get the command line options 2. Read the puzzle file 3. Process the appropiate part of the puzzle 5. Set return code (0 if solution found, 2 if not) ---------------------------------------------------------------------- module initialization ---------------------------------------------------------------------- ====================================================================== end a o c _ 1 0 . p y end ====================================================================== | 3,374 | en | 0.323267 |
from pyecharts import options as opts
from pyecharts.charts import Map
import pandas as pd
import namemap
def read_country_code():
    """Build a dict mapping country names back to the keys of namemap.nameMap.

    :return: dict with nameMap's values as keys and its keys as values
    """
    # Invert the namemap.nameMap mapping (value -> key)
    return {val: key for key, val in namemap.nameMap.items()}
def read_csv():
    """Read the data file; return the country names and confirmed counts.

    Rows whose name is not found in the country dict are printed and
    skipped.

    :return: (list of mapped country names, list of confirmed counts)
    """
    country_dict = read_country_code()
    frame = pd.read_csv("2019-nCoV.csv", index_col=False)
    countrys_names, confirmed_count = [], []
    # Walk the 'name' and 'confirm' columns in row order
    for name, confirm in zip(frame['name'], frame['confirm']):
        if name in country_dict:
            countrys_names.append(country_dict[name])
            confirmed_count.append(confirm)
        else:
            # Unmapped country: report it so the data can be fixed
            print(name)
    return countrys_names, confirmed_count
def draw_map():
    """Render the world map of confirmed 2019-nCoV cases to map_world.html.

    :return: None
    """
    # NOTE: the counts read via pandas are numpy ints; pyecharts only
    # renders the data when given plain Python ints, so convert each
    # value explicitly before building the chart.
    countrys_names, confirmed_count = read_csv()
    confirmed_count_list = [int(item) for item in confirmed_count]
    # Build the world map and write it out as an HTML page
    (
        Map()
        .add(
            "确诊人数",
            [list(z) for z in zip(countrys_names, confirmed_count_list)],
            is_map_symbol_show=False,
            maptype="world",
            label_opts=opts.LabelOpts(is_show=False),
            itemstyle_opts=opts.ItemStyleOpts(color="rgb(49,60,72)")
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            title_opts=opts.TitleOpts(title="全球 2019-nCoV 地图"),
            visualmap_opts=opts.VisualMapOpts(max_=1700000),
        )
        .render("map_world.html")
    )
# Render the world map only when run as a script (not when imported)
if __name__ == '__main__':
    draw_map()
| python-data-analysis/2019-nCoV-global/global_map.py | 5,031 | 绘制世界地图
遇到一个很神奇的问题:
两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据
:return:
获取国家中英文字典
:return:
读取数据,返回国家英文名称列表和累计确诊数列表
:return:
将 nameMap 列表里面键值互换 修复注释中的问题,原因是 confirmed_count 中的 int 是 numpy 的 int ,需转化为 python 中的 int 感谢公众号的 @李康伟 同学提出 countrys_names = ['United States', 'Brazil', 'Russia', 'Spain', 'United Kingdom', 'Italy', 'France', 'Germany', 'Turkey', 'Iran', 'India', 'Peru', 'Canada', 'Saudi Arabia', 'Mexico', 'Chile', 'Belgium', 'Pakistan', 'Netherlands', 'Qatar', 'Ecuador', 'Belarus', 'Sweden', 'Bangladesh', 'Singapore Rep.', 'Switzerland', 'Portugal', 'United Arab Emirates', 'Ireland', 'Indonesia', 'South Africa', 'Poland', 'Ukraine', 'Kuwait', 'Colombia', 'Romania', 'Israel', 'Japan', 'Egypt', 'Austria', 'Dominican Rep.', 'Philippines', 'Denmark', 'Argentina', 'Korea', 'Serbia', 'Panama', 'Afghanistan', 'Czech Rep.', 'Norway', 'Kazakhstan', 'Algeria', 'Nigeria', 'Morocco', 'Oman', 'Malaysia', 'Australia', 'Moldova', 'Ghana', 'Finland', 'Armenia', 'Bolivia', 'Cameroon', 'Iraq', 'Luxembourg', 'Azerbaijan', 'Honduras', 'Hungary', 'Sudan', 'Guinea', 'Uzbekistan', 'Guatemala', 'Thailand', 'Senegal', 'Greece', 'Tajikistan', 'Bulgaria', "Côte d'Ivoire", 'Djibouti', 'Croatia', 'Gabon', 'Cuba', 'Estonia', 'El Salvador', 'Iceland', 'Lithuania', 'Somalia', 'New Zealand', 'Slovakia', 'Slovenia', 'Kyrgyzstan', 'Kenya', 'Guinea Bissau', 'Lebanon', 'Sri Lanka', 'Tunisia', 'Latvia', 'Mali', 'Venezuela', 'Albania', 'Eq. 
Guinea', 'Niger', 'Cyprus', 'Zambia', 'Costa Rica', 'Haiti', 'Paraguay', 'Burkina Faso', 'Uruguay', 'Georgia', 'Jordan', 'Chad', 'Sierra Leone', 'Nepal', 'Jamaica', 'Tanzania', 'Ethiopia', 'Madagascar', 'Palestine', 'Togo', 'Vietnam', 'Rwanda', 'Montenegro', 'Nicaragua', 'Liberia', 'Swaziland', 'Mauritania', 'Yemen', 'Myanmar', 'Uganda', 'Mozambique', 'Mongolia', 'Brunei', 'Benin', 'Guyana', 'Cambodia', 'The Bahamas', 'Malawi', 'Libya', 'Syria', 'Angola', 'Zimbabwe', 'Burundi', 'Eritrea', 'Botswana', 'Gambia', 'Bhutan', 'East Timor', 'Namibia', 'Lao PDR', 'Fiji', 'Belize', 'Suriname', 'Papua New Guinea', 'Lesotho'] confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2] | 2,984 | en | 0.082177 |
"""
2. Categorical Predictors
=========================
"""
###############################################################################
# The syntax for handling categorical predictors is **different** between standard regression models/two-stage-models (i.e. :code:`Lm` and :code:`Lm2`) and multi-level models (:code:`Lmer`) in :code:`pymer4`. This is because formula parsing is passed to R for :code:`Lmer` models, but handled by Python for other models.
###############################################################################
# Lm and Lm2 Models
# -----------------
# :code:`Lm` and :code:`Lm2` models use `patsy <https://patsy.readthedocs.io/en/latest/>`_ to parse model formulae. Patsy is very powerful and has built-in support for handling categorical coding schemes by wrapping a predictor in then :code:`C()` *within* the module formula. Patsy can also perform some pre-processing such as scaling and standardization using special functions like :code:`center()`. Here are some examples.
# import basic libraries and sample data
import os
import pandas as pd
from pymer4.utils import get_resource_path
from pymer4.models import Lm
# IV3 is a categorical predictors with 3 levels in the sample data
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
###############################################################################
# Dummy-coded/Treatment contrasts
# +++++++++++++++++++++++++++++++
# Estimate a model using Treatment contrasts (dummy-coding)
# with '1.0' as the reference level
# This is the default of the C() function
model = Lm("DV ~ C(IV3, levels=[1.0, 0.5, 1.5])", data=df)
print(model.fit())
###############################################################################
# Orthogonal Polynomial Contrasts
# +++++++++++++++++++++++++++++++
# Patsy can do this using the Poly argument to the
# C() function
model = Lm("DV ~ C(IV3, Poly)", data=df)
print(model.fit())
###############################################################################
# Sum-to-zero contrasts
# +++++++++++++++++++++
# Similar to before but with the Sum argument
model = Lm("DV ~ C(IV3, Sum)", data=df)
print(model.fit())
###############################################################################
# Scaling/Centering
# +++++++++++++++++
# Moderation with IV2, but centering IV2 first
model = Lm("DV ~ center(IV2) * C(IV3, Sum)", data=df)
print(model.fit())
###############################################################################
# Please refer to the `patsy documentation <https://patsy.readthedocs.io/en/latest/categorical-coding.html>`_ for more details when working categorical predictors in :code:`Lm` or :code:`Lm2` models.
###############################################################################
# Lmer Models
# -----------
# :code:`Lmer()` models currently have support for handling categorical predictors in one of three ways based on how R's :code:`factor()` works (see the note at the end of this tutorial):
#
# - Dummy-coded factor levels (treatment contrasts) in which each model term is the difference between a factor level and a selected reference level
# - Orthogonal polynomial contrasts in which each model term is a polynomial contrast across factor levels (e.g. linear, quadratic, cubic, etc)
# - Custom contrasts for each level of a factor, which should be provided in the manner expected by R.
#
# To make re-parameterizing models easier, factor codings are passed as a dictionary to the :code:`factors` argument of a model's :code:`.fit()`. This obviates the need for adjusting data-frame properties as in R. Note that this is **different** from :code:`Lm` and :code:`Lm2` models above which expect factor codings in their formulae (because patsy does).
#
# Each of these ways also enables you to easily compute post-hoc comparisons between factor levels, as well as interactions between continuous predictors and each factor level. See tutorial 3 for more on post-hoc tests.
from pymer4.models import Lmer
# We're going to fit a multi-level logistic regression using the
# dichotomous DV_l variable and the same categorical predictor (IV3)
# as before
model = Lmer("DV_l ~ IV3 + (IV3|Group)", data=df, family="binomial")
###############################################################################
# Dummy-coding factors
# ++++++++++++++++++++
# First we'll use dummy-coding/treatment contrasts with 1.0 as the reference level. This will compute two coefficients: 0.5 > 1.0 and 1.5 > 1.0.
print(model.fit(factors={"IV3": ["1.0", "0.5", "1.5"]}))
###############################################################################
# Polynomial contrast coding
# ++++++++++++++++++++++++++
# Second we'll use orthogonal polynomial contrasts. This is accomplished using the :code:`ordered=True` argument and specifying the order of the *linear* contrast in increasing order. R will automatically compute higher order polynomial contrats that are orthogonal to this linear contrast. In this example, since there are 3 factor levels this will result in two polynomial terms: a linear contrast we specify below corresponding to 0.5 < 1.0 < 1.5 and an orthogonal quadratic contrast automatically determined by R, corresponding to 0.5 > 1 < 1.5
print(model.fit(factors={"IV3": ["0.5", "1.0", "1.5"]}, ordered=True))
###############################################################################
# Custom contrasts
# ++++++++++++++++
# :code:`Lmer` models can also take custom factor contrasts based on how they are expected by R (see the note at the end of this tutorial for how contrasts work in R). Remember that there can be at most k-1 model terms representing any k level factor without over-parameterizing a model. If you specify a custom contrast, R will generate set of orthogonal contrasts for the rest of your model terms.
# Compare level '1.0' to the mean of levels '0.5' and '1.5'
# and let R determine the second contrast orthogonal to it
print(model.fit(factors={"IV3": {"1.0": 1, "0.5": -0.5, "1.5": -0.5}}))
###############################################################################
# User-created contrasts (without R)
# ++++++++++++++++++++++++++++++++++
# Another option available to you is fitting a model with *only* your desired contrast(s) rather than a full set of k-1 contrasts. Contrary to how statistics is usually taught, you don't ever *have to* include a full set of k-1 contrasts for a k level factor! The upside to doing this is that you won't need to rely on R to compute anything for you (aside from the model fit), and you will have a model with exactly the number of terms as contrasts you desire, giving you complete control. The downside is that post-hoc tests will no longer be available (see tutorial 3 for more information on post-hoc tests), but it's unlikely you're doing post-hoc tests if you are computing a subset of specific contrasts anyway. This is also a useful approach if you don't want to use patsy's formula syntax with :code:`Lm` and :code:`Lm2` as noted above.
#
# This can be accomplished by creating new columns in your dataframe to test specific hypotheses and is trivial to do with pandas `map <https://pandas.pydata.org/pandas-docs/version/0.25/reference/api/pandas.Series.map.html/>`_ and `assign <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.assign.html/>`_ methods. For example, here we manually compute a linear contrast by creating a new column in our dataframe and treating it as a continuous variable.
# Create a new column in the dataframe with a custom (linear) contrast
df = df.assign(IV3_custom_lin=df["IV3"].map({0.5: -1, 1.0: 0, 1.5: 1}))
print(df.head())
###############################################################################
# Now we can use this variable as a continuous predictor without the need for the :code:`factors` argument. Notice how the z-stat and p-value of the estimate are the same as the linear polynomial contrast estimated above. The coefficients differ in scale only because R uses [~-0.707, ~0, ~0.707] for its polynomial contrasts rather than [-1, 0, 1] like we did.
# Estimate model
model = Lmer(
"DV_l ~ IV3_custom_lin + (IV3_custom_lin|Group)", data=df, family="binomial"
)
print(model.fit())
###############################################################################
# A note on how contrasts in R work
# ---------------------------------
# .. note::
# This is just for folks curious about how contrasts in R work
#
# Specifying multiple custom contrasts in R has always been a point of confusion amongst users. This because the :code:`contrasts()` command in R doesn't actually expect contrast weights (i.e. a design matrix) as one would intuit. Rather, it is made for generating contrast coding schemes which are the inverse of the contrast weight matrix. For a longer explanation with examples see `this reference <https://rstudio-pubs-static.s3.amazonaws.com/65059_586f394d8eb84f84b1baaf56ffb6b47f.html>`_ and `this reference <https://github.com/ejolly/R/blob/master/Guides/Contrasts_in_R.md>`_. For these situations pymer4 offers a few utility functions to convert between these matrix types if desired in :code:`pymer4.utils`: :code:`R2con()` and :code:`con2R()`.
| docs/auto_examples/example_02_categorical.py | 9,230 | 2. Categorical Predictors
=========================
The syntax for handling categorical predictors is **different** between standard regression models/two-stage-models (i.e. :code:`Lm` and :code:`Lm2`) and multi-level models (:code:`Lmer`) in :code:`pymer4`. This is because formula parsing is passed to R for :code:`Lmer` models, but handled by Python for other models. Lm and Lm2 Models ----------------- :code:`Lm` and :code:`Lm2` models use `patsy <https://patsy.readthedocs.io/en/latest/>`_ to parse model formulae. Patsy is very powerful and has built-in support for handling categorical coding schemes by wrapping a predictor in then :code:`C()` *within* the module formula. Patsy can also perform some pre-processing such as scaling and standardization using special functions like :code:`center()`. Here are some examples. import basic libraries and sample data IV3 is a categorical predictors with 3 levels in the sample data Dummy-coded/Treatment contrasts +++++++++++++++++++++++++++++++ Estimate a model using Treatment contrasts (dummy-coding) with '1.0' as the reference level This is the default of the C() function Orthogonal Polynomial Contrasts +++++++++++++++++++++++++++++++ Patsy can do this using the Poly argument to the C() function Sum-to-zero contrasts +++++++++++++++++++++ Similar to before but with the Sum argument Scaling/Centering +++++++++++++++++ Moderation with IV2, but centering IV2 first Please refer to the `patsy documentation <https://patsy.readthedocs.io/en/latest/categorical-coding.html>`_ for more details when working categorical predictors in :code:`Lm` or :code:`Lm2` models. 
Lmer Models ----------- :code:`Lmer()` models currently have support for handling categorical predictors in one of three ways based on how R's :code:`factor()` works (see the note at the end of this tutorial): - Dummy-coded factor levels (treatment contrasts) in which each model term is the difference between a factor level and a selected reference level - Orthogonal polynomial contrasts in which each model term is a polynomial contrast across factor levels (e.g. linear, quadratic, cubic, etc) - Custom contrasts for each level of a factor, which should be provided in the manner expected by R. To make re-parameterizing models easier, factor codings are passed as a dictionary to the :code:`factors` argument of a model's :code:`.fit()`. This obviates the need for adjusting data-frame properties as in R. Note that this is **different** from :code:`Lm` and :code:`Lm2` models above which expect factor codings in their formulae (because patsy does). Each of these ways also enables you to easily compute post-hoc comparisons between factor levels, as well as interactions between continuous predictors and each factor level. See tutorial 3 for more on post-hoc tests. We're going to fit a multi-level logistic regression using the dichotomous DV_l variable and the same categorical predictor (IV3) as before Dummy-coding factors ++++++++++++++++++++ First we'll use dummy-coding/treatment contrasts with 1.0 as the reference level. This will compute two coefficients: 0.5 > 1.0 and 1.5 > 1.0. Polynomial contrast coding ++++++++++++++++++++++++++ Second we'll use orthogonal polynomial contrasts. This is accomplished using the :code:`ordered=True` argument and specifying the order of the *linear* contrast in increasing order. R will automatically compute higher order polynomial contrats that are orthogonal to this linear contrast. 
In this example, since there are 3 factor levels this will result in two polynomial terms: a linear contrast we specify below corresponding to 0.5 < 1.0 < 1.5 and an orthogonal quadratic contrast automatically determined by R, corresponding to 0.5 > 1 < 1.5 Custom contrasts ++++++++++++++++ :code:`Lmer` models can also take custom factor contrasts based on how they are expected by R (see the note at the end of this tutorial for how contrasts work in R). Remember that there can be at most k-1 model terms representing any k level factor without over-parameterizing a model. If you specify a custom contrast, R will generate set of orthogonal contrasts for the rest of your model terms. Compare level '1.0' to the mean of levels '0.5' and '1.5' and let R determine the second contrast orthogonal to it User-created contrasts (without R) ++++++++++++++++++++++++++++++++++ Another option available to you is fitting a model with *only* your desired contrast(s) rather than a full set of k-1 contrasts. Contrary to how statistics is usually taught, you don't ever *have to* include a full set of k-1 contrasts for a k level factor! The upside to doing this is that you won't need to rely on R to compute anything for you (aside from the model fit), and you will have a model with exactly the number of terms as contrasts you desire, giving you complete control. The downside is that post-hoc tests will no longer be available (see tutorial 3 for more information on post-hoc tests), but it's unlikely you're doing post-hoc tests if you are computing a subset of specific contrasts anyway. This is also a useful approach if you don't want to use patsy's formula syntax with :code:`Lm` and :code:`Lm2` as noted above. 
This can be accomplished by creating new columns in your dataframe to test specific hypotheses and is trivial to do with pandas `map <https://pandas.pydata.org/pandas-docs/version/0.25/reference/api/pandas.Series.map.html/>`_ and `assign <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.assign.html/>`_ methods. For example, here we manually compute a linear contrast by creating a new column in our dataframe and treating it as a continuous variable. Create a new column in the dataframe with a custom (linear) contrast Now we can use this variable as a continuous predictor without the need for the :code:`factors` argument. Notice how the z-stat and p-value of the estimate are the same as the linear polynomial contrast estimated above. The coefficients differ in scale only because R uses [~-0.707, ~0, ~0.707] for its polynomial contrasts rather than [-1, 0, 1] like we did. Estimate model A note on how contrasts in R work --------------------------------- .. note:: This is just for folks curious about how contrasts in R work Specifying multiple custom contrasts in R has always been a point of confusion amongst users. This because the :code:`contrasts()` command in R doesn't actually expect contrast weights (i.e. a design matrix) as one would intuit. Rather, it is made for generating contrast coding schemes which are the inverse of the contrast weight matrix. For a longer explanation with examples see `this reference <https://rstudio-pubs-static.s3.amazonaws.com/65059_586f394d8eb84f84b1baaf56ffb6b47f.html>`_ and `this reference <https://github.com/ejolly/R/blob/master/Guides/Contrasts_in_R.md>`_. For these situations pymer4 offers a few utility functions to convert between these matrix types if desired in :code:`pymer4.utils`: :code:`R2con()` and :code:`con2R()`. | 7,004 | en | 0.880017 |
# print('Reading templates/__init__.py')
# Re-export everything from the sibling errors module at package level.
from .errors import *
import logging
# Debug trace so module import order can be followed during start-up.
logging.debug('Reading src/templates/__init__.py')
| src/pythonFEA/templates/__init__.py | 131 | print('Reading templates/__init__.py') | 38 | ar | 0.157462 |
import multiprocessing
import warnings
import six
from chainer.backends import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer.training.updaters import standard_updater
try:
from cupy.cuda import nccl
_available = True
except Exception:
_available = False
import numpy
class _Worker(multiprocessing.Process):
    """Subprocess that trains a replica of the model on one GPU.

    The master (``MultiprocessParallelUpdater``) sends ``('update', None)``
    or ``('finalize', None)`` jobs over a pipe. On each update the worker
    computes gradients for its slice of the mini-batch, contributes them to
    an NCCL reduce rooted at device 0, then receives the updated parameters
    via an NCCL broadcast from the master.
    """
    def __init__(self, proc_id, pipe, master):
        # proc_id indexes into master._devices / master._mpu_iterators.
        super(_Worker, self).__init__()
        self.proc_id = proc_id
        self.pipe = pipe
        self.converter = master.converter
        self.model = master._master
        self.device = master._devices[proc_id]
        self.iterator = master._mpu_iterators[proc_id]
        self.n_devices = len(master._devices)
    def setup(self):
        # The first pipe message carries the NCCL unique communicator id
        # generated by the master: ('set comm_id', comm_id).
        _, comm_id = self.pipe.recv()
        self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,
                                          self.proc_id)
        self.model.to_gpu(self.device)
        # Observations reported by this replica stay in this process; they
        # are not forwarded to the master (see class docstring of the
        # updater).
        self.reporter = reporter.Reporter()
        self.reporter.add_observer('main', self.model)
        self.reporter.add_observers('main',
                                    self.model.namedlinks(skipself=True))
    def run(self):
        dev = cuda.Device(self.device)
        dev.use()
        self.setup()
        while True:
            job, data = self.pipe.recv()
            if job == 'finalize':
                dev.synchronize()
                break
            if job == 'update':
                # For reducing memory
                self.model.cleargrads()
                batch = self.converter(self.iterator.next(), self.device)
                with self.reporter.scope({}): # pass dummy observation
                    loss = _calc_loss(self.model, batch)
                self.model.cleargrads()
                loss.backward()
                del loss
                # Sum this replica's gradients into device 0 (root of the
                # NCCL reduce); only the master applies the optimizer step.
                gg = gather_grads(self.model)
                nccl_data_type = _get_nccl_data_type(gg.dtype)
                null_stream = cuda.Stream.null
                self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
                                 nccl_data_type, nccl.NCCL_SUM, 0,
                                 null_stream.ptr)
                del gg
                self.model.cleargrads()
                # Receive the parameters broadcast by the master after its
                # optimizer step and copy them back into this replica.
                gp = gather_params(self.model)
                nccl_data_type = _get_nccl_data_type(gp.dtype)
                self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,
                                null_stream.ptr)
                scatter_params(self.model, gp)
                del gp
class MultiprocessParallelUpdater(standard_updater.StandardUpdater):
    """Implementation of a multiprocess parallel GPU Updater.
    This is an implementation of :class:`Updater` that uses multiple GPUs
    with multi-process data parallelism. It uses Nvidia NCCL for communication
    between multiple GPUs.
    It behaves similarly to
    :class:`~chainer.training.updaters.StandardUpdater`.
    The update routine is modified to support data-parallel
    computation on multiple GPUs in one machine.
    It is based on synchronous parallel SGD: it
    parallelizes the gradient computation over a mini-batch, and updates the
    parameters only in the main device.
    It does not transfer the values collected by :class:`Reporter` in the sub
    devices to the main device. So you can only see the reported values in
    the main device.
    Args:
        iterators: List of dataset iterator for the training dataset. The
            number of the iterators must be same to the number of GPUs you use.
        optimizer: Optimizer to update parameters. The model should be attached
            to the optimizer.
        converter: Converter function to build input arrays. Each batch
            extracted by the iterator is split equally between the devices and
            then passed with corresponding ``device`` option to this function.
            :func:`~chainer.dataset.concat_examples` is used by default.
        devices: Dictionary or list of devices to which the training data is
            sent. The master device will be the first one in the list or the
            value attached to the key ``'main'``.
        auto_new_epoch (bool): If ``True``,
            :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
            automatically called when the ``is_new_epoch`` attribute of the
            main iterator is ``True``.
    """
    def __init__(self, iterators, optimizer, converter=convert.concat_examples,
                 devices=None, auto_new_epoch=True):
        if not MultiprocessParallelUpdater.available():
            raise Exception(
                'NCCL is not enabled. MultiprocessParallelUpdater '
                'requires NCCL.\n'
                'Please reinstall CuPy after you install NCCL.\n'
                '(see https://docs-cupy.chainer.org/en/latest/install.html)')
        # Workers are forked later; CUDA must not be initialized in this
        # process yet, or the children would inherit a broken context.
        try:
            cuda.cupy.cuda.driver.ctxGetCurrent()
            _cuda_initialized = True
        except cuda.cupy.cuda.driver.CUDADriverError:
            # The context is not initialized, it will be fine.
            _cuda_initialized = False
        if _cuda_initialized:
            raise RuntimeError(
                'The CUDA context has been already initialized. '
                'MultiprocessParallelUpdater assumes the context is '
                'uninitialized. Please do not call CUDA API before '
                'MultiprocessParallelUpdater creates processes.')
        # NOTE(review): if ``devices`` is None this assert raises a
        # TypeError before the explicit 'must specify GPU devices' check
        # further below can run -- confirm intended error behavior.
        assert len(iterators) == len(devices)
        for iterator in iterators[1:]:
            assert len(iterator.dataset) == len(iterators[0].dataset)
        # Correct optimizer parameters for new minibatch size
        optim = optimizer.__class__.__name__
        if optim in ('Adam', 'AdaGrad', 'RMSprop'):
            optimizer.eps *= len(devices)
            warnings.warn('optimizer.eps is changed to {} '
                          'by MultiprocessParallelUpdater for new batch size.'.
                          format(optimizer.eps))
        elif optim in ('RMSpropGraves', 'AdaDelta'):
            optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta
            warnings.warn('optimizer.eps is changed to {} '
                          'by MultiprocessParallelUpdater for new batch size.'.
                          format(optimizer.eps))
        elif hasattr(optimizer, 'lr'):
            optimizer.lr /= len(devices)
            warnings.warn('optimizer.lr is changed to {} '
                          'by MultiprocessParallelUpdater for new batch size.'.
                          format(optimizer.lr))
        super(MultiprocessParallelUpdater, self).__init__(
            iterator=iterators[0],
            optimizer=optimizer,
            converter=converter,
            auto_new_epoch=auto_new_epoch,
        )
        # Normalize ``devices`` to a list whose first element is the master.
        if isinstance(devices, dict):
            devices = devices.copy()
            main = devices.pop('main')
            devices = list(six.itervalues(devices))
            devices = [main] + devices
        elif isinstance(devices, (list, tuple)):
            devices = list(devices)
        else:
            raise ValueError(
                'devices argument should be either dict, list or tuple,'
                ' but {} was given.'.format(type(devices)))
        if devices is None or any(device is None for device in devices):
            raise ValueError('must specify GPU devices')
        self._master = optimizer.target
        self._devices = devices
        self._mpu_iterators = iterators
        self._initialized = False
        self._pipes = []
        self._workers = []
        self.comm = None
    @staticmethod
    def available():
        # True iff cupy.cuda.nccl imported successfully at module load.
        return _available
    def _send_message(self, message):
        # Broadcast a (job, data) tuple to every worker pipe.
        for pipe in self._pipes:
            pipe.send(message)
    def setup_workers(self):
        """Fork one _Worker per non-master device and set up NCCL (once)."""
        if self._initialized:
            return
        self._initialized = True
        self._master.cleargrads()
        for i in six.moves.range(1, len(self._devices)):
            pipe, worker_end = multiprocessing.Pipe()
            worker = _Worker(i, worker_end, self)
            worker.start()
            self._workers.append(worker)
            self._pipes.append(pipe)
        with cuda.Device(self._devices[0]):
            self._master.to_gpu(self._devices[0])
            if len(self._devices) > 1:
                # Share one NCCL communicator id with all workers; this
                # process participates as rank 0.
                comm_id = nccl.get_unique_id()
                self._send_message(('set comm_id', comm_id))
                self.comm = nccl.NcclCommunicator(len(self._devices),
                                                  comm_id, 0)
    def update_core(self):
        self.setup_workers()
        self._send_message(('update', None))
        with cuda.Device(self._devices[0]):
            # For reducing memory
            self._master.cleargrads()
            optimizer = self.get_optimizer('main')
            iterator = self.get_iterator('main')
            batch = iterator.next()
            batch = self.converter(batch, self._devices[0])
            loss = _calc_loss(self._master, batch)
            self._master.cleargrads()
            loss.backward()
            # NCCL: reduce grads
            null_stream = cuda.Stream.null
            if self.comm is not None:
                gg = gather_grads(self._master)
                nccl_data_type = _get_nccl_data_type(gg.dtype)
                self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
                                 nccl_data_type, nccl.NCCL_SUM,
                                 0, null_stream.ptr)
                scatter_grads(self._master, gg)
                del gg
            optimizer.update()
            # Broadcast the updated parameters back to the workers.
            if self.comm is not None:
                gp = gather_params(self._master)
                nccl_data_type = _get_nccl_data_type(gp.dtype)
                self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,
                                0, null_stream.ptr)
            if self.auto_new_epoch and iterator.is_new_epoch:
                optimizer.new_epoch(auto=True)
    def finalize(self):
        # Ask every worker to drain and exit, then reap the processes.
        self._send_message(('finalize', None))
        for worker in self._workers:
            worker.join()
def _calc_loss(model, in_arrays):
if isinstance(in_arrays, tuple):
return model(*in_arrays)
elif isinstance(in_arrays, dict):
return model(**in_arrays)
else:
return model(in_arrays)
def size_num_grads(link):
    """Count total size of all gradient arrays of a given link.

    Args:
        link (chainer.link.Link): Target link object.

    Returns:
        tuple of int: ``(size, num)`` where ``size`` is the summed element
        count of all parameters and ``num`` is how many parameters are
        non-empty. Parameters with ``size == 0`` are skipped entirely.
    """
    sizes = [param.size for param in link.params() if param.size != 0]
    return sum(sizes), len(sizes)
def _memcpy_gather():
    """Build the elementwise CUDA kernel used by :func:`_gather`.

    For each output index ``i`` the kernel binary-searches ``info`` to find
    which source array element ``i`` belongs to, then copies that element
    into ``dst`` (converting fp16 to fp32 when ``dtypes[id]`` is 1).
    ``info[0]`` carries the number of source arrays (read as ``num_src``);
    ``info[1:]`` holds cumulative end offsets. Null source pointers produce
    zeros.
    """
    return cuda.elementwise(
        'raw T ptrs, raw X dtypes, raw Y info',
        'raw float32 dst',
        '''
            int id_min = id_pre;
            int id_max = num_src;
            while (id_max - id_min > 1) {
                int id = (id_max + id_min) / 2;
                if (i < info[id]) id_max = id;
                else id_min = id;
            }
            int id = id_min;
            int i_dst = i;
            int i_src = i;
            if (id > 0) i_src -= info[id];
            dst[i_dst] = 0;
            if (ptrs[id] != NULL) {
                if (dtypes[id] == 0) { // fp32
                    float *src = reinterpret_cast<float *>(ptrs[id]);
                    dst[i_dst] = src[i_src];
                }
                else { // fp16
                    float16 *src = reinterpret_cast<float16 *>(ptrs[id]);
                    dst[i_dst] = static_cast<float>(src[i_src]);
                }
            }
            id_pre = id;
        ''',
        '_memcpy_gather',
        loop_prep='''
            int num_src = info[0];
            int id_pre = 0;
            ''')
def _gather(link, target):
    """Pack every non-empty ``target`` array of ``link`` into one flat array.

    Builds three device arrays describing the sources -- raw pointers,
    per-array dtype codes (0=fp32, 1=fp16) and cumulative element offsets --
    then launches the :func:`_memcpy_gather` kernel to copy everything into
    a single float32 buffer of length ``size``.

    Args:
        link (chainer.link.Link): Link whose parameters are gathered.
        target (str): Attribute gathered from each parameter
            (``'grad'`` or ``'data'``).

    Returns:
        cupy.ndarray: Flat float32 array.
    """
    size, num = size_num_grads(link)
    ptrs = numpy.empty(num, dtype=numpy.uint64)
    dtypes = numpy.empty(num, dtype=numpy.int8)
    info = numpy.empty(num + 1, dtype=numpy.int32)
    info[0] = 0
    i = 0
    # sorted() gives a deterministic parameter order, so every process
    # produces the same buffer layout.
    for _, param in sorted(link.namedparams()):
        if param.size == 0:
            continue
        ptrs[i] = 0 # NULL pointer
        d = getattr(param, target)
        if d is not None:
            ptrs[i] = d.data.ptr
        dtypes[i] = 0 # fp32
        if param.dtype == numpy.float16:
            dtypes[i] = 1 # fp16
        info[i + 1] = info[i] + param.size
        i += 1
    # info[1:] now holds cumulative end offsets; info[0] is reused to carry
    # the number of source arrays, which the kernel reads as ``num_src``.
    info[0] = num
    ptrs = cuda.to_gpu(ptrs)
    dtypes = cuda.to_gpu(dtypes)
    info = cuda.to_gpu(info)
    return _memcpy_gather()(ptrs, dtypes, info, size=size)
def gather_grads(link):
    """Concatenate every gradient array of ``link`` into one flat GPU array.

    Args:
        link (chainer.link.Link): Target link object.

    Returns:
        cupy.ndarray: Single float32 buffer holding all gradients.

    Raises:
        RuntimeError: If the link lives on the CPU (NumPy backend).
    """
    if link.xp is not numpy:
        return _gather(link, 'grad')
    raise RuntimeError('gather_grads works only on GPU.')
def gather_params(link):
    """Concatenate every parameter (``data``) array of ``link`` into one
    flat GPU array.

    Args:
        link (chainer.link.Link): Target link object.

    Returns:
        cupy.ndarray: Single float32 buffer holding all parameter data.

    Raises:
        RuntimeError: If the link lives on the CPU (NumPy backend).
    """
    if link.xp is not numpy:
        return _gather(link, 'data')
    raise RuntimeError('Link.gather_params works only on GPU.')
def _memcpy_scatter():
    """Build the elementwise CUDA kernel used by :func:`_scatter`.

    Inverse of :func:`_memcpy_gather`: for each flat index ``i`` the kernel
    binary-searches ``info`` to find the destination array, then writes
    ``array[i]`` into it (converting fp32 back to fp16 when ``dtypes[id]``
    is 1). ``info[0]`` carries the number of destination arrays; null
    destination pointers are skipped.
    """
    return cuda.elementwise(
        'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',
        '',
        '''
            int id_min = id_pre;
            int id_max = num_src;
            while (id_max - id_min > 1) {
                int id = (id_max + id_min) / 2;
                if (i < info[id]) id_max = id;
                else id_min = id;
            }
            int id = id_min;
            int i_src = i;
            int i_dst = i;
            if (id > 0) i_dst -= info[id];
            if (ptrs[id] != NULL) {
                if (dtypes[id] == 0) { // fp32
                    float *dst = reinterpret_cast<float *>(ptrs[id]);
                    dst[i_dst] = array[i_src];
                }
                else { // fp16
                    float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);
                    dst[i_dst] = static_cast<float16>(array[i_src]);
                }
            }
            id_pre = id;
        ''',
        '_memcpy_scatter',
        loop_prep='''
            int num_src = info[0];
            int id_pre = 0;
            ''')
def _scatter(link, array, target):
    """Unpack a flat gathered array back into per-parameter GPU arrays.

    Inverse of :func:`_gather`: copies consecutive slices of ``array`` into
    each parameter's ``target`` attribute (``'grad'`` or ``'data'``),
    allocating a zero-filled destination for parameters whose target array
    is still ``None``.

    Args:
        link (chainer.link.Link): Link whose parameters receive the data.
        array (cupy.ndarray): Flat array produced by :func:`_gather`.
        target (str): Attribute name to scatter into.
    """
    size, num = size_num_grads(link)
    ptrs = numpy.zeros(num, dtype=numpy.uint64)
    dtypes = numpy.zeros(num, dtype=numpy.int8)
    info = numpy.zeros(num + 1, dtype=numpy.int32)
    info[0] = 0
    i = 0
    # Same deterministic order as _gather, so offsets line up.
    for _, param in sorted(link.namedparams()):
        if param.size == 0:
            continue
        ptrs[i] = 0 # NULL pointer
        d = getattr(param, target)
        if d is None:
            # Allocate the destination lazily so every parameter receives
            # its slice even if it had no grad/data array yet.
            d = cuda.cupy.zeros(param.shape, dtype=param.dtype)
            setattr(param, target, d)
        ptrs[i] = d.data.ptr
        dtypes[i] = 0 # fp32
        if param.dtype == numpy.float16:
            dtypes[i] = 1 # fp16
        info[i + 1] = info[i] + param.size
        i += 1
    if i != num:
        # size_num_grads() and namedparams() disagreed about the number of
        # non-empty parameters. The original code had ``raise()`` here,
        # which itself fails with an unrelated TypeError; raise a
        # descriptive error instead.
        raise RuntimeError(
            'inconsistent number of non-empty parameters: {} != {}'.format(
                i, num))
    # The kernel reads info[0] as the number of destination arrays.
    info[0] = num
    ptrs = cuda.to_gpu(ptrs)
    dtypes = cuda.to_gpu(dtypes)
    info = cuda.to_gpu(info)
    return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)
def scatter_grads(link, array):
    """Distribute a gathered gradient buffer back to each parameter.

    Args:
        link (chainer.link.Link): Target link object.
        array (cupy.ndarray): Gathered array created by ``gather_grads()``.
    """
    return _scatter(link, array, 'grad')
def scatter_params(link, array):
    """Distribute a gathered parameter (``data``) buffer back to each
    parameter of the link.

    Args:
        link (chainer.link.Link): Target link object.
        array (cupy.ndarray): Gathered array created by ``gather_params()``.
    """
    return _scatter(link, array, 'data')
def _get_nccl_data_type(dtype):
"""Get data type for NCCL"""
if dtype == numpy.float32:
nccl_data_type = nccl.NCCL_FLOAT
elif dtype == numpy.float16:
nccl_data_type = nccl.NCCL_HALF
elif dtype == numpy.float64:
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type
| chainer/training/updaters/multiprocess_parallel_updater.py | 16,381 | Implementation of a multiprocess parallel GPU Updater.
This is an implementation of :class:`Updater` that uses multiple GPUs
with multi-process data parallelism. It uses Nvidia NCCL for communication
between multiple GPUs.
It behaves similarly to
:class:`~chainer.training.updaters.StandardUpdater`.
The update routine is modified to support data-parallel
computation on multiple GPUs in one machine.
It is based on synchronous parallel SGD: it
parallelizes the gradient computation over a mini-batch, and updates the
parameters only in the main device.
It does not transfer the values collected by :class:`Reporter` in the sub
devices to the main device. So you can only see the reported values in
the main device.
Args:
iterators: List of dataset iterator for the training dataset. The
number of the iterators must be same to the number of GPUs you use.
optimizer: Optimizer to update parameters. The model should be attached
to the optimizer.
converter: Converter function to build input arrays. Each batch
extracted by the iterator is split equally between the devices and
then passed with corresponding ``device`` option to this function.
:func:`~chainer.dataset.concat_examples` is used by default.
devices: Dictionary or list of devices to which the training data is
sent. The master device will be the first one in the list or the
value attached to the key ``'main'``.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
Get data type for NCCL
Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads()
Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params()
Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object.
For reducing memory pass dummy observation The context is not initialized, it will be fine. Correct optimizer parameters for new minibatch size not quite right for AdaDelta For reducing memory NCCL: reduce grads NULL pointer fp32 fp16 NULL pointer fp32 fp16 | 2,736 | en | 0.688311 |
import libtmux
def ensure_server() -> libtmux.Server:
    """Either create a new tmux server handle or return the existing one.

    ``libtmux.Server()`` is the single entry point for both cases.
    """
    server = libtmux.Server()
    return server
def spawn_session(name: str, kubeconfig_location: str, server: libtmux.Server):
    """Create a named tmux session with KUBECONFIG pointing at the given file.

    No-op when a session with ``name`` already exists. Otherwise the session
    environment gets KUBECONFIG set, and the variable is additionally
    exported in the initial pane.
    """
    if server.has_session(name):
        return
    session = server.new_session(name)
    session.set_environment("KUBECONFIG", kubeconfig_location)
    # the new_session will create default window and pane which will not contain KUBECONFIG, add manually
    pane = session.attached_window.attached_pane
    pane.send_keys("export KUBECONFIG={}".format(kubeconfig_location))
| kmux/tmux.py | 616 | Either create new or return existing server
the new_session will create default window and pane which will not contain KUBECONFIG, add manually | 145 | en | 0.589872 |
import numpy as np
import cv2
from PIL import Image
# Output file format and directory for saved still images.
img_form = "jpg"
img_out_dir = "./output_images"
# Output file format and directory for saved videos.
vid_form = "mp4"
vid_out_dir = "./test_videos_output"
class array_image:
    """Base container holding an image array and its binary (mask) variant."""

    def __init__(self):
        # Both buffers start empty; subclasses fill them lazily.
        self.image = None
        self.binary_image = None

    def store(self, name):
        """Save the binary image under ``img_out_dir`` as ``<name>.<img_form>``."""
        path = img_out_dir + "/" + name + "." + img_form
        print("Saving image: " + path)
        Image.fromarray(self.binary_image).save(path)
class color(array_image):
    """Single color-channel view of the caller's image, with thresholding.

    Extracts one channel ('R'/'G'/'B' from RGB, 'H'/'L'/'S' from HLS,
    anything else falls back to grayscale) and can threshold it into a
    binary {0, 1} mask. Results are cached; passing a custom ``thresh``
    forces a recompute.
    """
    def __init__(self, caller=None, color="Gr"):
        # Default (low, high] threshold per channel key.
        threshold = {'R': (200, 255), 'G': (200, 255), 'B': (200, 255),
                     'H': (15, 100), 'L': (0, 255), 'S': (90, 255),
                     'Gr': (200, 255)}
        self.available = False         # channel image cached?
        self.binary_available = False  # binary mask cached?
        self.image = None
        self.binary_image = None
        # caller provides .image (RGB array) and region_of_interest().
        self.caller = caller
        self.color = color
        self.threshold = threshold[self.color]

    def get(self, binary=False, masked=False, thresh=None):
        """Return the channel image or its binary mask, computing lazily.

        Args:
            binary: return the thresholded {0, 1} mask instead of the raw
                channel.
            masked: additionally apply the caller's region-of-interest mask.
            thresh: optional (low, high) override; forces a recompute.
        """
        # Use `and`/`is None` instead of the original bitwise `&`/`== None`.
        if self.available and thresh is None:
            if binary:
                if not self.binary_available:
                    self.binary_image = self.color_select(color=self.color,
                                                          binary=True)
                    self.binary_available = True
                ret = self.binary_image
            else:
                ret = self.image
        else:
            self.image = self.color_select(color=self.color, binary=False)
            self.available = True
            if binary:
                # Bug fix: the custom threshold used to be dropped here
                # (thresh=None was always passed); honor it now.
                self.binary_image = self.color_select(color=self.color,
                                                      binary=True,
                                                      thresh=thresh)
                self.binary_available = True
                ret = self.binary_image
            else:
                ret = self.image
        if masked:
            ret = self.caller.region_of_interest(ret)
        return ret

    def grayscale(self):
        """Applies the Grayscale transform
        This will return an image with only one color channel
        but NOTE: to see the returned image as grayscale
        (assuming your grayscaled image is called 'gray')
        you should call plt.imshow(gray, cmap='gray')"""
        return cv2.cvtColor(self.caller.image, cv2.COLOR_RGB2GRAY)
        # Or use BGR2GRAY if you read an image with cv2.imread()
        # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    def color_select(self, color='R', binary=True, thresh=None):
        """Extract one channel; optionally threshold it to a {0, 1} mask.

        Args:
            color: channel key ('R'/'G'/'B', 'H'/'L'/'S', else grayscale).
            binary: threshold the channel when True, else return it raw.
            thresh: (low, high) override; defaults to self.threshold.
        """
        # image received is RGB (mpimg.imread ordering)
        img = np.copy(self.caller.image)
        RGB_colors = {'R': 0, 'G': 1, 'B': 2}
        HLS_colors = {'H': 0, 'L': 1, 'S': 2}
        if color in RGB_colors:
            channel = img[:, :, RGB_colors[color]]
        elif color in HLS_colors:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
            channel = img[:, :, HLS_colors[color]]
        else:
            channel = self.grayscale()
        if not binary:
            return channel
        if thresh is None:
            thresh = self.threshold
        binary_output = np.zeros_like(img[:, :, 0])
        binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1
        return binary_output
| color.py | 3,150 | Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')
Or use BGR2GRAY if you read an image with cv2.imread() return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)image received is RGB mpimg.imread | 367 | en | 0.863719 |
import numpy as np
from network.activation import Activation
from network.layer import Layer
from network.utils.im2col_cython import im2col_cython, col2im_cython
class Convolution(Layer):
    """Convolutional layer backed by im2col, supporting backprop and DFA.

    Weight gradients can be produced either by standard backpropagation
    (``back_prob``) or by direct feedback alignment (``dfa``) using the
    fixed random feedback matrix ``B``.
    """
    def __init__(self, filter_shape, stride, padding, dropout_rate: float = 0, activation: Activation = None,
                 last_layer=False, weight_initializer=None, fb_weight_initializer=None) -> None:
        # filter_shape is (num_filters, channels, filter_h, filter_w).
        assert len(filter_shape) == 4, \
            "invalid filter shape: 4-tuple required, {}-tuple given".format(len(filter_shape))
        super().__init__()
        self.filter_shape = filter_shape
        self.stride = stride
        self.padding = padding
        self.dropout_rate = dropout_rate
        self.activation = activation
        self.last_layer = last_layer
        self.weight_initializer = weight_initializer
        self.fb_weight_initializer = fb_weight_initializer
    def initialize(self, input_size, num_classes, train_method) -> tuple:
        """Allocate W, b and feedback weights B; return the output size.

        Args:
            input_size: (channels, height, width) of the incoming feature map.
            num_classes: number of output classes (rows of B for DFA).
            train_method: unused here; kept for the common Layer interface.

        Returns:
            tuple: (num_filters, h_out, w_out) of this layer's output.
        """
        assert np.size(input_size) == 3, \
            "invalid input size: 3-tuple required for convolution layer"
        c_in, h_in, w_in = input_size
        f, c_f, h_f, w_f = self.filter_shape
        assert c_in == c_f, \
            "input channel dimension ({}) not compatible with filter channel dimension ({})".format(c_in, c_f)
        assert (h_in - h_f + 2 * self.padding) % self.stride == 0, \
            "filter width ({}) not compatible with input width ({})".format(h_f, h_in)
        assert (w_in - w_f + 2 * self.padding) % self.stride == 0, \
            "filter height ({}) not compatible with input height ({})".format(h_f, h_in)
        self.h_out = ((h_in - h_f + 2 * self.padding) // self.stride) + 1
        self.w_out = ((w_in - w_f + 2 * self.padding) // self.stride) + 1
        # initialize weights
        if self.weight_initializer is None:
            # Uniform in +-1/sqrt(fan_in) when no initializer is supplied.
            sqrt_fan_in = np.sqrt(c_in * h_in * w_in)
            self.W = np.random.uniform(low=-1 / sqrt_fan_in, high=1 / sqrt_fan_in, size=self.filter_shape)
        else:
            self.W = self.weight_initializer.init(dim=(f, c_f, h_f, w_f))
        # initialize feedback weights
        if self.fb_weight_initializer is None:
            sqrt_fan_out = np.sqrt(f * self.h_out * self.w_out)
            # self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f, self.h_out, self.w_out))
            self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f * self.h_out * self.w_out))
        else:
            # self.B = self.fb_weight_initializer.init(dim=(num_classes, f, self.h_out, self.w_out))
            self.B = self.fb_weight_initializer.init(dim=(num_classes, f * self.h_out * self.w_out))
        # initialize bias units
        self.b = np.zeros(f)
        return f, self.h_out, self.w_out
    def forward(self, X, mode='predict') -> np.ndarray:
        """Convolve X (n, c, h, w) with W via im2col; apply activation/dropout.

        Caches x_cols, a_in and a_out for the backward passes. Inverted
        dropout mask is sampled only when mode == 'train'.
        """
        n_in, c, h_in, w_in = X.shape
        n_f, c, h_f, w_f = self.W.shape
        self.x_cols = im2col_cython(X, h_f, w_f, self.padding, self.stride) # <->
        z = self.W.reshape((n_f, -1)).dot(self.x_cols)
        z += self.b.reshape(-1, 1) # +
        z = z.reshape(n_f, self.h_out, self.w_out, n_in).transpose(3, 0, 1, 2)
        self.a_in = X
        if self.activation is None:
            self.a_out = z
        else:
            self.a_out = self.activation.forward(z)
        if mode == 'train' and self.dropout_rate > 0:
            # self.dropout_mask = np.random.binomial(size=self.a_out.shape, n=1, p=1 - self.dropout_rate)
            self.dropout_mask = (np.random.rand(*self.a_out.shape) > self.dropout_rate).astype(int)
            self.a_out *= self.dropout_mask
        return self.a_out
    def dfa(self, E: np.ndarray) -> tuple:
        """Direct-feedback-alignment gradients from the global error E.

        Projects E (batch, num_classes) through the fixed feedback matrix B
        into this layer's output shape, then computes dW/db exactly as in
        backprop. NOTE(review): when activation is None the error is scaled
        by a_out itself (same as back_prob below) -- confirm intended.
        """
        # E = np.einsum('ij,jklm->iklm', E, self.B)
        n_f, c_f, h_f, w_f = self.W.shape
        E = np.dot(E, self.B).reshape((-1, n_f, self.h_out, self.w_out))
        if self.dropout_rate > 0:
            E *= self.dropout_mask
        if self.activation is None:
            E *= self.a_out
        else:
            E *= self.activation.gradient(self.a_out)
        dW = E.transpose((1, 2, 3, 0)).reshape(n_f, -1).dot(self.x_cols.T).reshape(self.W.shape)
        db = np.sum(E, axis=(0, 2, 3))
        return dW, db
    def back_prob(self, E: np.ndarray) -> tuple:
        """Standard backprop step (name kept for interface compatibility).

        Args:
            E: upstream error with this layer's output shape.

        Returns:
            tuple: (dX, dW, db) -- input gradient via col2im plus weight and
            bias gradients.
        """
        if self.dropout_rate > 0:
            E *= self.dropout_mask
        n_in, c_in, h_in, w_in = self.a_in.shape
        n_f, c_f, h_f, w_f = self.W.shape
        if self.activation is None:
            E *= self.a_out
        else:
            E *= self.activation.gradient(self.a_out)
        delta_reshaped = E.transpose((1, 2, 3, 0)).reshape(n_f, -1)
        dX_cols = self.W.reshape(n_f, -1).T.dot(delta_reshaped)
        dX = col2im_cython(dX_cols, n_in, c_in, h_in, w_in, h_f, w_f, self.padding, self.stride)
        dW = delta_reshaped.dot(self.x_cols.T).reshape(self.W.shape)
        db = np.sum(E, axis=(0, 2, 3))
        return dX, dW, db
    def has_weights(self) -> bool:
        # This layer carries trainable parameters (W, b).
        return True
| network/layers/convolution_im2col.py | 5,048 | initialize weights initialize feedback weights self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f, self.h_out, self.w_out)) self.B = self.fb_weight_initializer.init(dim=(num_classes, f, self.h_out, self.w_out)) initialize bias units <-> + self.dropout_mask = np.random.binomial(size=self.a_out.shape, n=1, p=1 - self.dropout_rate) E = np.einsum('ij,jklm->iklm', E, self.B) | 415 | en | 0.325867 |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import MagicMock, patch
from polyaxon.cli.artifacts import artifacts
from polyaxon_sdk import V1ProjectVersionKind
from tests.test_cli.utils import BaseCommandTestCase
@pytest.mark.cli_mark
class TestCliArtifacts(BaseCommandTestCase):
    """CLI tests for the ``artifacts`` command group.

    Each test patches the SDK endpoints the command should hit and asserts
    only on call counts. ``mock.patch`` decorators apply bottom-up, so the
    bottom-most decorator maps to the first mock argument.
    """
    @patch("polyaxon_sdk.ProjectsV1Api.create_version")
    @patch("polyaxon_sdk.ProjectsV1Api.patch_version")
    @patch("polyaxon_sdk.ProjectsV1Api.get_version")
    def test_create_artifact(self, get_version, patch_version, create_version):
        # Missing --project: the command exits before any API call.
        self.runner.invoke(artifacts, ["register"])
        assert create_version.call_count == 0
        assert patch_version.call_count == 0
        assert get_version.call_count == 0
        # No existing version -> register creates one.
        get_version.return_value = None
        self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
        assert get_version.call_count == 1
        assert patch_version.call_count == 0
        assert create_version.call_count == 1
        # Version exists -> without --force nothing is patched or created.
        get_version.return_value = MagicMock(
            kind=V1ProjectVersionKind.ARTIFACT,
        )
        self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
        assert get_version.call_count == 2
        assert patch_version.call_count == 0
        assert create_version.call_count == 1
        # --force patches the existing version instead of creating one.
        self.runner.invoke(artifacts, ["register", "--project=owner/foo", "--force"])
        assert get_version.call_count == 3
        assert patch_version.call_count == 1
        assert create_version.call_count == 1
    @patch("polyaxon_sdk.ProjectsV1Api.list_versions")
    def test_list_artifacts(self, list_artifacts):
        self.runner.invoke(artifacts, ["ls", "--project=owner/foo"])
        assert list_artifacts.call_count == 1
    @patch("polyaxon_sdk.ProjectsV1Api.get_version")
    def test_get_artifact(self, get_artifact):
        self.runner.invoke(artifacts, ["get", "-p", "admin/foo"])
        assert get_artifact.call_count == 1
    @patch("polyaxon_sdk.ProjectsV1Api.patch_version")
    def test_update_artifact(self, update_artifact):
        self.runner.invoke(
            artifacts, ["update", "-p", "admin/foo", "--description=foo"]
        )
        assert update_artifact.call_count == 1
    @patch("polyaxon_sdk.ProjectsV1Api.create_version_stage")
    def test_update_artifact_stage(self, stage_artifact):
        self.runner.invoke(
            artifacts, ["stage", "-p", "admin/foo", "-to", "production", "--reason=foo"]
        )
        assert stage_artifact.call_count == 1
| cli/tests/test_cli/test_artifacts.py | 3,077 | !/usr/bin/python Copyright 2018-2022 Polyaxon, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 574 | en | 0.835885 |
import asyncio
import logging
from typing import Dict, List, Optional, Set, Tuple
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.ints import uint32, uint128
log = logging.getLogger(__name__)
class SyncStore:
    """Tracks sync state and which peers have advertised which peaks."""
    # Whether or not we are syncing
    sync_mode: bool
    long_sync: bool
    peak_to_peer: Dict[bytes32, Set[bytes32]] # Header hash : peer node id
    peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]] # peer node id : [header_hash, height, weight]
    sync_target_header_hash: Optional[bytes32] # Peak hash we are syncing towards
    sync_target_height: Optional[uint32] # Peak height we are syncing towards
    peers_changed: asyncio.Event
    batch_syncing: Set[bytes32] # Set of nodes which we are batch syncing from
    backtrack_syncing: Dict[bytes32, int] # Set of nodes which we are backtrack syncing from, and how many threads
    @classmethod
    async def create(cls):
        # Async factory; all state starts empty / not syncing.
        self = cls()
        self.sync_mode = False
        self.long_sync = False
        self.sync_target_header_hash = None
        self.sync_target_height = None
        # NOTE(review): peak_fork_point is initialized here but neither
        # declared above nor referenced elsewhere in this class -- verify
        # whether it is still used by callers.
        self.peak_fork_point = {}
        self.peak_to_peer = {}
        self.peer_to_peak = {}
        self.peers_changed = asyncio.Event()
        self.batch_syncing = set()
        self.backtrack_syncing = {}
        return self
    def set_peak_target(self, peak_hash: bytes32, target_height: uint32):
        # Record the peak we are actively syncing towards.
        self.sync_target_header_hash = peak_hash
        self.sync_target_height = target_height
    def get_sync_target_hash(self) -> Optional[bytes32]:
        return self.sync_target_header_hash
    # Return annotation corrected from Optional[bytes32]: the stored value
    # is a height (uint32), see set_peak_target.
    def get_sync_target_height(self) -> Optional[uint32]:
        return self.sync_target_height
    def set_sync_mode(self, sync_mode: bool):
        self.sync_mode = sync_mode
    def get_sync_mode(self) -> bool:
        return self.sync_mode
    def set_long_sync(self, long_sync: bool):
        self.long_sync = long_sync
    def get_long_sync(self) -> bool:
        return self.long_sync
    def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool):
        """
        Adds a record that a certain peer has a block.
        """
        if header_hash == self.sync_target_header_hash:
            # A peer now has our sync target; wake anyone awaiting
            # peers_changed.
            self.peers_changed.set()
        if header_hash in self.peak_to_peer:
            self.peak_to_peer[header_hash].add(peer_id)
        else:
            self.peak_to_peer[header_hash] = {peer_id}
        if new_peak:
            self.peer_to_peak[peer_id] = (header_hash, height, weight)
    def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]:
        """
        Returns: peer ids of peers that have at least one of the header hashes.
        """
        node_ids: Set[bytes32] = set()
        for header_hash in header_hashes:
            if header_hash in self.peak_to_peer:
                for node_id in self.peak_to_peer[header_hash]:
                    node_ids.add(node_id)
        return node_ids
    def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]:
        """
        Returns: dictionary of peer id to peak information.
        """
        ret = {}
        for peer_id, v in self.peer_to_peak.items():
            # Skip peaks we no longer track in peak_to_peer (e.g. after
            # clear_sync_info).
            if v[0] not in self.peak_to_peer:
                continue
            ret[peer_id] = v
        return ret
    def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]:
        """
        Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified
        us of.
        """
        if len(self.peer_to_peak) == 0:
            return None
        heaviest_peak_hash: Optional[bytes32] = None
        heaviest_peak_weight: uint128 = uint128(0)
        heaviest_peak_height: Optional[uint32] = None
        for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items():
            if peak_hash not in self.peak_to_peer:
                continue
            if heaviest_peak_hash is None or weight > heaviest_peak_weight:
                heaviest_peak_hash = peak_hash
                heaviest_peak_weight = weight
                heaviest_peak_height = height
        # All known peaks may have been pruned from peak_to_peer, in which
        # case this assert would fire; values are otherwise set together.
        assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None
        return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight
    async def clear_sync_info(self):
        """
        Clears the peak_to_peer info which can get quite large.
        """
        self.peak_to_peer = {}
    def peer_disconnected(self, node_id: bytes32):
        # Drop every record of this peer and wake waiters so they can
        # re-evaluate available peers.
        if node_id in self.peer_to_peak:
            del self.peer_to_peak[node_id]
        for peak, peers in self.peak_to_peer.items():
            if node_id in peers:
                self.peak_to_peer[peak].remove(node_id)
                assert node_id not in self.peak_to_peer[peak]
        self.peers_changed.set()
| seno/full_node/sync_store.py | 4,931 | Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified
us of.
Returns: dictionary of peer id to peak information.
Returns: peer ids of peers that have at least one of the header hashes.
Adds a record that a certain peer has a block.
Whether or not we are syncing Header hash : peer node id peer node id : [header_hash, height, weight] Peak hash we are syncing towards Peak height we are syncing towards Set of nodes which we are batch syncing from Set of nodes which we are backtrack syncing from, and how many threads | 567 | en | 0.93074 |
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AddListMembers1(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared Swagger type (consumed by to_dict()).
    swagger_types = {
        'email_address': 'str',
        'email_type': 'str',
        'status': 'str',
        'merge_fields': 'dict(str, object)',
        'interests': 'dict(str, bool)',
        'language': 'str',
        'vip': 'bool',
        'location': 'Location',
        'marketing_permissions': 'list[MarketingPermission1]',
        'ip_signup': 'str',
        'timestamp_signup': 'datetime',
        'ip_opt': 'str',
        'timestamp_opt': 'datetime',
        'tags': 'list[str]'
    }

    # Attribute name -> JSON key in the API payload (identical here).
    attribute_map = {
        'email_address': 'email_address',
        'email_type': 'email_type',
        'status': 'status',
        'merge_fields': 'merge_fields',
        'interests': 'interests',
        'language': 'language',
        'vip': 'vip',
        'location': 'location',
        'marketing_permissions': 'marketing_permissions',
        'ip_signup': 'ip_signup',
        'timestamp_signup': 'timestamp_signup',
        'ip_opt': 'ip_opt',
        'timestamp_opt': 'timestamp_opt',
        'tags': 'tags'
    }

    def __init__(self, email_address=None, email_type=None, status=None, merge_fields=None, interests=None, language=None, vip=None, location=None, marketing_permissions=None, ip_signup=None, timestamp_signup=None, ip_opt=None, timestamp_opt=None, tags=None):  # noqa: E501
        """AddListMembers1 - a model defined in Swagger"""  # noqa: E501
        self._email_address = None
        self._email_type = None
        self._status = None
        self._merge_fields = None
        self._interests = None
        self._language = None
        self._vip = None
        self._location = None
        self._marketing_permissions = None
        self._ip_signup = None
        self._timestamp_signup = None
        self._ip_opt = None
        self._timestamp_opt = None
        self._tags = None
        self.discriminator = None
        # email_address and status are required: they are assigned
        # unconditionally and their setters raise ValueError on None.
        self.email_address = email_address
        if email_type is not None:
            self.email_type = email_type
        self.status = status
        if merge_fields is not None:
            self.merge_fields = merge_fields
        if interests is not None:
            self.interests = interests
        if language is not None:
            self.language = language
        if vip is not None:
            self.vip = vip
        if location is not None:
            self.location = location
        if marketing_permissions is not None:
            self.marketing_permissions = marketing_permissions
        if ip_signup is not None:
            self.ip_signup = ip_signup
        if timestamp_signup is not None:
            self.timestamp_signup = timestamp_signup
        if ip_opt is not None:
            self.ip_opt = ip_opt
        if timestamp_opt is not None:
            self.timestamp_opt = timestamp_opt
        if tags is not None:
            self.tags = tags

    @property
    def email_address(self):
        """Gets the email_address of this AddListMembers1.  # noqa: E501

        Email address for a subscriber.  # noqa: E501

        :return: The email_address of this AddListMembers1.  # noqa: E501
        :rtype: str
        """
        return self._email_address

    @email_address.setter
    def email_address(self, email_address):
        """Sets the email_address of this AddListMembers1.

        Email address for a subscriber.  # noqa: E501

        :param email_address: The email_address of this AddListMembers1.  # noqa: E501
        :type: str
        """
        if email_address is None:
            raise ValueError("Invalid value for `email_address`, must not be `None`")  # noqa: E501

        self._email_address = email_address

    @property
    def email_type(self):
        """Gets the email_type of this AddListMembers1.  # noqa: E501

        Type of email this member asked to get ('html' or 'text').  # noqa: E501

        :return: The email_type of this AddListMembers1.  # noqa: E501
        :rtype: str
        """
        return self._email_type

    @email_type.setter
    def email_type(self, email_type):
        """Sets the email_type of this AddListMembers1.

        Type of email this member asked to get ('html' or 'text').  # noqa: E501

        :param email_type: The email_type of this AddListMembers1.  # noqa: E501
        :type: str
        """

        self._email_type = email_type

    @property
    def status(self):
        """Gets the status of this AddListMembers1.  # noqa: E501

        Subscriber's current status.  # noqa: E501

        :return: The status of this AddListMembers1.  # noqa: E501
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this AddListMembers1.

        Subscriber's current status.  # noqa: E501

        :param status: The status of this AddListMembers1.  # noqa: E501
        :type: str
        """
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        allowed_values = ["subscribed", "unsubscribed", "cleaned", "pending", "transactional"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
                .format(status, allowed_values)
            )

        self._status = status

    @property
    def merge_fields(self):
        """Gets the merge_fields of this AddListMembers1.  # noqa: E501

        A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure.  # noqa: E501

        :return: The merge_fields of this AddListMembers1.  # noqa: E501
        :rtype: dict(str, object)
        """
        return self._merge_fields

    @merge_fields.setter
    def merge_fields(self, merge_fields):
        """Sets the merge_fields of this AddListMembers1.

        A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure.  # noqa: E501

        :param merge_fields: The merge_fields of this AddListMembers1.  # noqa: E501
        :type: dict(str, object)
        """

        self._merge_fields = merge_fields

    @property
    def interests(self):
        """Gets the interests of this AddListMembers1.  # noqa: E501

        The key of this object's properties is the ID of the interest in question.  # noqa: E501

        :return: The interests of this AddListMembers1.  # noqa: E501
        :rtype: dict(str, bool)
        """
        return self._interests

    @interests.setter
    def interests(self, interests):
        """Sets the interests of this AddListMembers1.

        The key of this object's properties is the ID of the interest in question.  # noqa: E501

        :param interests: The interests of this AddListMembers1.  # noqa: E501
        :type: dict(str, bool)
        """

        self._interests = interests

    @property
    def language(self):
        """Gets the language of this AddListMembers1.  # noqa: E501

        If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/).  # noqa: E501

        :return: The language of this AddListMembers1.  # noqa: E501
        :rtype: str
        """
        return self._language

    @language.setter
    def language(self, language):
        """Sets the language of this AddListMembers1.

        If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/).  # noqa: E501

        :param language: The language of this AddListMembers1.  # noqa: E501
        :type: str
        """

        self._language = language

    @property
    def vip(self):
        """Gets the vip of this AddListMembers1.  # noqa: E501

        [VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber.  # noqa: E501

        :return: The vip of this AddListMembers1.  # noqa: E501
        :rtype: bool
        """
        return self._vip

    @vip.setter
    def vip(self, vip):
        """Sets the vip of this AddListMembers1.

        [VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber.  # noqa: E501

        :param vip: The vip of this AddListMembers1.  # noqa: E501
        :type: bool
        """

        self._vip = vip

    @property
    def location(self):
        """Gets the location of this AddListMembers1.  # noqa: E501


        :return: The location of this AddListMembers1.  # noqa: E501
        :rtype: Location
        """
        return self._location

    @location.setter
    def location(self, location):
        """Sets the location of this AddListMembers1.


        :param location: The location of this AddListMembers1.  # noqa: E501
        :type: Location
        """

        self._location = location

    @property
    def marketing_permissions(self):
        """Gets the marketing_permissions of this AddListMembers1.  # noqa: E501

        The marketing permissions for the subscriber.  # noqa: E501

        :return: The marketing_permissions of this AddListMembers1.  # noqa: E501
        :rtype: list[MarketingPermission1]
        """
        return self._marketing_permissions

    @marketing_permissions.setter
    def marketing_permissions(self, marketing_permissions):
        """Sets the marketing_permissions of this AddListMembers1.

        The marketing permissions for the subscriber.  # noqa: E501

        :param marketing_permissions: The marketing_permissions of this AddListMembers1.  # noqa: E501
        :type: list[MarketingPermission1]
        """

        self._marketing_permissions = marketing_permissions

    @property
    def ip_signup(self):
        """Gets the ip_signup of this AddListMembers1.  # noqa: E501

        IP address the subscriber signed up from.  # noqa: E501

        :return: The ip_signup of this AddListMembers1.  # noqa: E501
        :rtype: str
        """
        return self._ip_signup

    @ip_signup.setter
    def ip_signup(self, ip_signup):
        """Sets the ip_signup of this AddListMembers1.

        IP address the subscriber signed up from.  # noqa: E501

        :param ip_signup: The ip_signup of this AddListMembers1.  # noqa: E501
        :type: str
        """

        self._ip_signup = ip_signup

    @property
    def timestamp_signup(self):
        """Gets the timestamp_signup of this AddListMembers1.  # noqa: E501

        The date and time the subscriber signed up for the list in ISO 8601 format.  # noqa: E501

        :return: The timestamp_signup of this AddListMembers1.  # noqa: E501
        :rtype: datetime
        """
        return self._timestamp_signup

    @timestamp_signup.setter
    def timestamp_signup(self, timestamp_signup):
        """Sets the timestamp_signup of this AddListMembers1.

        The date and time the subscriber signed up for the list in ISO 8601 format.  # noqa: E501

        :param timestamp_signup: The timestamp_signup of this AddListMembers1.  # noqa: E501
        :type: datetime
        """

        self._timestamp_signup = timestamp_signup

    @property
    def ip_opt(self):
        """Gets the ip_opt of this AddListMembers1.  # noqa: E501

        The IP address the subscriber used to confirm their opt-in status.  # noqa: E501

        :return: The ip_opt of this AddListMembers1.  # noqa: E501
        :rtype: str
        """
        return self._ip_opt

    @ip_opt.setter
    def ip_opt(self, ip_opt):
        """Sets the ip_opt of this AddListMembers1.

        The IP address the subscriber used to confirm their opt-in status.  # noqa: E501

        :param ip_opt: The ip_opt of this AddListMembers1.  # noqa: E501
        :type: str
        """

        self._ip_opt = ip_opt

    @property
    def timestamp_opt(self):
        """Gets the timestamp_opt of this AddListMembers1.  # noqa: E501

        The date and time the subscriber confirmed their opt-in status in ISO 8601 format.  # noqa: E501

        :return: The timestamp_opt of this AddListMembers1.  # noqa: E501
        :rtype: datetime
        """
        return self._timestamp_opt

    @timestamp_opt.setter
    def timestamp_opt(self, timestamp_opt):
        """Sets the timestamp_opt of this AddListMembers1.

        The date and time the subscriber confirmed their opt-in status in ISO 8601 format.  # noqa: E501

        :param timestamp_opt: The timestamp_opt of this AddListMembers1.  # noqa: E501
        :type: datetime
        """

        self._timestamp_opt = timestamp_opt

    @property
    def tags(self):
        """Gets the tags of this AddListMembers1.  # noqa: E501

        The tags that are associated with a member.  # noqa: E501

        :return: The tags of this AddListMembers1.  # noqa: E501
        :rtype: list[str]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this AddListMembers1.

        The tags that are associated with a member.  # noqa: E501

        :param tags: The tags of this AddListMembers1.  # noqa: E501
        :type: list[str]
        """

        self._tags = tags

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything exposing to_dict)
        # inside lists, dicts, or plain attributes.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(AddListMembers1, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AddListMembers1):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| mailchimp_marketing_asyncio/models/add_list_members1.py | 15,584 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
AddListMembers1 - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the email_address of this AddListMembers1. # noqa: E501
Email address for a subscriber. # noqa: E501
:return: The email_address of this AddListMembers1. # noqa: E501
:rtype: str
Sets the email_address of this AddListMembers1.
Email address for a subscriber. # noqa: E501
:param email_address: The email_address of this AddListMembers1. # noqa: E501
:type: str
Gets the email_type of this AddListMembers1. # noqa: E501
Type of email this member asked to get ('html' or 'text'). # noqa: E501
:return: The email_type of this AddListMembers1. # noqa: E501
:rtype: str
Sets the email_type of this AddListMembers1.
Type of email this member asked to get ('html' or 'text'). # noqa: E501
:param email_type: The email_type of this AddListMembers1. # noqa: E501
:type: str
Gets the interests of this AddListMembers1. # noqa: E501
The key of this object's properties is the ID of the interest in question. # noqa: E501
:return: The interests of this AddListMembers1. # noqa: E501
:rtype: dict(str, bool)
Sets the interests of this AddListMembers1.
The key of this object's properties is the ID of the interest in question. # noqa: E501
:param interests: The interests of this AddListMembers1. # noqa: E501
:type: dict(str, bool)
Gets the ip_opt of this AddListMembers1. # noqa: E501
The IP address the subscriber used to confirm their opt-in status. # noqa: E501
:return: The ip_opt of this AddListMembers1. # noqa: E501
:rtype: str
Sets the ip_opt of this AddListMembers1.
The IP address the subscriber used to confirm their opt-in status. # noqa: E501
:param ip_opt: The ip_opt of this AddListMembers1. # noqa: E501
:type: str
Gets the ip_signup of this AddListMembers1. # noqa: E501
IP address the subscriber signed up from. # noqa: E501
:return: The ip_signup of this AddListMembers1. # noqa: E501
:rtype: str
Sets the ip_signup of this AddListMembers1.
IP address the subscriber signed up from. # noqa: E501
:param ip_signup: The ip_signup of this AddListMembers1. # noqa: E501
:type: str
Gets the language of this AddListMembers1. # noqa: E501
If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501
:return: The language of this AddListMembers1. # noqa: E501
:rtype: str
Sets the language of this AddListMembers1.
If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501
:param language: The language of this AddListMembers1. # noqa: E501
:type: str
Gets the location of this AddListMembers1. # noqa: E501
:return: The location of this AddListMembers1. # noqa: E501
:rtype: Location
Sets the location of this AddListMembers1.
:param location: The location of this AddListMembers1. # noqa: E501
:type: Location
Gets the marketing_permissions of this AddListMembers1. # noqa: E501
The marketing permissions for the subscriber. # noqa: E501
:return: The marketing_permissions of this AddListMembers1. # noqa: E501
:rtype: list[MarketingPermission1]
Sets the marketing_permissions of this AddListMembers1.
The marketing permissions for the subscriber. # noqa: E501
:param marketing_permissions: The marketing_permissions of this AddListMembers1. # noqa: E501
:type: list[MarketingPermission1]
Gets the merge_fields of this AddListMembers1. # noqa: E501
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:return: The merge_fields of this AddListMembers1. # noqa: E501
:rtype: dict(str, object)
Sets the merge_fields of this AddListMembers1.
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:param merge_fields: The merge_fields of this AddListMembers1. # noqa: E501
:type: dict(str, object)
Gets the status of this AddListMembers1. # noqa: E501
Subscriber's current status. # noqa: E501
:return: The status of this AddListMembers1. # noqa: E501
:rtype: str
Sets the status of this AddListMembers1.
Subscriber's current status. # noqa: E501
:param status: The status of this AddListMembers1. # noqa: E501
:type: str
Gets the tags of this AddListMembers1. # noqa: E501
The tags that are associated with a member. # noqa: E501
:return: The tags of this AddListMembers1. # noqa: E501
:rtype: list[str]
Sets the tags of this AddListMembers1.
The tags that are associated with a member. # noqa: E501
:param tags: The tags of this AddListMembers1. # noqa: E501
:type: list[str]
Gets the timestamp_opt of this AddListMembers1. # noqa: E501
The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501
:return: The timestamp_opt of this AddListMembers1. # noqa: E501
:rtype: datetime
Sets the timestamp_opt of this AddListMembers1.
The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501
:param timestamp_opt: The timestamp_opt of this AddListMembers1. # noqa: E501
:type: datetime
Gets the timestamp_signup of this AddListMembers1. # noqa: E501
The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501
:return: The timestamp_signup of this AddListMembers1. # noqa: E501
:rtype: datetime
Sets the timestamp_signup of this AddListMembers1.
The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501
:param timestamp_signup: The timestamp_signup of this AddListMembers1. # noqa: E501
:type: datetime
Returns the model properties as a dict
Returns the string representation of the model
Gets the vip of this AddListMembers1. # noqa: E501
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:return: The vip of this AddListMembers1. # noqa: E501
:rtype: bool
Sets the vip of this AddListMembers1.
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:param vip: The vip of this AddListMembers1. # noqa: E501
:type: bool
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 | 6,848 | en | 0.71361 |
# Python Course #06 - Nested Conditions
# First example (kept for reference):
#nome = str(input('Qual é seu Nome: '))
#if nome == 'Jefferson':
#    print('Que Nome Bonito')
#else:
#    print('Seu nome é bem normal.')
#print('Tenha um bom dia, {}'.format(nome))

# Second example: classify the typed name with chained elif branches.
nome = str(input('Qual é seu Nome: '))
if nome == 'Jefferson':
    print('Que Nome Bonito')
elif nome in ('Pedro', 'Marcos', 'Paulo'):
    print('Seu nome é bem popular no Brasil.')
# BUG FIX: membership must be tested against a tuple of names.
# The original `nome in 'Jennifer Vitoria Mariana Deborah'` was a
# substring test, so '', 'nn' or 'Vit' would also match.
elif nome in ('Jennifer', 'Vitoria', 'Mariana', 'Deborah'):
    print('Belo nome você tem em !')
else:
    print('Seu nome é bem normal.')
print('Tenha um bom dia, {}'.format(nome))
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Creature template for the shared Talus space comm station.

    :param kernel: engine kernel passed in by the template loader; not
        referenced here beyond the generator's fixed signature.
    :return: a configured ``Creature`` instance.
    """
    result = Creature()

    result.template = "object/mobile/shared_space_comm_station_talus.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","selonian_base_male")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
"""Config flow to configure Xiaomi Miio."""
import logging
from re import search
from micloud import MiCloud
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_CLOUD_COUNTRY,
CONF_CLOUD_PASSWORD,
CONF_CLOUD_SUBDEVICES,
CONF_CLOUD_USERNAME,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MAC,
CONF_MANUAL,
CONF_MODEL,
DEFAULT_CLOUD_COUNTRY,
DOMAIN,
MODELS_ALL,
MODELS_ALL_DEVICES,
MODELS_GATEWAY,
SERVER_COUNTRY_CODES,
)
from .device import ConnectXiaomiDevice
_LOGGER = logging.getLogger(__name__)

# The device token is a fixed-length 32-character string.
DEVICE_SETTINGS = {
    vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
}
# Manual setup form: host plus token.
DEVICE_CONFIG = vol.Schema({vol.Required(CONF_HOST): str}).extend(DEVICE_SETTINGS)
# Fallback form shown when the model could not be detected automatically.
DEVICE_MODEL_CONFIG = vol.Schema({vol.Required(CONF_MODEL): vol.In(MODELS_ALL)})
# Cloud login form; CONF_MANUAL lets the user bypass the cloud entirely.
DEVICE_CLOUD_CONFIG = vol.Schema(
    {
        vol.Optional(CONF_CLOUD_USERNAME): str,
        vol.Optional(CONF_CLOUD_PASSWORD): str,
        vol.Optional(CONF_CLOUD_COUNTRY, default=DEFAULT_CLOUD_COUNTRY): vol.In(
            SERVER_COUNTRY_CODES
        ),
        vol.Optional(CONF_MANUAL, default=False): bool,
    }
)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Init object."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        errors = {}
        if user_input is not None:
            wants_subdevices = user_input.get(CONF_CLOUD_SUBDEVICES, False)
            entry_data = self.config_entry.data
            # Cloud subdevices need a full set of cloud credentials.
            has_credentials = all(
                entry_data.get(key)
                for key in (
                    CONF_CLOUD_USERNAME,
                    CONF_CLOUD_PASSWORD,
                    CONF_CLOUD_COUNTRY,
                )
            )
            if wants_subdevices and not has_credentials:
                errors["base"] = "cloud_credentials_incomplete"
                # Kick off a re-auth flow so the user can supply the
                # missing cloud credentials.
                self.hass.async_create_task(
                    self.hass.config_entries.flow.async_init(
                        DOMAIN,
                        context={"source": SOURCE_REAUTH},
                        data=entry_data,
                    )
                )
            else:
                return self.async_create_entry(title="", data=user_input)

        options_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_CLOUD_SUBDEVICES,
                    default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False),
                ): bool
            }
        )
        return self.async_show_form(
            step_id="init", data_schema=options_schema, errors=errors
        )
class XiaomiMiioFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a Xiaomi Miio config flow."""

    VERSION = 1

    def __init__(self):
        """Initialize."""
        # Device connection details, filled in progressively by the steps.
        self.host = None
        self.mac = None
        self.token = None
        self.model = None
        self.name = None
        # Xiaomi cloud credentials (optional; used to look up the token).
        self.cloud_username = None
        self.cloud_password = None
        self.cloud_country = None
        # Maps "name - model" display string -> raw cloud device dict,
        # consumed by the select step.
        self.cloud_devices = {}

    @staticmethod
    @callback
    def async_get_options_flow(config_entry) -> OptionsFlowHandler:
        """Get the options flow."""
        return OptionsFlowHandler(config_entry)

    async def async_step_reauth(self, user_input=None):
        """Perform reauth upon an authentication error or missing cloud credentials."""
        # user_input here is the existing config entry's data.
        self.host = user_input[CONF_HOST]
        self.token = user_input[CONF_TOKEN]
        self.mac = user_input[CONF_MAC]
        self.model = user_input.get(CONF_MODEL)
        return await self.async_step_reauth_confirm()

    async def async_step_reauth_confirm(self, user_input=None):
        """Dialog that informs the user that reauth is required."""
        if user_input is not None:
            return await self.async_step_cloud()
        return self.async_show_form(
            step_id="reauth_confirm", data_schema=vol.Schema({})
        )

    async def async_step_import(self, conf: dict):
        """Import a configuration from config.yaml."""
        self.host = conf[CONF_HOST]
        self.token = conf[CONF_TOKEN]
        self.name = conf.get(CONF_NAME)
        self.model = conf.get(CONF_MODEL)

        self.context.update(
            {"title_placeholders": {"name": f"YAML import {self.host}"}}
        )
        return await self.async_step_connect()

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        return await self.async_step_cloud()

    async def async_step_zeroconf(self, discovery_info):
        """Handle zeroconf discovery."""
        name = discovery_info.get("name")
        self.host = discovery_info.get("host")
        self.mac = discovery_info.get("properties", {}).get("mac")
        if self.mac is None:
            # Some devices advertise the MAC inside the "poch" property
            # (as "...mac=XXXX...") instead of a dedicated "mac" entry.
            poch = discovery_info.get("properties", {}).get("poch", "")
            result = search(r"mac=\w+", poch)
            if result is not None:
                self.mac = result.group(0).split("=")[1]

        if not name or not self.host or not self.mac:
            return self.async_abort(reason="not_xiaomi_miio")

        self.mac = format_mac(self.mac)

        # Check which device is discovered.
        for gateway_model in MODELS_GATEWAY:
            # zeroconf names use '-' where models use '.'.
            if name.startswith(gateway_model.replace(".", "-")):
                unique_id = self.mac
                await self.async_set_unique_id(unique_id)
                self._abort_if_unique_id_configured({CONF_HOST: self.host})

                self.context.update(
                    {"title_placeholders": {"name": f"Gateway {self.host}"}}
                )

                return await self.async_step_cloud()

        for device_model in MODELS_ALL_DEVICES:
            if name.startswith(device_model.replace(".", "-")):
                unique_id = self.mac
                await self.async_set_unique_id(unique_id)
                self._abort_if_unique_id_configured({CONF_HOST: self.host})

                self.context.update(
                    {"title_placeholders": {"name": f"{device_model} {self.host}"}}
                )

                return await self.async_step_cloud()

        # Discovered device is not yet supported
        _LOGGER.debug(
            "Not yet supported Xiaomi Miio device '%s' discovered with host %s",
            name,
            self.host,
        )
        return self.async_abort(reason="not_xiaomi_miio")

    def extract_cloud_info(self, cloud_device_info):
        """Extract the cloud info."""
        # Only fill in fields that are still unknown -- except the token,
        # which the cloud record always provides/overrides.
        if self.host is None:
            self.host = cloud_device_info["localip"]
        if self.mac is None:
            self.mac = format_mac(cloud_device_info["mac"])
        if self.model is None:
            self.model = cloud_device_info["model"]
        if self.name is None:
            self.name = cloud_device_info["name"]
        self.token = cloud_device_info["token"]

    async def async_step_cloud(self, user_input=None):
        """Configure a xiaomi miio device through the Miio Cloud."""
        errors = {}
        if user_input is not None:
            if user_input[CONF_MANUAL]:
                # User opted out of the cloud; ask for host/token directly.
                return await self.async_step_manual()

            cloud_username = user_input.get(CONF_CLOUD_USERNAME)
            cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
            cloud_country = user_input.get(CONF_CLOUD_COUNTRY)

            if not cloud_username or not cloud_password or not cloud_country:
                errors["base"] = "cloud_credentials_incomplete"
                return self.async_show_form(
                    step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
                )

            # MiCloud.login is blocking; run it in the executor.
            miio_cloud = MiCloud(cloud_username, cloud_password)
            if not await self.hass.async_add_executor_job(miio_cloud.login):
                errors["base"] = "cloud_login_error"
                return self.async_show_form(
                    step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
                )

            devices_raw = await self.hass.async_add_executor_job(
                miio_cloud.get_devices, cloud_country
            )

            if not devices_raw:
                errors["base"] = "cloud_no_devices"
                return self.async_show_form(
                    step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
                )

            # Keep only top-level devices (entries with a parent_id are
            # subdevices of a gateway).
            self.cloud_devices = {}
            for device in devices_raw:
                parent_id = device.get("parent_id")
                if not parent_id:
                    name = device["name"]
                    model = device["model"]
                    list_name = f"{name} - {model}"
                    self.cloud_devices[list_name] = device

            self.cloud_username = cloud_username
            self.cloud_password = cloud_password
            self.cloud_country = cloud_country

            if self.host is not None:
                # A host is already known (zeroconf/reauth): match it
                # against the cloud records to pick the right device.
                for device in self.cloud_devices.values():
                    cloud_host = device.get("localip")
                    if cloud_host == self.host:
                        self.extract_cloud_info(device)
                        return await self.async_step_connect()

            if len(self.cloud_devices) == 1:
                self.extract_cloud_info(list(self.cloud_devices.values())[0])
                return await self.async_step_connect()

            return await self.async_step_select()

        return self.async_show_form(
            step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
        )

    async def async_step_select(self, user_input=None):
        """Handle multiple cloud devices found."""
        errors = {}
        if user_input is not None:
            cloud_device = self.cloud_devices[user_input["select_device"]]
            self.extract_cloud_info(cloud_device)
            return await self.async_step_connect()

        select_schema = vol.Schema(
            {vol.Required("select_device"): vol.In(list(self.cloud_devices))}
        )

        return self.async_show_form(
            step_id="select", data_schema=select_schema, errors=errors
        )

    async def async_step_manual(self, user_input=None):
        """Configure a xiaomi miio device Manually."""
        errors = {}
        if user_input is not None:
            self.token = user_input[CONF_TOKEN]
            if user_input.get(CONF_HOST):
                self.host = user_input[CONF_HOST]

            return await self.async_step_connect()

        if self.host:
            # Host already known (e.g. from zeroconf): only ask for a token.
            schema = vol.Schema(DEVICE_SETTINGS)
        else:
            schema = DEVICE_CONFIG

        return self.async_show_form(step_id="manual", data_schema=schema, errors=errors)

    async def async_step_connect(self, user_input=None):
        """Connect to a xiaomi miio device."""
        errors = {}
        if self.host is None or self.token is None:
            return self.async_abort(reason="incomplete_info")

        if user_input is not None:
            # Model supplied by the fallback form after a failed detection.
            self.model = user_input[CONF_MODEL]

        # Try to connect to a Xiaomi Device.
        connect_device_class = ConnectXiaomiDevice(self.hass)
        await connect_device_class.async_connect_device(self.host, self.token)
        device_info = connect_device_class.device_info

        if self.model is None and device_info is not None:
            self.model = device_info.model

        if self.model is None:
            errors["base"] = "cannot_connect"
            return self.async_show_form(
                step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
            )

        if self.mac is None and device_info is not None:
            self.mac = format_mac(device_info.mac_address)

        unique_id = self.mac
        existing_entry = await self.async_set_unique_id(
            unique_id, raise_on_progress=False
        )
        if existing_entry:
            # Re-auth / re-setup of a known device: update the stored
            # entry in place and reload it instead of creating a new one.
            data = existing_entry.data.copy()
            data[CONF_HOST] = self.host
            data[CONF_TOKEN] = self.token
            if (
                self.cloud_username is not None
                and self.cloud_password is not None
                and self.cloud_country is not None
            ):
                data[CONF_CLOUD_USERNAME] = self.cloud_username
                data[CONF_CLOUD_PASSWORD] = self.cloud_password
                data[CONF_CLOUD_COUNTRY] = self.cloud_country
            self.hass.config_entries.async_update_entry(existing_entry, data=data)
            await self.hass.config_entries.async_reload(existing_entry.entry_id)
            return self.async_abort(reason="reauth_successful")

        if self.name is None:
            self.name = self.model

        flow_type = None
        for gateway_model in MODELS_GATEWAY:
            if self.model.startswith(gateway_model):
                flow_type = CONF_GATEWAY

        if flow_type is None:
            for device_model in MODELS_ALL_DEVICES:
                if self.model.startswith(device_model):
                    flow_type = CONF_DEVICE

        if flow_type is not None:
            return self.async_create_entry(
                title=self.name,
                data={
                    CONF_FLOW_TYPE: flow_type,
                    CONF_HOST: self.host,
                    CONF_TOKEN: self.token,
                    CONF_MODEL: self.model,
                    CONF_MAC: self.mac,
                    CONF_CLOUD_USERNAME: self.cloud_username,
                    CONF_CLOUD_PASSWORD: self.cloud_password,
                    CONF_CLOUD_COUNTRY: self.cloud_country,
                },
            )

        errors["base"] = "unknown_device"
        return self.async_show_form(
            step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
        )
| homeassistant/components/xiaomi_miio/config_flow.py | 14,236 | Options for the component.
Handle a Xiaomi Miio config flow.
Init object.
Initialize.
Get the options flow.
Extract the cloud info.
Config flow to configure Xiaomi Miio.
trigger re-auth flow Check which device is discovered. Discovered device is not yet supported Try to connect to a Xiaomi Device. | 300 | en | 0.833234 |
#!/usr/bin/env python
import os
import re
import pickle
import json
import glob
import numpy as np
from abc import ABC, abstractmethod
from concurrent.futures import ProcessPoolExecutor
from contextlib import contextmanager
from collections import namedtuple, OrderedDict
from tqdm import tqdm
from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY
from pathlib import Path
#from simplejpeg import is_jpeg
def is_jpeg(data):
    """Return True when *data* starts with the JPEG/JFIF SOI marker.

    Truncated input (fewer than two bytes) yields False. Adapted from
    ``simplejpeg.is_jpeg`` but deliberately lenient: the trailing EOI marker
    is not checked, because most JPEG viewers tolerate a missing EOI.

    :param data: bytes-like object to probe
    :return: True if the data looks like a JPEG stream
    """
    soi_marker = b'\xFF\xD8'
    return data[:2] == soi_marker
# Bookkeeping for a single frame record inside a *.gulp file:
#   loc    -- byte offset of the record within the data file
#   pad    -- number of trailing NUL padding bytes (for 4-byte alignment)
#   length -- total record length in bytes, padding included
ImgInfo = namedtuple('ImgInfo', ['loc',
                                 'pad',
                                 'length'])
class FileFormatException(Exception):
    """Raised when a file expected to contain JPEG data does not look like JPEG."""
    pass
class AbstractSerializer(ABC):  # pragma: no cover
    """Interface for loading/dumping meta dictionaries to and from disk."""
    @abstractmethod
    def load(self, file_name):
        """Read and return the object stored at ``file_name``."""
        pass
    @abstractmethod
    def dump(self, thing, file_name):
        """Write ``thing`` to ``file_name``, overwriting any existing content."""
        pass
class PickleSerializer(AbstractSerializer):
    """Serializer backed by the binary :mod:`pickle` protocol."""

    def load(self, file_name):
        """Unpickle and return the object stored at ``file_name``."""
        with open(file_name, 'rb') as handle:
            loaded = pickle.load(handle)
        return loaded

    def dump(self, thing, file_name):
        """Pickle ``thing`` into ``file_name``."""
        with open(file_name, 'wb') as handle:
            pickle.dump(thing, handle)
class JSONSerializer(AbstractSerializer):
    """Serializer backed by JSON text files.

    Loading decodes JSON objects into ``OrderedDict`` so that key order on
    disk is preserved in memory.
    """

    def load(self, file_name):
        """Parse and return the JSON document stored at ``file_name``."""
        with open(file_name, 'r') as handle:
            return json.load(handle, object_pairs_hook=OrderedDict)

    def dump(self, thing, file_name):
        """Serialize ``thing`` as JSON into ``file_name``."""
        with open(file_name, 'w') as handle:
            json.dump(thing, handle)
# Shared, stateless serializer singletons; GulpChunk defaults to the JSON one.
pickle_serializer = PickleSerializer()
json_serializer = JSONSerializer()
def extract_input_for_getitem(element):
    """Normalize a ``__getitem__`` key into an ``(id, slice)`` pair.

    Accepts either a bare id (``int`` or ``str``) or a 2-tuple of
    ``(id, slice)``. The id is always returned as a string; the slice is
    ``None`` when the caller did not provide one.

    :raises TypeError: for any other input shape.
    """
    if isinstance(element, tuple) and len(element) == 2:
        item_id, item_slice = element
    elif isinstance(element, (int, str)):
        item_id, item_slice = element, None
    else:
        raise TypeError("Undefined input type! id or (id, slice) expected")
    return str(item_id), item_slice
class GulpDirectory(object):
    """ Represents a directory containing *.gulp and *.gmeta files.

    Parameters
    ----------
    output_dir: str
        Path to the directory containing the files.
    jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
        the desired decoded image format (e.g. np.ndarray)

    Attributes
    ----------
    all_meta_dicts: list of dicts
        All meta dicts from all chunks as a list.
    chunk_lookup: dict: str -> int
        Mapping element id to the id of the chunk that stores it.
    chunk_objs_lookup: dict: int -> GulpChunk
        Mapping chunk id to the corresponding GulpChunk object.
    merged_meta_dict: dict: id -> meta dict
        all meta dicts merged
    """
    def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img):
        self.output_dir = output_dir
        self.jpeg_decoder = jpeg_decoder
        self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks()))
        self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()]
        self.num_chunks = len(self.chunk_objs_lookup)
        # Invert the per-chunk meta dicts: element id -> owning chunk id.
        self.chunk_lookup = {}
        for chunk_id, chunk in self.chunk_objs_lookup.items():
            for id_ in chunk.meta_dict:
                self.chunk_lookup[id_] = chunk_id
        # Merge all per-chunk meta dicts, refusing duplicate element ids.
        # (Replaces a confusing no-op 'for ... else' construct: the else
        # branch always ran because the loop contained no break.)
        self.merged_meta_dict = {}
        for d in self.all_meta_dicts:
            for k in d.keys():
                assert k not in self.merged_meta_dict,\
                    "Duplicate id detected {}".format(k)
            self.merged_meta_dict.update(d)
    def __iter__(self):
        return iter(self.chunk_objs_lookup.values())
    def chunks(self):
        """ Return a generator over existing GulpChunk objects which are ready
        to be opened and read from. """
        return self.__iter__()
    def _chunks(self):
        # Lazily construct GulpChunk objects for every (data, meta) pair.
        return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
                self._existing_file_paths())
    def new_chunks(self, total_new_chunks):
        """ Return a generator over freshly setup GulpChunk objects which are ready
        to be opened and written to.

        Parameters
        ----------
        total_new_chunks: int
            The total number of new chunks to initialize.
        """
        return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
                 self._allocate_new_file_paths(total_new_chunks)))
    def __getitem__(self, element):
        """Look up ``element`` (id or (id, slice)) in its owning chunk."""
        id_, _ = extract_input_for_getitem(element)
        chunk_id = self.chunk_lookup[id_]
        gulp_chunk = self.chunk_objs_lookup[chunk_id]
        with gulp_chunk.open():
            return gulp_chunk[element]
    def _find_existing_data_paths(self):
        return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp')))
    def _find_existing_meta_paths(self):
        return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta')))
    def _load_label_dict(self):
        """Load the label-to-index mapping stored alongside the chunks."""
        # Fix: use a context manager so the file handle is closed (the
        # original leaked it), and open in text mode for JSON data.
        with open(os.path.join(self.output_dir, 'label2idx.json'), 'r') as f:
            return json.load(f)
    def _existing_file_paths(self):
        data_paths = self._find_existing_data_paths()
        meta_paths = self._find_existing_meta_paths()
        assert len(data_paths) == len(meta_paths)
        return zip(data_paths, meta_paths)
    def _find_ids_from_paths(self, paths):
        # The first integer in each basename is the chunk id.
        return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths]
    def _chunk_ids(self):
        data_paths = self._find_existing_data_paths()
        meta_paths = self._find_existing_meta_paths()
        data_ids = self._find_ids_from_paths(data_paths)
        meta_ids = self._find_ids_from_paths(meta_paths)
        assert data_ids == meta_ids
        return data_ids
    def _next_chunk_id(self):
        # One past the highest existing chunk id; 0 for an empty directory.
        existing_chunk_ids = self._chunk_ids()
        next_chunk_id = 0
        if len(existing_chunk_ids) > 0:
            next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1
        return next_chunk_id
    def _allocate_new_file_paths(self, total_new_chunks):
        next_chunk_id = self._next_chunk_id()
        return [self._initialize_filenames(i)
                for i in range(next_chunk_id,
                               next_chunk_id + total_new_chunks)]
    def _initialize_filenames(self, chunk_id):
        data_file_path = os.path.join(
            self.output_dir, 'data_{}.gulp'.format(chunk_id))
        meta_file_path = os.path.join(
            self.output_dir, 'meta_{}.gmeta'.format(chunk_id))
        return data_file_path, meta_file_path
class GulpChunk(object):
    """ Represents a gulp chunk on disk.

    A chunk is a pair of files: a *.gulp data file holding concatenated,
    4-byte-aligned JPEG records, and a *.gmeta file holding, per element id,
    the frame locations ('frame_info') and user metadata ('meta_data').

    Parameters
    ----------
    data_file_path: str
        Path to the *.gulp file.
    meta_file_path: str
        Path to the *.gmeta file.
    serializer: subclass of AbstractSerializer
        The type of serializer to use.
    jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
        the desired decoded image format (e.g. np.ndarray)
    """
    def __init__(self, data_file_path, meta_file_path,
                 serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img):
        self.jpeg_decoder = jpeg_decoder
        self.serializer = serializer
        self.data_file_path = data_file_path
        self.meta_file_path = meta_file_path
        self.meta_dict = self._get_or_create_dict()
        # Lazily built cache: element id -> list of ImgInfo tuples.
        self._img_info = {}
        # Open file object for the data file; managed by open()/flush().
        self.fp = None
    def __contains__(self, id_):
        return str(id_) in self.meta_dict
    def __getitem__(self, element):
        id_, slice_ = extract_input_for_getitem(element)
        return self.read_frames(id_, slice_)
    def __iter__(self):
        return self.iter_all()
    def _get_frame_infos(self, id_):
        # NOTE(review): implicitly returns None for an unknown id, which
        # surfaces as a TypeError in read_frames -- kept for compatibility.
        id_ = str(id_)
        if id_ in self.meta_dict:
            return (self._get_or_create_img_info(id_),
                    self._copy_meta_data(id_))
    def _copy_meta_data(self, id_):
        # Return a copy so callers cannot mutate the stored meta data.
        return dict(self.meta_dict[id_]['meta_data'][0])
    def _get_or_create_img_info(self, id_):
        if id_ not in self._img_info:
            self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']]
        return self._img_info[id_]
    def _get_or_create_dict(self):
        # Load an existing meta file, otherwise start with an empty dict.
        if os.path.exists(self.meta_file_path):
            return self.serializer.load(self.meta_file_path)
        else:
            return OrderedDict()
    @staticmethod
    def _default_factory():
        return OrderedDict([('frame_info', []), ('meta_data', [])])
    @staticmethod
    def _pad_image(number):
        # Number of NUL bytes needed to align a record of this size to 4.
        return (4 - (number % 4)) % 4
    def _append_meta(self, id_, meta_data):
        id_ = str(id_)
        if id_ not in self.meta_dict:  # implements an OrderedDefaultDict
            self.meta_dict[id_] = self._default_factory()
        self.meta_dict[id_]['meta_data'].append(meta_data)
    def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
        """Encode (if necessary) and append one frame record for ``id_``."""
        loc = self.fp.tell()
        if isinstance(image, (str, Path)):
            # If image is a string or pathlib Path, assume that it is a path
            # to a jpeg file and add it directly without decoding and
            # encoding it.
            with open(str(image), 'rb') as image_file:
                img_str = image_file.read()
            if not is_jpeg(img_str):
                raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.')
        else:  # np.array
            img_str = img_to_jpeg_bytes(image, jpeg_encode_quality)
        assert len(img_str) > 0
        # NUL-pad each record so the next one starts 4-byte aligned.
        pad = self._pad_image(len(img_str))
        record = img_str.ljust(len(img_str) + pad, b'\0')
        assert len(record) > 0
        img_info = ImgInfo(loc=loc,
                           length=len(record),
                           pad=pad)
        id_ = str(id_)
        if id_ not in self.meta_dict:  # implements an OrderedDefaultDict
            self.meta_dict[id_] = self._default_factory()
        self.meta_dict[id_]['frame_info'].append(img_info)
        self.fp.write(record)
    def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
        for frame in frames:
            self._write_frame(id_, frame, jpeg_encode_quality)
    @contextmanager
    def open(self, flag='rb'):
        """Open the gulp chunk for reading or writing.

        Parameters
        ----------
        flag: str
            'rb': Read binary
            'wb': Write binary
            'ab': Append to binary

        Notes
        -----
        Works as a context manager but returns None.
        """
        if flag in ['wb', 'rb', 'ab']:
            self.fp = open(self.data_file_path, flag)
        else:
            m = "This file does not support the mode: '{}'".format(flag)
            raise NotImplementedError(m)
        # Fix: wrap the yield in try/finally so the data file is always
        # closed (and writers still flush the meta file) even when the
        # caller's with-body raises; previously the handle leaked and the
        # meta file was never written on error.
        try:
            yield
        finally:
            if flag in ['wb', 'ab']:
                self.flush()
            self.fp.close()
    def flush(self):
        """Flush all buffers and write the meta file."""
        self.fp.flush()
        self.serializer.dump(self.meta_dict, self.meta_file_path)
    def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
        """ Append an item to the gulp.

        Parameters
        ----------
        id_ : str
            The ID of the item
        meta_data: dict
            The meta-data associated with the item.
        frames: list of numpy arrays
            The frames of the item as a list of numpy dictionaries consisting
            of image pixel values.
        """
        self._append_meta(id_, meta_data)
        self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality)
    def read_frames(self, id_, slice_=None):
        """ Read frames for a single item.

        Parameters
        ----------
        id_: str
            The ID of the item
        slice_: slice or list of ints:
            A slice or list of indices with which to select frames.

        Returns
        -------
        frames (int), meta(dict)
            The frames of the item as a list of numpy arrays consisting of
            image pixel values. And the metadata.
        """
        frame_infos, meta_data = self._get_frame_infos(id_)
        slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos))
        def extract_frame(frame_info):
            # Seek to the record, strip alignment padding, then decode.
            self.fp.seek(frame_info.loc)
            record = self.fp.read(frame_info.length)
            img_str = record[:len(record)-frame_info.pad]
            img = self.jpeg_decoder(img_str)
            return img
        if isinstance(slice_element, (list, np.ndarray)):
            selected_frame_infos = [frame_infos[idx] for idx in slice_element]
        else:
            selected_frame_infos = frame_infos[slice_element]
        frames = [extract_frame(frame_info)
                  for frame_info in selected_frame_infos]
        return frames, meta_data
    def iter_all(self, accepted_ids=None, shuffle=False):
        """ Iterate over all frames in the gulp.

        Parameters
        ----------
        accepted_ids: list of str
            A filter for accepted ids.
        shuffle: bool
            Shuffle the items or not.

        Returns
        -------
        iterator
            An iterator that yield a series of frames,meta tuples. See
            `read_frames` for details.
        """
        ids = self.meta_dict.keys()
        if accepted_ids is not None:
            intersection = list(set(ids) & set(accepted_ids))
            ids = [id_ for id_ in ids if id_ in intersection]
        if shuffle:
            ids = list(ids)
            np.random.shuffle(ids)
        with self.open('rb'):
            for id_ in ids:
                frames, meta = self.read_frames(id_)
                yield frames, meta
class ChunkWriter(object):
    """Writes items pulled from an adapter into a gulp chunk.

    Parameters
    ----------
    adapter: subclass of AbstractDatasetAdapter
        The adapter to get items from.
    """

    def __init__(self, adapter):
        self.adapter = adapter

    def write_chunk(self, output_chunk, input_slice):
        """Write from an input slice in the adapter to an output chunk.

        Parameters
        ----------
        output_chunk: GulpChunk
            The chunk to write to
        input_slice: slice
            The slice to use from the adapter.
        """
        with output_chunk.open('wb'):
            for item in self.adapter.iter_data(input_slice):
                frames = item['frames']
                # Items without any frames are reported and skipped.
                if len(frames) == 0:
                    print("Failed to write video with id: {}; no frames"
                          .format(item['id']))
                    continue
                output_chunk.append(item['id'], item['meta'], frames,
                                    self.adapter.jpeg_encode_quality())
def calculate_chunk_slices(items_per_chunk, num_items):
    """Calculate slices for indexing an adapter.

    Parameters
    ----------
    items_per_chunk: int
        Approximate number of items per chunk.
    num_items: int
        Total number of items.

    Returns
    -------
    list of slices
    """
    assert items_per_chunk > 0
    assert num_items > 0
    slices = []
    start = 0
    # The final slice is clamped so it never runs past num_items.
    while start < num_items:
        stop = min(start + items_per_chunk, num_items)
        slices.append(slice(start, stop))
        start += items_per_chunk
    return slices
class GulpIngestor(object):
    """Ingest items from an adapter into gulp chunks on disk.

    Parameters
    ----------
    adapter: subclass of AbstractDatasetAdapter
        The adapter to ingest from.
    output_folder: str
        The folder/directory to write to.
    videos_per_chunk: int
        The total number of items per chunk.
    num_workers: int
        The level of parallelism.
    """

    def __init__(self, adapter, output_folder, videos_per_chunk, num_workers):
        assert int(num_workers) > 0
        self.adapter = adapter
        self.output_folder = output_folder
        self.videos_per_chunk = int(videos_per_chunk)
        self.num_workers = int(num_workers)

    def __call__(self):
        """Partition the adapter into slices and write them in parallel."""
        os.makedirs(self.output_folder, exist_ok=True)
        chunk_slices = calculate_chunk_slices(self.videos_per_chunk,
                                              len(self.adapter))
        gulp_directory = GulpDirectory(self.output_folder)
        new_chunks = gulp_directory.new_chunks(len(chunk_slices))
        chunk_writer = ChunkWriter(self.adapter)
        with ProcessPoolExecutor(max_workers=self.num_workers) as executor:
            results = executor.map(chunk_writer.write_chunk,
                                   new_chunks,
                                   chunk_slices)
            # Drain the result iterator so work completes, with a progress bar.
            progress = tqdm(results,
                            desc='Chunks finished',
                            unit='chunk',
                            dynamic_ncols=True,
                            total=len(chunk_slices))
            for _ in progress:
                pass
| src/gulpio2/fileio.py | 17,062 | Can write from an adapter to a gulp chunk.
Parameters
----------
adapter: subclass of AbstractDatasetAdapter
The adapter to get items from.
Represents a gulp chunk on disk.
Parameters
----------
data_file_path: str
Path to the *.gulp file.
meta_file_path: str
Path to the *.gmeta file.
serializer: subclass of AbstractSerializer
The type of serializer to use.
jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
the desired decoded image format (e.g. np.ndarray)
Represents a directory containing *.gulp and *.gmeta files.
Parameters
----------
output_dir: str
Path to the directory containing the files.
jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
the desired decoded image format (e.g. np.ndarray)
Attributes
----------
all_meta_dicts: list of dicts
All meta dicts from all chunks as a list.
chunk_lookup: dict: int -> str
Mapping element id to chunk index.
chunk_objs_lookup: dict: int -> GulpChunk
Mapping element id to chunk index.
merged_meta_dict: dict: id -> meta dict
all meta dicts merged
Ingest items from an adapter into an gulp chunks.
Parameters
----------
adapter: subclass of AbstractDatasetAdapter
The adapter to ingest from.
output_folder: str
The folder/directory to write to.
videos_per_chunk: int
The total number of items per chunk.
num_workers: int
The level of parallelism.
Append an item to the gulp.
Parameters
----------
id_ : str
The ID of the item
meta_data: dict
The meta-data associated with the item.
frames: list of numpy arrays
The frames of the item as a list of numpy dictionaries consisting
of image pixel values.
Calculate slices for indexing an adapter.
Parameters
----------
items_per_chunk: int
Approximate number of items per chunk.
num_items: int
Total number of items.
Returns
-------
list of slices
Return a generator over existing GulpChunk objects which are ready
to be opened and read from.
Flush all buffers and write the meta file.
Check whether a bytes object (or similar) contains JPEG (JFIF) data.
Returns False for truncated files.
Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw error for missing EOI.
:param data: JPEG (JFIF) data
:return: True if JPEG
Iterate over all frames in the gulp.
Parameters
----------
accepted_ids: list of str
A filter for accepted ids.
shuffle: bool
Shuffle the items or not.
Returns
-------
iterator
An iterator that yield a series of frames,meta tuples. See
`read_frames` for details.
Return a generator over freshly setup GulpChunk objects which are ready
to be opened and written to.
Parameters
----------
total_new_chunks: int
The total number of new chunks to initialize.
Open the gulp chunk for reading.
Parameters
----------
flag: str
'rb': Read binary
'wb': Write binary
'ab': Append to binary
Notes
-----
Works as a context manager but returns None.
Read frames for a single item.
Parameters
----------
id_: str
The ID of the item
slice_: slice or list of ints:
A slice or list of indices with which to select frames.
Returns
-------
frames (int), meta(dict)
The frames of the item as a list of numpy arrays consisting of
image pixel values. And the metadata.
Write from an input slice in the adapter to an output chunk.
Parameters
----------
output_chunk: GulpChunk
The chunk to write to
input_slice: slice
The slice to use from the adapter.
!/usr/bin/env pythonfrom simplejpeg import is_jpeg pragma: no cover implements an OrderedDefaultDict If image is a string or pathlib Path, assume that it is a path to a jpeg file and add it directly without decoding and encoding it. np.array implements an OrderedDefaultDict | 3,803 | en | 0.612063 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.rich_text_components import base
# Schema for customization args that must be non-negative whole numbers
# (used below for the start/end offsets, expressed in seconds).
NONNEGATIVE_INT_SCHEMA = {
    'type': 'int',
    'validators': [{
        'id': 'is_at_least',
        'min_value': 0
    }],
}
class Video(base.BaseRichTextComponent):
    """A rich-text component representing a YouTube video.

    The component is configured by four customization args: the YouTube
    video id, optional start/end offsets (whole seconds, 0 = unset), and
    an autoplay flag.
    """

    # Display metadata used by the rich-text editor.
    name = 'Video'
    category = 'Basic Input'
    description = 'A YouTube video.'
    # Identifier the frontend uses to select this component.
    frontend_name = 'video'
    tooltip = 'Insert video'

    # Customization args shown in the editor dialog; each spec defines the
    # arg name, a user-facing description, a validation schema and a default.
    _customization_arg_specs = [{
        'name': 'video_id',
        'description': (
            'The YouTube id for this video. This is the 11-character string '
            'after \'v=\' in the video URL.'),
        'schema': {
            'type': 'unicode',
        },
        'default_value': '',
    }, {
        'name': 'start',
        'description': (
            'Video start time in seconds: (leave at 0 to start at the '
            'beginning.)'),
        'schema': NONNEGATIVE_INT_SCHEMA,
        'default_value': 0
    }, {
        'name': 'end',
        'description': (
            'Video end time in seconds: (leave at 0 to play until the end.)'),
        'schema': NONNEGATIVE_INT_SCHEMA,
        'default_value': 0
    }, {
        'name': 'autoplay',
        'description': (
            'Autoplay this video once the question has loaded?'),
        'schema': {
            'type': 'bool'
        },
        'default_value': False,
    }]

    # Toolbar icon for the editor, inlined as a base64 PNG data URI.
    icon_data_url = (
        'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA'
        'ABGdBTUEAAK/INwWK6QAAABl0RVh0%0AU29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZ'
        'TwAAAIfSURBVDjLpZNPaBNBGMXfbrubzBqbg4kL%0A0lJLgiVKE/AP6Kl6UUFQNAeDIAj'
        'VS08aELx59GQPAREV/4BeiqcqROpRD4pUNCJSS21OgloISWME%0AZ/aPb6ARdNeTCz92m'
        'O%2B9N9/w7RphGOJ/nsH%2Bolqtvg%2BCYJR8q9VquThxuVz%2BoJTKeZ63Uq/XC38E%0'
        'A0Jj3ff8%2BOVupVGLbolkzQw5HOqAxQU4wXWWnZrykmYD0QsgAOJe9hpEUcPr8i0GaJ8'
        'n2vs/sL2h8%0AR66TpVfWTdETHWE6GRGKjGiiKNLii5BSLpN7pBHpgMYhMkm8tPUWz3sL'
        '2D1wFaY/jvnWcTTaE5Dy%0AjMfTT5J0XIAiTRYn3ASwZ1MKbTmN7z%2BKaHUOYqmb1fcP'
        'iNa4kQBuyvWAHYfcHGzDgYcx9NKrwJYH%0ACAyF21JiPWBnXMAQOea6bmn%2B4ueYGZi8'
        'gtymNVobF7BG5prNpjd%2BeW6X4BSUD0gOdCpzA8MpA/v2%0Av15kl4%2BpK0emwHSbjJ'
        'GBlz%2BvYM1fQeDrYOBTdzOGvDf6EFNr%2BLYjHbBgsaCLxr%2BmoNQjU2vYhRXp%0AgI'
        'UOmSWWnsJRfjlOZhrexgtYDZ/gWbetNRbNs6QT10GJglNk64HMaGgbAkoMo5fiFNy7CKD'
        'QUGqE%0A5r38YktxAfSqW7Zt33l66WtkAkACjuNsaLVaDxlw5HdJ/86aYrG4WCgUZD6fX'
        '%2Bjv/U0ymfxoWVZo%0AmuZyf%2B8XqfGP49CCrBUAAAAASUVORK5CYII%3D%0A'
    )
| extensions/rich_text_components/Video/Video.py | 3,155 | A rich-text component representing a YouTube video.
coding: utf-8 Copyright 2014 The Oppia Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, softwar distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 644 | en | 0.854827 |
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.underload.trivial as trivial
import logging
logging.disable(logging.CRITICAL)
class Trivial(TestCase):
    """Tests for the trivial host-underload detection algorithms."""

    @qc(10)
    def always_underloaded_factory(
        time_step=int_(min=0, max=10),
        migration_time=float_(min=0, max=10),
        utilization=list_(of=float)
    ):
        # pyqcy property (run 10 times with generated inputs): the
        # always-underloaded algorithm must report underload for any
        # utilization history.
        alg = trivial.always_underloaded_factory(time_step, migration_time, {})
        assert alg(utilization) == (True, {})

    def test_threshold_factory(self):
        # Underloaded while the last utilization value is <= the threshold;
        # an empty history is never reported as underloaded.
        alg = trivial.threshold_factory(300, 20., {'threshold': 0.5})
        self.assertEqual(alg([]), (False, {}))
        self.assertEqual(alg([0.0, 0.0]), (True, {}))
        self.assertEqual(alg([0.0, 0.4]), (True, {}))
        self.assertEqual(alg([0.0, 0.5]), (True, {}))
        self.assertEqual(alg([0.0, 0.6]), (False, {}))
        self.assertEqual(alg([0.0, 1.0]), (False, {}))

    def test_last_n_average_threshold_factory(self):
        # The mean of the last n utilization values is compared with the
        # threshold; with n=2 a single 1.0 spike can be averaged away.
        alg = trivial.last_n_average_threshold_factory(
            300, 20., {'threshold': 0.5,
                       'n': 2})
        self.assertEqual(alg([]), (False, {}))
        self.assertEqual(alg([0.0, 0.0]), (True, {}))
        self.assertEqual(alg([0.0, 0.4]), (True, {}))
        self.assertEqual(alg([0.0, 0.5]), (True, {}))
        self.assertEqual(alg([0.0, 0.6]), (True, {}))
        self.assertEqual(alg([0.0, 1.0]), (True, {}))
        self.assertEqual(alg([0.2, 1.0]), (False, {}))
        self.assertEqual(alg([0.0, 0.2, 1.0]), (False, {}))
        self.assertEqual(alg([0.0, 1.0, 1.0]), (False, {}))
        self.assertEqual(alg([0.0, 0.6, 0.6]), (False, {}))
        # With n=3 the same history averages to 0.4 <= 0.5 -> underloaded.
        alg = trivial.last_n_average_threshold_factory(
            300, 20., {'threshold': 0.5,
                       'n': 3})
        self.assertEqual(alg([0.0, 0.6, 0.6]), (True, {}))

    def test_threshold(self):
        # Direct checks of the underlying threshold predicate.
        self.assertEqual(trivial.threshold(0.5, []), False)
        self.assertEqual(trivial.threshold(0.5, [0.0, 0.0]), True)
        self.assertEqual(trivial.threshold(0.5, [0.0, 0.4]), True)
        self.assertEqual(trivial.threshold(0.5, [0.0, 0.5]), True)
        self.assertEqual(trivial.threshold(0.5, [0.0, 0.6]), False)
        self.assertEqual(trivial.threshold(0.5, [0.0, 1.0]), False)

    def test_last_n_average_threshold(self):
        # Direct checks of the last-n-average predicate.
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, []), False)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 0.0]), True)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 0.4]), True)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 0.5]), True)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 0.6]), True)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 1.0]), True)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.2, 1.0]), False)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 0.2, 1.0]), False)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 1.0, 1.0]), False)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 2, [0.0, 0.6, 0.6]), False)
        self.assertEqual(trivial.last_n_average_threshold(
            0.5, 3, [0.0, 0.6, 0.6]), True)
| tests/locals/underload/test_trivial.py | 4,018 | Copyright 2012 Anton Beloglazov Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 553 | en | 0.86273 |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NSynth Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The NSynth Dataset is an audio dataset containing ~300k musical notes, each
with a unique pitch, timbre, and envelope. Each note is annotated with three
additional pieces of information based on a combination of human evaluation
and heuristic algorithms: Source, Family, and Qualities.
"""
_FULL_DESCRIPTION = """\
Full NSynth Dataset is split into train, valid, and test sets, with no
instruments overlapping between the train set and the valid/test sets.
"""
_GANSYNTH_DESCRIPTION = """\
NSynth Dataset limited to acoustic instruments in the MIDI pitch interval
[24, 84]. Uses alternate splits that have overlap in instruments (but not exact
notes) between the train set and valid/test sets. This variant was originally
introduced in the ICLR 2019 GANSynth paper (https://arxiv.org/abs/1902.08710).
"""
_F0_AND_LOUDNESS_ADDENDUM = """\
This version additionally contains estimates for F0 using CREPE
(Kim et al., 2018) and A-weighted perceptual loudness. Both signals are provided
at a frame rate of 250Hz.
"""
# From http://proceedings.mlr.press/v70/engel17a.html
_CITATION = """\
@InProceedings{pmlr-v70-engel17a,
title = {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders},
author = {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan},
booktitle = {Proceedings of the 34th International Conference on Machine Learning},
pages = {1068--1077},
year = {2017},
editor = {Doina Precup and Yee Whye Teh},
volume = {70},
series = {Proceedings of Machine Learning Research},
address = {International Convention Centre, Sydney, Australia},
month = {06--11 Aug},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf},
url = {http://proceedings.mlr.press/v70/engel17a.html},
}
"""
# Every example is a 4-second note sampled at 16 kHz (see the audio feature
# shape in Nsynth._info).
_NUM_SECS = 4
_AUDIO_RATE = 16000  # 16 kHz
_F0_AND_LOUDNESS_RATE = 250  # 250 Hz
# Closed vocabularies used for the ClassLabel features.
_INSTRUMENT_FAMILIES = [
    "bass", "brass", "flute", "guitar", "keyboard", "mallet", "organ", "reed",
    "string", "synth_lead", "vocal"]
_INSTRUMENT_SOURCES = ["acoustic", "electronic", "synthetic"]
# Binary note annotations; each becomes a boolean feature.
_QUALITIES = [
    "bright",
    "dark",
    "distortion",
    "fast_decay",
    "long_release",
    "multiphonic",
    "nonlinear_env",
    "percussive",
    "reverb",
    "tempo-synced"]
# Per-split archives live at _BASE_DOWNLOAD_PATH + "<split>.tfrecord.tar".
_BASE_DOWNLOAD_PATH = "http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-"
_SPLITS = ["train", "valid", "test"]
# Number of TFRecord shards to write per split.
_SPLIT_SHARDS = {
    "train": 512,
    "valid": 32,
    "test": 8,
}
class NsynthConfig(tfds.core.BuilderConfig):
  """BuilderConfig for NSynth Dataset."""

  def __init__(self,
               gansynth_subset=False,
               estimate_f0_and_loudness=False,
               **kwargs):
    """Constructs a NsynthConfig.

    Args:
      gansynth_subset: bool, whether to use the subset of the dataset
        introduced in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This
        subset uses acoustic-only instrument sources and limits the pitches to
        the interval [24, 84]. The train and test splits are also modified so
        that instruments (but not specific notes) overlap between them. See
        https://arxiv.org/abs/1902.08710 for more details.
      estimate_f0_and_loudness: bool, whether to estimate fundamental
        frequency (F0) and loudness for the audio (at 250 Hz) and add them to
        the set of features.
      **kwargs: keyword arguments forwarded to super.
    """
    # Config name encodes the chosen variant, e.g. "gansynth_subset.f0_and_loudness".
    parts = ["gansynth_subset" if gansynth_subset else "full"]
    if estimate_f0_and_loudness:
      parts.append("f0_and_loudness")
    super(NsynthConfig, self).__init__(
        name=".".join(parts),
        version=tfds.core.Version(
            "1.1.0", experiments={tfds.core.Experiment.S3: False}),
        **kwargs)
    self.gansynth_subset = gansynth_subset
    self.estimate_f0_and_loudness = estimate_f0_and_loudness
class Nsynth(tfds.core.BeamBasedBuilder):
"""A large-scale and high-quality dataset of annotated musical notes."""
BUILDER_CONFIGS = [
NsynthConfig(description=_FULL_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
description=_GANSYNTH_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
estimate_f0_and_loudness=True,
description=_GANSYNTH_DESCRIPTION + _F0_AND_LOUDNESS_ADDENDUM),
]
  def _info(self):
    """Builds the DatasetInfo: feature spec, citation and homepage."""
    features = {
        "id":
            tf.string,
        # Raw waveform: 4 seconds at 16 kHz, mono float32.
        "audio":
            tfds.features.Tensor(
                shape=(_AUDIO_RATE * _NUM_SECS,), dtype=tf.float32),
        # MIDI-style pitch/velocity, each in [0, 128).
        "pitch":
            tfds.features.ClassLabel(num_classes=128),
        "velocity":
            tfds.features.ClassLabel(num_classes=128),
        "instrument": {
            # We read the list of labels in _split_generators.
            "label": tfds.features.ClassLabel(num_classes=1006),
            "family": tfds.features.ClassLabel(names=_INSTRUMENT_FAMILIES),
            "source": tfds.features.ClassLabel(names=_INSTRUMENT_SOURCES),
        },
        # One boolean per quality annotation.
        "qualities": {quality: tf.bool for quality in _QUALITIES},
    }
    if self.builder_config.estimate_f0_and_loudness:
      # F0/loudness signals are sampled at 250 Hz, plus one trailing frame.
      f0_and_ld_shape = (_F0_AND_LOUDNESS_RATE * _NUM_SECS + 1,)
      features["f0"] = {
          "hz":
              tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
          "midi":
              tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
          "confidence":
              tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
      }
      features["loudness"] = {
          "db":
              tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
      }
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(features),
        homepage="https://g.co/magenta/nsynth-dataset",
        citation=_CITATION,
        metadata=tfds.core.BeamMetadataDict(),
    )
  def _split_generators(self, dl_manager):
    """Downloads source data and returns one SplitGenerator per split."""
    # Per-split TFRecord tarballs plus the instrument label vocabulary.
    dl_urls = {}
    dl_urls["examples"] = {
        split: _BASE_DOWNLOAD_PATH + "%s.tfrecord.tar" % split
        for split in _SPLITS
    }
    dl_urls["instrument_labels"] = (
        _BASE_DOWNLOAD_PATH + "instrument_labels.txt")
    if self.builder_config.gansynth_subset:
      dl_urls["gansynth_splits"] = (
          _BASE_DOWNLOAD_PATH + "gansynth_splits.csv")
    dl_paths = dl_manager.download_and_extract(dl_urls)
    # Populate the instrument label ClassLabel from the downloaded list.
    with tf.io.gfile.GFile(dl_paths["instrument_labels"]) as f:
      instrument_labels = f.read().strip().splitlines()
    self.info.features["instrument"]["label"].names = instrument_labels
    split_ids = {s: set() for s in _SPLITS}
    split_dirs = {s: [dl_paths["examples"][s]] for s in _SPLITS}
    if self.builder_config.gansynth_subset:
      # Generator needs to see all original splits for each new split.
      split_dirs = {s: dl_paths["examples"].values() for s in _SPLITS}
      # The CSV reassigns example ids to the alternate GANSynth splits.
      with tf.io.gfile.GFile(dl_paths["gansynth_splits"]) as f:
        reader = csv.DictReader(f)
        for row in reader:
          split_ids[row["split"]].add(row["id"])
    return [
        tfds.core.SplitGenerator(  # pylint: disable=g-complex-comprehension
            name=split,
            num_shards=_SPLIT_SHARDS[split],
            gen_kwargs={
                "tfrecord_dirs": split_dirs[split],
                "ids": split_ids[split],
                "split": split,
            })
        for split in _SPLITS
    ]
def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split):
  """Build PCollection of examples for split.

  Args:
    pipeline: root beam Pipeline to attach the transforms to.
    tfrecord_dirs: directories containing the source TFRecord shards.
    ids: set of example ids to keep; an empty set keeps every example.
    split: split name; used to namespace beam counters and to trigger the
      TRAIN-only loudness statistics.

  Returns:
    A beam PCollection of TFDS example dicts.
  """
  beam = tfds.core.lazy_imports.apache_beam

  def _emit_base_example(ex):
    """Maps an input example to a TFDS example."""
    beam.metrics.Metrics.counter(split, "base-examples").inc()
    features = ex.features.feature
    return {
        "id": features["note_str"].bytes_list.value[0],
        "audio":
            np.array(features["audio"].float_list.value, dtype=np.float32),
        "pitch":
            features["pitch"].int64_list.value[0],
        "velocity":
            features["velocity"].int64_list.value[0],
        "instrument": {
            "label":
                tf.compat.as_text(
                    features["instrument_str"].bytes_list.value[0]),
            "family":
                tf.compat.as_text(
                    features["instrument_family_str"].bytes_list.value[0]),
            "source":
                tf.compat.as_text(
                    features["instrument_source_str"].bytes_list.value[0])
        },
        # One binary flag per quality name, in _QUALITIES order.
        "qualities": {
            q: features["qualities"].int64_list.value[i]
            for (i, q) in enumerate(_QUALITIES)
        }
    }

  def _in_split(ex, split_ids):
    # An empty id set keeps everything (non-GANSynth configs).
    if not split_ids or tf.compat.as_text(ex["id"]) in split_ids:
      beam.metrics.Metrics.counter(split, "in-split").inc()
      return True
    return False

  def _estimate_f0(ex):
    """Estimate the fundamental frequency using CREPE and add to example."""
    ex = ex.copy()
    beam.metrics.Metrics.counter(split, "estimate-f0").inc()
    _, f0_hz, f0_confidence, _ = tfds.core.lazy_imports.crepe.predict(
        ex["audio"],
        sr=_AUDIO_RATE,
        viterbi=True,
        # step_size is in milliseconds; this yields _F0_AND_LOUDNESS_RATE
        # frames per second.
        step_size=1000 / _F0_AND_LOUDNESS_RATE,
        verbose=0)
    f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
    # Set -infs introduced by hz_to_midi to 0.
    f0_midi[f0_midi == -np.inf] = 0
    # Set nans to 0 in confidence.
    f0_confidence = np.nan_to_num(f0_confidence)
    ex["f0"] = {
        "hz": f0_hz.astype(np.float32),
        "midi": f0_midi.astype(np.float32),
        "confidence": f0_confidence.astype(np.float32),
    }
    return ex

  def _compute_loudness(ex):
    """Compute loudness and add to example."""
    ex = ex.copy()
    beam.metrics.Metrics.counter(split, "compute-loudness").inc()
    librosa = tfds.core.lazy_imports.librosa
    n_fft = 2048
    amin = 1e-15
    top_db = 200.0
    # Hop chosen so loudness frames line up with the F0 frame rate.
    stft = librosa.stft(
        ex["audio"],
        n_fft=n_fft,
        hop_length=int(_AUDIO_RATE // _F0_AND_LOUDNESS_RATE))
    loudness_db = librosa.perceptual_weighting(
        np.abs(stft)**2,
        librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft),
        amin=amin,
        top_db=top_db)
    # Average across freq in linear scale.
    mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
    mean_loudness_db = librosa.amplitude_to_db(
        mean_loudness_amp,
        amin=amin,
        top_db=top_db)
    ex["loudness"] = {"db": mean_loudness_db.astype(np.float32)}
    return ex

  examples = (
      pipeline
      | beam.Create([os.path.join(dir_, "*") for dir_ in tfrecord_dirs])
      | beam.io.tfrecordio.ReadAllFromTFRecord(
          coder=beam.coders.ProtoCoder(tf.train.Example))
      | beam.Map(_emit_base_example)
      | beam.Filter(_in_split, split_ids=ids))
  if self.builder_config.estimate_f0_and_loudness:
    examples = (
        examples
        # Reshuffle to spread the expensive CREPE work across workers.
        | beam.Reshuffle()
        | beam.Map(_estimate_f0)
        | beam.Map(_compute_loudness))
    if split == tfds.Split.TRAIN:
      # Output mean and variance of loudness for TRAIN split.
      loudness = examples | beam.Map(lambda x: np.mean(x["loudness"]["db"]))
      loudness_mean = (
          loudness
          | "loudness_mean" >> beam.combiners.Mean.Globally())
      loudness_variance = (
          loudness
          | beam.Map(lambda ld, ld_mean: (ld - ld_mean)**2,
                     ld_mean=beam.pvalue.AsSingleton(loudness_mean))
          | "loudness_variance" >> beam.combiners.Mean.Globally())
      # NOTE(review): these are PCollections; BeamMetadataDict presumably
      # materializes them after the pipeline runs -- confirm against the
      # tfds beam-writer docs.
      self.info.metadata["loudness_db_mean"] = loudness_mean
      self.info.metadata["loudness_db_variance"] = loudness_variance
  return examples
| tensorflow_datasets/audio/nsynth.py | 12,874 | A large-scale and high-quality dataset of annotated musical notes.
BuilderConfig for NSynth Dataset.
Constructs a NsynthConfig.
Args:
gansynth_subset: bool, whether to use the subset of the dataset introduced
in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses
acoustic-only instrument sources and limits the pitches to the interval
[24, 84]. The train and test splits are also modified so that
instruments (but not specific notes) overlap between them. See
https://arxiv.org/abs/1902.08710 for more details.
estimate_f0_and_loudness: bool, whether to estimate fundamental frequency
(F0) and loudness for the audio (at 250 Hz) and add them to the set of
features.
**kwargs: keyword arguments forwarded to super.
Build PCollection of examples for split.
Compute loudness and add to example.
Maps an input example to a TFDS example.
Estimate the fundamental frequency using CREPE and add to example.
Returns splits.
NSynth Dataset.
coding=utf-8 Copyright 2019 The TensorFlow Datasets Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. From http://proceedings.mlr.press/v70/engel17a.html 16 kHz 250 Hz We read the list of labels in _split_generators. Generator needs to see all original splits for each new split. pylint: disable=g-complex-comprehension Set -infs introduced by hz_to_midi to 0. Set nans to 0 in confidence. Average across freq in linear scale. Output mean and variance of loudness for TRAIN split. | 1,947 | en | 0.816061 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
def databoxedge_device_create(client,
                              device_name,
                              resource_group_name,
                              location,
                              tags=None,
                              sku=None,
                              etag=None,
                              data_box_edge_device_status=None,
                              description=None,
                              model_description=None,
                              friendly_name=None,
                              no_wait=False):
    """Create (or update) a Data Box Edge device resource.

    Builds the device payload from the flat CLI arguments and dispatches it
    through sdk_no_wait, so --no-wait skips polling the long-running
    operation. Unset optional arguments are sent as explicit None values.
    """
    # Flat properties go straight into the payload, None included.
    data_box_edge_device = {
        'location': location,
        'tags': tags,
        'etag': etag,
        'data_box_edge_device_status': data_box_edge_device_status,
        'description': description,
        'model_description': model_description,
        'friendly_name': friendly_name,
    }
    # The SKU is a nested object and is only attached when one was given.
    if sku:
        data_box_edge_device['sku'] = {'name': sku}
    return sdk_no_wait(no_wait,
                       client.create_or_update,
                       device_name=device_name,
                       resource_group_name=resource_group_name,
                       data_box_edge_device=data_box_edge_device)
def databoxedge_device_update(client,
                              device_name,
                              resource_group_name,
                              tags=None):
    """Update a device's tags, or return the current device when no tags given.

    With tags=None there is nothing to change, so the existing resource is
    fetched and returned unchanged; otherwise only the tags are patched.
    """
    if tags is None:
        # No-op update: surface the current state of the resource instead.
        return client.get(device_name=device_name,
                          resource_group_name=resource_group_name)
    return client.update(device_name=device_name,
                         resource_group_name=resource_group_name,
                         parameters={'tags': tags})
def databoxedge_bandwidth_schedule_update(instance,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          start=None,
                                          stop=None,
                                          rate_in_mbps=None,
                                          days=None,
                                          no_wait=False):
    """Copy the supplied (non-None) fields onto the bandwidth-schedule instance.

    Generic-update setter: options the user did not pass are left at their
    existing values on the instance. Returns the mutated instance so the
    generic-update machinery can submit it.
    """
    for attr, value in (('start', start),
                        ('stop', stop),
                        ('rate_in_mbps', rate_in_mbps),
                        ('days', days)):
        if value is not None:
            setattr(instance, attr, value)
    return instance
def databoxedge_order_create(client,
                             device_name,
                             resource_group_name,
                             address_line1,
                             postal_code,
                             city,
                             state,
                             country,
                             contact_person,
                             company_name,
                             phone,
                             email_list,
                             status=None,
                             comments=None,
                             address_line2=None,
                             address_line3=None,
                             no_wait=False):
    """Create (or update) the hardware order for a Data Box Edge device.

    Assembles the order payload (optional current status, shipping address,
    contact information) from the flat CLI arguments and dispatches it via
    sdk_no_wait so --no-wait skips polling the long-running operation.
    """
    order = {}
    # Status (and its optional comments) is only attached when provided.
    if status:
        order['current_status'] = {'status': status, 'comments': comments}
    order['shipping_address'] = {
        'address_line1': address_line1,
        'address_line2': address_line2,
        'address_line3': address_line3,
        'postal_code': postal_code,
        'city': city,
        'state': state,
        'country': country,
    }
    order['contact_information'] = {
        'contact_person': contact_person,
        'company_name': company_name,
        'phone': phone,
        'email_list': email_list,
    }
    return sdk_no_wait(no_wait,
                       client.create_or_update,
                       device_name=device_name,
                       resource_group_name=resource_group_name,
                       order=order)
| src/azure-cli/azure/cli/command_modules/databoxedge/manual/custom.py | 5,011 | -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=too-many-lines pylint: disable=unused-argument | 502 | en | 0.52258 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import os.path
import re
from math import ceil
from ipaddress import ip_network
from knack.log import get_logger
from azure.cli.core.util import CLIError
import azure.cli.core.keys as keys
logger = get_logger(__name__)
def validate_ssh_key(namespace):
    """Validate --ssh-key-value, optionally generating a key pair on demand.

    Resolution order:
      * --no-ssh-key set: nothing to validate.
      * the given value (or the default ~/.ssh/id_rsa.pub) is an existing
        file: read its contents.
      * otherwise, if the string is not already a valid RSA public key:
        generate a new key pair when --generate-ssh-keys was passed, else
        fail with a CLIError.
    On success, namespace.ssh_key_value is overwritten with the public-key
    CONTENT (never a path).
    """
    if hasattr(namespace, 'no_ssh_key') and namespace.no_ssh_key:
        return
    string_or_file = (namespace.ssh_key_value or
                      os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub'))
    content = string_or_file
    if os.path.exists(string_or_file):
        logger.info('Use existing SSH public key file: %s', string_or_file)
        with open(string_or_file, 'r') as f:
            content = f.read()
    elif not keys.is_valid_ssh_rsa_public_key(content):
        if namespace.generate_ssh_keys:
            # figure out appropriate file names:
            # 'base_name'(with private keys), and 'base_name.pub'(with public keys)
            public_key_filepath = string_or_file
            if public_key_filepath[-4:].lower() == '.pub':
                private_key_filepath = public_key_filepath[:-4]
            else:
                private_key_filepath = public_key_filepath + '.private'
            content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath)
            logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to "
                           "allow SSH access to the VM. If using machines without "
                           "permanent storage like Azure Cloud Shell without an attached "
                           "file share, back up your keys to a safe location",
                           private_key_filepath, public_key_filepath)
        else:
            raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
                           'You can use --generate-ssh-keys to let CLI generate one for you')
    namespace.ssh_key_value = content
def validate_create_parameters(namespace):
    """Reject empty values for the required name arguments.

    --name must be non-empty; --dns-prefix, when supplied at all, must also
    be non-empty (None means "not supplied" and is allowed).
    """
    name = namespace.name
    if not name:
        raise CLIError('--name has no value')
    dns_prefix = namespace.dns_name_prefix
    # None is fine (option omitted); only an explicit empty string is invalid.
    if dns_prefix is not None and not dns_prefix:
        raise CLIError('--dns-prefix has no value')
def validate_k8s_version(namespace):
    """Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
    to use its default version.

    Accepts an optional 'v'/'V' prefix (stripped in place), e.g. "v1.7.12"
    becomes "1.7.12". Raises CLIError for anything that is not a full
    MAJOR.MINOR.PATCH version.
    """
    if namespace.kubernetes_version:
        # BUGFIX: the original class was [v|V], which also accepted a literal
        # '|' prefix (inside [] the '|' is a plain character, not alternation).
        k8s_release_regex = re.compile(r'^[vV]?(\d+\.\d+\.\d+.*)$')
        found = k8s_release_regex.findall(namespace.kubernetes_version)
        if found:
            # Normalize: store the version without its leading v/V.
            namespace.kubernetes_version = found[0]
        else:
            raise CLIError('--kubernetes-version should be the full version number, '
                           'such as "1.7.12" or "1.8.7"')
def validate_linux_host_name(namespace):
    """Validates a string as a legal host name component.

    The same validation happens server-side in the ARM API, but that can take
    a minute or two to surface; failing fast in the CLI is friendlier.
    """
    # https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
    rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')  # pylint:disable=line-too-long
    if not rfc1123_regex.findall(namespace.name):
        raise CLIError('--name cannot exceed 63 characters and can only contain '
                       'letters, numbers, or dashes (-).')
def validate_max_pods(namespace):
    """Validates that max_pods is set to a reasonable minimum number.

    0 means "use the service default" and is always accepted; any other value
    must leave room for the per-node system pods.
    """
    # kube-proxy and kube-svc reside each nodes,
    # 2 kube-proxy pods, 1 azureproxy/heapster/dashboard/tunnelfront are in kube-system
    system_pods = namespace.node_count * 2 + 6 + 1
    minimum_pods_required = ceil(system_pods / namespace.node_count)
    max_pods = namespace.max_pods
    if max_pods != 0 and max_pods < minimum_pods_required:
        raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'
                       .format(minimum_pods_required))
def validate_nodes_count(namespace):
    """Validate that min_count and max_count is set to 1-100"""
    min_count = namespace.min_count
    # None means the option was not supplied, which is always fine.
    if min_count is not None and not 1 <= min_count <= 100:
        raise CLIError('--min-count must be in the range [1,100]')
    max_count = namespace.max_count
    if max_count is not None and not 1 <= max_count <= 100:
        raise CLIError('--max-count must be in the range [1,100]')
def validate_ip_ranges(namespace):
    """Check that the API-server authorized ranges parse as IPs or CIDRs.

    None (option omitted) and '' (explicitly cleared) are both accepted
    without further checks.
    """
    ranges = namespace.api_server_authorized_ip_ranges
    if ranges is None or ranges == '':
        return
    for ip in ranges.split(','):
        try:
            ip_network(ip)
        except ValueError:
            raise CLIError("--api-server-authorized-ip-ranges should be list of IPv4 addresses or CIDRs")
def validate_nodepool_name(namespace):
    """Validates a nodepool name to be at most 12 characters, alphanumeric only.

    An empty name is allowed here (a default is applied elsewhere).
    Raises CLIError for names that are too long or not purely alphanumeric.
    """
    if namespace.nodepool_name != "":
        if len(namespace.nodepool_name) > 12:
            # BUGFIX: user-facing message typo "atmost" -> "at most".
            raise CLIError('--nodepool-name can contain at most 12 characters')
        if not namespace.nodepool_name.isalnum():
            raise CLIError('--nodepool-name should only contain alphanumeric characters')
def validate_vm_set_type(namespace):
    """Validates the vm set type string."""
    vm_set_type = namespace.vm_set_type
    # None (omitted) and '' (cleared) are both acceptable.
    if vm_set_type is None or vm_set_type == '':
        return
    if vm_set_type.lower() not in ("availabilityset", "virtualmachinescalesets"):
        raise CLIError("--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet")
def validate_load_balancer_sku(namespace):
    """Validates the load balancer sku string."""
    sku = namespace.load_balancer_sku
    # None (omitted) and '' (cleared) are both acceptable.
    if sku is None or sku == '':
        return
    if sku.lower() not in ("basic", "standard"):
        raise CLIError("--load-balancer-sku can only be standard or basic")
def validate_load_balancer_outbound_ips(namespace):
    """validate load balancer profile outbound IP ids"""
    outbound_ips = namespace.load_balancer_outbound_ips
    if outbound_ips is None:
        return
    # After stripping, every comma-separated id must be non-empty.
    if any(not part.strip() for part in outbound_ips.split(',')):
        raise CLIError("--load-balancer-outbound-ips cannot contain whitespace")
def validate_load_balancer_outbound_ip_prefixes(namespace):
    """validate load balancer profile outbound IP prefix ids"""
    prefixes = namespace.load_balancer_outbound_ip_prefixes
    if prefixes is None:
        return
    # After stripping, every comma-separated prefix id must be non-empty.
    if any(not part.strip() for part in prefixes.split(',')):
        raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace")
def validate_taints(namespace):
    """Validates that provided taint is a valid format"""
    taints = namespace.node_taints
    if taints is None or taints == '':
        return
    # key[=...]=value:effect, with effect one of the three k8s taint effects.
    pattern = re.compile(r"^[a-zA-Z\d][\w\-\.\/]{0,252}=[a-zA-Z\d][\w\-\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$")  # pylint: disable=line-too-long
    for taint in taints.split(','):
        # Tolerate stray empty entries from trailing/duplicated commas.
        if taint == "":
            continue
        if not pattern.findall(taint):
            raise CLIError('Invalid node taint: %s' % taint)
def validate_priority(namespace):
    """Validates the node pool priority string."""
    priority = namespace.priority
    # None (omitted) and '' (cleared) are both acceptable.
    if priority is None or priority == '':
        return
    # Comparison is deliberately case-sensitive.
    if priority not in ("Low", "Regular"):
        raise CLIError("--priority can only be Low or Regular")
def validate_eviction_policy(namespace):
    """Validates the node pool eviction policy string.

    DOCFIX: the original docstring was copy-pasted from validate_priority.
    None (omitted) and '' (cleared) are accepted; otherwise the value must be
    exactly "Delete" or "Deallocate" (case-sensitive).
    """
    if namespace.eviction_policy is not None:
        if namespace.eviction_policy == '':
            return
        if namespace.eviction_policy != "Delete" and \
                namespace.eviction_policy != "Deallocate":
            raise CLIError("--eviction-policy can only be Delete or Deallocate")
| src/aks-preview/azext_aks_preview/_validators.py | 8,760 | Validates the node pool priority string.
Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
to use its default version.
Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight.
validate load balancer profile outbound IP prefix ids
validate load balancer profile outbound IP ids
Validates the load balancer sku string.
Validates that max_pods is set to a reasonable minimum number.
Validates a nodepool name to be at most 12 characters, alphanumeric only.
Validate that min_count and max_count is set to 1-100
Validates the node pool priority string.
Validates that provided taint is a valid format
Validates the vm set type string.
-------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------------------------- figure out appropriate file names: 'base_name'(with private keys), and 'base_name.pub'(with public keys) https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address pylint:disable=line-too-long kube-proxy and kube-svc reside each nodes, 2 kube-proxy pods, 1 azureproxy/heapster/dashboard/tunnelfront are in kube-system pylint: disable=line-too-long | 1,589 | en | 0.760273 |
import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import cv2
import config
from utils import Mesh
from models import CMR
from models.smpl_from_lib import SMPL
from utils.pose_utils import compute_similarity_transform_batch, \
scale_and_translation_transform_batch
from utils.cam_utils import orthographic_project_torch, undo_keypoint_normalisation
from datasets.my_3dpw_eval_dataset import PW3DEvalDataset
def evaluate_3dpw(model,
eval_dataset,
metrics,
device,
vis_save_path,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000):
eval_dataloader = DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
drop_last=True,
num_workers=num_workers,
pin_memory=pin_memory)
smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1)
smpl_male = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='male')
smpl_female = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='female')
smpl.to(device)
smpl_male.to(device)
smpl_female.to(device)
J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
J_regressor_batch = J_regressor[None, :].to(device)
if 'pve' in metrics:
pve_smpl_sum = 0.0
pve_graph_sum = 0.0
pve_smpl_per_frame = []
pve_graph_per_frame = []
if 'pve_scale_corrected' in metrics:
pve_scale_corrected_smpl_sum = 0.0
pve_scale_corrected_graph_sum = 0.0
pve_scale_corrected_smpl_per_frame = []
pve_scale_corrected_graph_per_frame = []
if 'pve_pa' in metrics:
pve_pa_smpl_sum = 0.0
pve_pa_graph_sum = 0.0
pve_pa_smpl_per_frame = []
pve_pa_graph_per_frame = []
if 'pve-t' in metrics:
pvet_sum = 0.0
pvet_per_frame = []
if 'pve-t_scale_corrected' in metrics:
pvet_scale_corrected_sum = 0.0
pvet_scale_corrected_per_frame = []
if 'mpjpe' in metrics:
mpjpe_smpl_sum = 0.0
mpjpe_graph_sum = 0.0
mpjpe_smpl_per_frame = []
mpjpe_graph_per_frame = []
if 'mpjpe_scale_corrected' in metrics:
mpjpe_scale_corrected_smpl_sum = 0.0
mpjpe_scale_corrected_graph_sum = 0.0
mpjpe_scale_corrected_smpl_per_frame = []
mpjpe_scale_corrected_graph_per_frame = []
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl_sum = 0.0
j3d_rec_err_graph_sum = 0.0
j3d_rec_err_smpl_per_frame = []
j3d_rec_err_graph_per_frame = []
if 'pve_2d' in metrics:
pve_2d_smpl_sum = 0.0
pve_2d_graph_sum = 0.0
if 'pve_2d_scale_corrected' in metrics:
pve_2d_scale_corrected_smpl_sum = 0.0
pve_2d_scale_corrected_graph_sum = 0.0
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl_sum = 0.0
pve_2d_pa_graph_sum = 0.0
num_samples = 0
num_vertices = 6890
num_joints3d = 14
model.eval()
for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):
# ------------------------------- TARGETS and INPUTS -------------------------------
input = samples_batch['input']
input = input.to(device)
target_pose = samples_batch['pose'].to(device)
target_shape = samples_batch['shape'].to(device)
target_gender = samples_batch['gender'][0]
if target_gender == 'm':
target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_male(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
elif target_gender == 'f':
target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_female(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
# ------------------------------- PREDICTIONS -------------------------------
pred_vertices, pred_vertices_smpl, pred_camera, pred_rotmat, pred_betas = model(input)
pred_vertices_projected2d = orthographic_project_torch(pred_vertices, pred_camera)
pred_vertices_projected2d = undo_keypoint_normalisation(pred_vertices_projected2d, input.shape[-1])
pred_vertices_smpl_projected2d = orthographic_project_torch(pred_vertices_smpl, pred_camera)
pred_vertices_smpl_projected2d = undo_keypoint_normalisation(pred_vertices_smpl_projected2d, input.shape[-1])
pred_reposed_smpl_output = smpl(betas=pred_betas)
pred_reposed_vertices = pred_reposed_smpl_output.vertices
pred_joints_h36m = torch.matmul(J_regressor_batch, pred_vertices)
pred_joints_h36mlsp = pred_joints_h36m[:, config.H36M_TO_J14, :]
pred_joints_smpl_h36m = torch.matmul(J_regressor_batch, pred_vertices_smpl)
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36m[:, config.H36M_TO_J14, :]
# Numpy-fying
target_vertices = target_vertices.cpu().detach().numpy()
target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()
target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()
pred_vertices = pred_vertices.cpu().detach().numpy()
pred_vertices_smpl = pred_vertices_smpl.cpu().detach().numpy()
pred_vertices_projected2d = pred_vertices_projected2d.cpu().detach().numpy()
pred_vertices_smpl_projected2d = pred_vertices_smpl_projected2d.cpu().detach().numpy()
pred_reposed_vertices = pred_reposed_vertices.cpu().detach().numpy()
pred_joints_h36mlsp = pred_joints_h36mlsp.cpu().detach().numpy()
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36mlsp.cpu().detach().numpy()
# ------------------------------- METRICS -------------------------------
if 'pve' in metrics:
pve_smpl_batch = np.linalg.norm(pred_vertices_smpl - target_vertices, axis=-1) # (1, 6890)
pve_graph_batch = np.linalg.norm(pred_vertices - target_vertices, axis=-1)
pve_smpl_sum += np.sum(pve_smpl_batch) # scalar
pve_graph_sum += np.sum(pve_graph_batch)
pve_smpl_per_frame.append(np.mean(pve_smpl_batch, axis=-1))
pve_graph_per_frame.append(np.mean(pve_graph_batch, axis=-1))
# Scale and translation correction
if 'pve_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pve_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_sc - target_vertices,
axis=-1) # (1, 6890)
pve_sc_graph_batch = np.linalg.norm(pred_vertices_sc - target_vertices,
axis=-1) # (1, 6890)
pve_scale_corrected_smpl_sum += np.sum(pve_sc_smpl_batch) # scalar
pve_scale_corrected_graph_sum += np.sum(pve_sc_graph_batch) # scalar
pve_scale_corrected_smpl_per_frame.append(np.mean(pve_sc_smpl_batch, axis=-1))
pve_scale_corrected_graph_per_frame.append(np.mean(pve_sc_graph_batch, axis=-1))
# Procrustes analysis
if 'pve_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pve_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_graph_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_smpl_sum += np.sum(pve_pa_smpl_batch) # scalar
pve_pa_graph_sum += np.sum(pve_pa_graph_batch) # scalar
pve_pa_smpl_per_frame.append(np.mean(pve_pa_smpl_batch, axis=-1))
pve_pa_graph_per_frame.append(np.mean(pve_pa_graph_batch, axis=-1))
if 'pve-t' in metrics:
pvet_batch = np.linalg.norm(pred_reposed_vertices - target_reposed_vertices, axis=-1)
pvet_sum += np.sum(pvet_batch)
pvet_per_frame.append(np.mean(pvet_batch, axis=-1))
# Scale and translation correction
if 'pve-t_scale_corrected' in metrics:
pred_reposed_vertices_sc = scale_and_translation_transform_batch(pred_reposed_vertices,
target_reposed_vertices)
pvet_scale_corrected_batch = np.linalg.norm(pred_reposed_vertices_sc - target_reposed_vertices,
axis=-1) # (bs, 6890)
pvet_scale_corrected_sum += np.sum(pvet_scale_corrected_batch) # scalar
pvet_scale_corrected_per_frame.append(np.mean(pvet_scale_corrected_batch, axis=-1))
if 'mpjpe' in metrics:
mpjpe_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_graph_batch = np.linalg.norm(pred_joints_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_smpl_sum += np.sum(mpjpe_smpl_batch)
mpjpe_graph_sum += np.sum(mpjpe_graph_batch)
mpjpe_smpl_per_frame.append(np.mean(mpjpe_smpl_batch, axis=-1))
mpjpe_graph_per_frame.append(np.mean(mpjpe_graph_batch, axis=-1))
# Scale and translation correction
if 'mpjpe_scale_corrected' in metrics:
pred_joints_smpl_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_h36mlsp,
target_joints_h36mlsp)
mpjpe_scale_corrected_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_graph_batch = np.linalg.norm(pred_joints_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_smpl_sum += np.sum(mpjpe_scale_corrected_smpl_batch)
mpjpe_scale_corrected_graph_sum += np.sum(mpjpe_scale_corrected_graph_batch)
mpjpe_scale_corrected_smpl_per_frame.append(np.mean(mpjpe_scale_corrected_smpl_batch, axis=-1))
mpjpe_scale_corrected_graph_per_frame.append(np.mean(mpjpe_scale_corrected_graph_batch, axis=-1))
# Procrustes analysis
if 'j3d_rec_err' in metrics:
pred_joints_smpl_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_h36mlsp, target_joints_h36mlsp)
j3d_rec_err_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_graph_batch = np.linalg.norm(pred_joints_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_smpl_sum += np.sum(j3d_rec_err_smpl_batch)
j3d_rec_err_graph_sum += np.sum(j3d_rec_err_graph_batch)
j3d_rec_err_smpl_per_frame.append(np.mean(j3d_rec_err_smpl_batch, axis=-1))
j3d_rec_err_graph_per_frame.append(np.mean(j3d_rec_err_graph_batch, axis=-1))
if 'pve_2d' in metrics:
pred_vertices_smpl_2d = pred_vertices_smpl[:, :, :2]
pred_vertices_2d = pred_vertices[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_graph_batch = np.linalg.norm(pred_vertices_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_smpl_sum += np.sum(pve_2d_smpl_batch)
pve_2d_graph_sum += np.sum(pve_2d_graph_batch)
# Scale and translation correction
if 'pve_2d_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pred_vertices_smpl_2d_sc = pred_vertices_smpl_sc[:, :, :2]
pred_vertices_2d_sc = pred_vertices_sc[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_sc_graph_batch = np.linalg.norm(pred_vertices_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_scale_corrected_smpl_sum += np.sum(pve_2d_sc_smpl_batch)
pve_2d_scale_corrected_graph_sum += np.sum(pve_2d_sc_graph_batch)
# Procrustes analysis
if 'pve_2d_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pred_vertices_smpl_2d_pa = pred_vertices_smpl_pa[:, :, :2]
pred_vertices_2d_pa = pred_vertices_pa[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_graph_batch = np.linalg.norm(pred_vertices_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_smpl_sum += np.sum(pve_2d_pa_smpl_batch)
pve_2d_pa_graph_sum += np.sum(pve_2d_pa_graph_batch)
num_samples += target_pose.shape[0]
# ------------------------------- VISUALISE -------------------------------
if vis_every_n_batches is not None:
if batch_num % vis_every_n_batches == 0:
vis_imgs = samples_batch['vis_img'].numpy()
vis_imgs = np.transpose(vis_imgs, [0, 2, 3, 1])
fnames = samples_batch['fname']
plt.figure(figsize=(16, 12))
plt.subplot(341)
plt.imshow(vis_imgs[0])
plt.subplot(342)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_projected2d[0, :, 0], pred_vertices_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(343)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_smpl_projected2d[0, :, 0], pred_vertices_smpl_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(345)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices[0, :, 0], pred_vertices[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(346)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl[0, :, 0], pred_vertices_smpl[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(347)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_pa[0, :, 0], pred_vertices_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(348)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl_pa[0, :, 0], pred_vertices_smpl_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(349)
plt.scatter(target_reposed_vertices[0, :, 0], target_reposed_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_reposed_vertices_sc[0, :, 0], pred_reposed_vertices_sc[0, :, 1], s=0.1, c='r')
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 10)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 11)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 12)
for j in range(num_joints3d):
plt.scatter(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
# plt.show()
save_fig_path = os.path.join(vis_save_path, fnames[0])
plt.savefig(save_fig_path, bbox_inches='tight')
plt.close()
if 'pve' in metrics:
pve_smpl = pve_smpl_sum / (num_samples * num_vertices)
print('PVE SMPL: {:.5f}'.format(pve_smpl))
pve_graph = pve_graph_sum / (num_samples * num_vertices)
print('PVE GRAPH: {:.5f}'.format(pve_graph))
pve_smpl_per_frame = np.concatenate(pve_smpl_per_frame, axis=0)
pve_graph_per_frame = np.concatenate(pve_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_per_frame.npy'), pve_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_graph_per_frame.npy'), pve_graph_per_frame)
if 'pve_scale_corrected' in metrics:
pve_sc_smpl = pve_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE SC SMPL: {:.5f}'.format(pve_sc_smpl))
pve_sc_graph = pve_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE SC GRAPH: {:.5f}'.format(pve_sc_graph))
pve_scale_corrected_smpl_per_frame = np.concatenate(pve_scale_corrected_smpl_per_frame, axis=0)
pve_scale_corrected_graph_per_frame = np.concatenate(pve_scale_corrected_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_scale_corrected_per_frame.npy'),
pve_scale_corrected_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_scale_corrected_graph_per_frame.npy'),
pve_scale_corrected_graph_per_frame)
if 'pve_pa' in metrics:
pve_pa_smpl = pve_pa_smpl_sum / (num_samples * num_vertices)
print('PVE PA SMPL: {:.5f}'.format(pve_pa_smpl))
pve_pa_graph = pve_pa_graph_sum / (num_samples * num_vertices)
print('PVE PA GRAPH: {:.5f}'.format(pve_pa_graph))
pve_pa_smpl_per_frame = np.concatenate(pve_pa_smpl_per_frame, axis=0)
pve_pa_graph_per_frame = np.concatenate(pve_pa_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_pa_per_frame.npy'), pve_pa_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_pa_graph_per_frame.npy'), pve_pa_graph_per_frame)
if 'pve-t' in metrics:
pvet = pvet_sum / (num_samples * num_vertices)
print('PVE-T: {:.5f}'.format(pvet))
pvet_per_frame = np.concatenate(pvet_per_frame, axis=0)
np.save(os.path.join(save_path, 'pvet_per_frame.npy'), pvet_per_frame)
if 'pve-t_scale_corrected' in metrics:
pvet_sc = pvet_scale_corrected_sum / (num_samples * num_vertices)
print('PVE-T SC: {:.5f}'.format(pvet_sc))
pvet_scale_corrected_per_frame = np.concatenate(pvet_scale_corrected_per_frame, axis=0)
np.save(os.path.join(save_path, 'pvet_scale_corrected_per_frame.npy'),
pvet_scale_corrected_per_frame)
if 'mpjpe' in metrics:
mpjpe_smpl = mpjpe_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SMPL: {:.5f}'.format(mpjpe_smpl))
mpjpe_graph = mpjpe_graph_sum / (num_samples * num_joints3d)
print('MPJPE GRAPH: {:.5f}'.format(mpjpe_graph))
mpjpe_smpl_per_frame = np.concatenate(mpjpe_smpl_per_frame, axis=0)
mpjpe_graph_per_frame = np.concatenate(mpjpe_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'mpjpe_per_frame.npy'), mpjpe_smpl_per_frame)
np.save(os.path.join(save_path, 'mpjpe_graph_per_frame.npy'), mpjpe_graph_per_frame)
if 'mpjpe_scale_corrected' in metrics:
mpjpe_sc_smpl = mpjpe_scale_corrected_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SC SMPL: {:.5f}'.format(mpjpe_sc_smpl))
mpjpe_sc_graph = mpjpe_scale_corrected_graph_sum / (num_samples * num_joints3d)
print('MPJPE SC GRAPH: {:.5f}'.format(mpjpe_sc_graph))
mpjpe_scale_corrected_smpl_per_frame = np.concatenate(
mpjpe_scale_corrected_smpl_per_frame, axis=0)
mpjpe_scale_corrected_graph_per_frame = np.concatenate(
mpjpe_scale_corrected_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'mpjpe_scale_corrected_per_frame.npy'),
mpjpe_scale_corrected_smpl_per_frame)
np.save(os.path.join(save_path, 'mpjpe_scale_corrected_graph_per_frame.npy'),
mpjpe_scale_corrected_graph_per_frame)
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl = j3d_rec_err_smpl_sum / (num_samples * num_joints3d)
print('Rec Err SMPL: {:.5f}'.format(j3d_rec_err_smpl))
j3d_rec_err_graph = j3d_rec_err_graph_sum / (num_samples * num_joints3d)
print('Rec Err GRAPH: {:.5f}'.format(j3d_rec_err_graph))
j3d_rec_err_smpl_per_frame = np.concatenate(j3d_rec_err_smpl_per_frame, axis=0)
j3d_rec_err_graph_per_frame = np.concatenate(j3d_rec_err_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'j3d_rec_err_per_frame.npy'),
j3d_rec_err_smpl_per_frame)
np.save(os.path.join(save_path, 'j3d_rec_err_graph_per_frame.npy'),
j3d_rec_err_graph_per_frame)
if 'pve_2d' in metrics:
pve_2d_smpl = pve_2d_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SMPL: {:.5f}'.format(pve_2d_smpl))
pve_2d_graph = pve_2d_graph_sum / (num_samples * num_vertices)
print('PVE 2D GRAPH: {:.5f}'.format(pve_2d_graph))
if 'pve_2d_scale_corrected' in metrics:
pve_2d_sc_smpl = pve_2d_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SC SMPL: {:.5f}'.format(pve_2d_sc_smpl))
pve_2d_sc_graph = pve_2d_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE 2D SC GRAPH: {:.5f}'.format(pve_2d_sc_graph))
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl = pve_2d_pa_smpl_sum / (num_samples * num_vertices)
print('PVE 2D PA SMPL: {:.5f}'.format(pve_2d_pa_smpl))
pve_2d_pa_graph = pve_2d_pa_graph_sum / (num_samples * num_vertices)
print('PVE 2D PA GRAPH: {:.5f}'.format(pve_2d_pa_graph))
if __name__ == '__main__':
    # Entry point: evaluate a pretrained CMR model on the 3DPW test split.
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', default=None, help='Path to network checkpoint')
    parser.add_argument('--gpu', default="0", type=str, help='GPU')
    args = parser.parse_args()
    # Device
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Load model
    mesh = Mesh(device=device)
    # Our pretrained networks have 5 residual blocks with 256 channels.
    # You might want to change this if you use a different architecture.
    model = CMR(mesh, 5, 256, pretrained_checkpoint=args.checkpoint, device=device)
    model.to(device)
    model.eval()
    # Setup evaluation dataset
    # NOTE(review): dataset_path and save_path are hard-coded to a specific
    # cluster layout — parameterize via argparse if this script is reused.
    dataset_path = '/scratch2/as2562/datasets/3DPW/test'
    dataset = PW3DEvalDataset(dataset_path, img_wh=config.INPUT_RES)
    print("Eval examples found:", len(dataset))
    # Metrics
    metrics = ['pve', 'pve-t', 'pve_pa', 'pve-t_pa', 'mpjpe', 'j3d_rec_err',
               'pve_2d', 'pve_2d_pa', 'pve_2d_scale_corrected',
               'pve_scale_corrected', 'pve-t_scale_corrected', 'mpjpe_scale_corrected']
    save_path = '/data/cvfs/as2562/GraphCMR/evaluations/3dpw'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Run evaluation
    evaluate_3dpw(model=model,
                  eval_dataset=dataset,
                  metrics=metrics,
                  device=device,
                  vis_save_path=save_path,
                  num_workers=4,
                  pin_memory=True,
                  vis_every_n_batches=1000)
| evaluate_3dpw_mine.py | 27,416 | ------------------------------- TARGETS and INPUTS ------------------------------- ------------------------------- PREDICTIONS ------------------------------- Numpy-fying ------------------------------- METRICS ------------------------------- (1, 6890) scalar Scale and translation correction (1, 6890) (1, 6890) scalar scalar Procrustes analysis (1, 6890) (1, 6890) scalar scalar Scale and translation correction (bs, 6890) scalar (bs, 14) (bs, 14) Scale and translation correction (bs, 14) (bs, 14) Procrustes analysis (bs, 14) (bs, 14) (bs, 6890) (bs, 6890) Scale and translation correction (bs, 6890) (bs, 6890) Procrustes analysis (bs, 6890) (bs, 6890) ------------------------------- VISUALISE ------------------------------- plt.show() Device see issue 152 Load model Our pretrained networks have 5 residual blocks with 256 channels. You might want to change this if you use a different architecture. Setup evaluation dataset Metrics Run evaluation | 955 | en | 0.528554 |
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.api.entity import Entity
from .base import API
import json
from ..model.object_permission_model import ObjectPermissionModel
class User(API):
    """API client for user-related endpoints: ACL permissions, ownership
    changes, and user token generation."""

    def __init__(self):
        super(User, self).__init__()

    @classmethod
    def get_permissions(cls, identifier, acl_class):
        """Resolve an entity by id or name, then return a tuple of
        (permission list, owner user name)."""
        entity = Entity.load_by_id_or_name(identifier, acl_class)
        acl_entries = cls.permissions(entity['id'], entity['aclClass'])
        return acl_entries, entity['owner']

    @classmethod
    def permissions(cls, id, acl_class):
        """Fetch the permission list for one object; returns [] when the
        response carries no permissions."""
        response_data = cls.instance().call(
            'permissions?id={}&aclClass={}'.format(id, acl_class.upper()), None)
        if 'payload' not in response_data or 'permissions' not in response_data['payload']:
            return []
        loaded = []
        for permission_json in response_data['payload']['permissions']:
            model = ObjectPermissionModel.load(permission_json)
            model.parse_mask(True)
            loaded.append(model)
        return loaded

    @classmethod
    def grant_permission(cls, identifier, acl_class, user_name, principal, mask):
        """Grant *mask* on an object to a user (or role, per *principal*).
        Fields left as None are omitted from the request body."""
        candidate_fields = [
            ('aclClass', None if acl_class is None else acl_class.upper()),
            ('id', identifier),
            ('mask', mask),
            ('principal', principal),
            ('userName', user_name),
        ]
        payload = dict((key, value) for (key, value) in candidate_fields
                       if value is not None)
        cls.instance().call('grant', json.dumps(payload))

    @classmethod
    def change_owner(cls, user_name, class_name, object_id):
        """Reassign ownership of an object; returns the updated entity or
        raises RuntimeError with the server message on failure."""
        query = '/grant/owner?userName={}&aclClass={}&id={}'.format(
            user_name, str(class_name).upper(), object_id)
        response_data = cls.instance().call(query, None, http_method='POST')
        if 'payload' in response_data and 'entity' in response_data['payload']:
            return response_data['payload']['entity']
        if 'message' in response_data:
            raise RuntimeError(response_data['message'])
        raise RuntimeError("Failed to change owner.")

    @classmethod
    def generate_user_token(cls, user_name, duration):
        """Issue an access token for *user_name*; *duration* (seconds), when
        truthy, bounds the token lifetime."""
        query = '/user/token?name=%s' % user_name
        if duration:
            query += '&expiration=' + str(duration)
        response_data = cls.instance().call(query, None)
        if 'payload' in response_data and 'token' in response_data['payload']:
            return response_data['payload']['token']
        if 'message' in response_data:
            raise RuntimeError(response_data['message'])
        raise RuntimeError("Failed to generate user token.")
| pipe-cli/src/api/user.py | 3,445 | Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 583 | en | 0.853978 |
import pytest
import numpy as np
import tensorflow as tf
import tensorflow.keras
import librosa
from kapre import STFT, Magnitude, Phase, Delta, InverseSTFT, ApplyFilterbank
from kapre.composed import (
get_melspectrogram_layer,
get_log_frequency_spectrogram_layer,
get_stft_mag_phase,
get_perfectly_reconstructing_stft_istft,
get_stft_magnitude_layer,
)
from utils import get_audio, save_load_compare
def _num_frame_valid(nsp_src, nsp_win, len_hop):
"""Computes the number of frames with 'valid' setting"""
return (nsp_src - (nsp_win - len_hop)) // len_hop
def _num_frame_same(nsp_src, len_hop):
"""Computes the number of frames with 'same' setting"""
return int(np.ceil(float(nsp_src) / len_hop))
def allclose_phase(a, b, atol=1e-3):
    """Assert two phase arrays agree.

    Remember that a small error in a complex value may lead to a large phase
    difference if the norm is very small, so phases are compared through their
    sin/cos projections (which are also invariant to 2*pi wrapping).
    """
    for projection in (np.sin, np.cos):
        np.testing.assert_allclose(projection(a), projection(b), atol=atol)
def allclose_complex_numbers(a, b, atol=1e-3):
    """Assert two complex arrays match in shape, magnitude, and both the
    real and imaginary components."""
    np.testing.assert_equal(np.shape(a), np.shape(b))
    for component in (np.abs, np.real, np.imag):
        np.testing.assert_allclose(component(a), component(b), rtol=1e-5, atol=atol)
@pytest.mark.parametrize('n_fft', [1000])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [1, 2, 6])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_spectrogram_correctness(n_fft, hop_length, n_ch, data_format):
    """Compare kapre STFT / Magnitude / Phase layer outputs against the
    equivalent librosa reference computed on the same mono signal."""
    def _get_stft_model(following_layer=None):
        # compute with kapre
        # Builds Sequential([STFT, following_layer?]) sharing the outer test's
        # n_fft / win_length / hop_length / data_format parameters.
        stft_model = tensorflow.keras.models.Sequential()
        stft_model.add(
            STFT(
                n_fft=n_fft,
                win_length=win_length,
                hop_length=hop_length,
                window_fn=None,
                pad_end=False,
                input_data_format=data_format,
                output_data_format=data_format,
                input_shape=input_shape,
                name='stft',
            )
        )
        if following_layer is not None:
            stft_model.add(following_layer)
        return stft_model
    src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
    win_length = n_fft # test with x2
    # compute with librosa
    S_ref = librosa.core.stft(
        src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False
    ).T # (time, freq)
    # The reference is mono; replicate it across channels to match the input.
    S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1
    S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch
    if data_format == 'channels_first':
        S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq
    stft_model = _get_stft_model()
    S_complex = stft_model.predict(batch_src)[0] # 3d representation
    allclose_complex_numbers(S_ref, S_complex)
    # test Magnitude()
    stft_mag_model = _get_stft_model(Magnitude())
    S = stft_mag_model.predict(batch_src)[0] # 3d representation
    np.testing.assert_allclose(np.abs(S_ref), S, atol=2e-4)
    # # test Phase()
    stft_phase_model = _get_stft_model(Phase())
    S = stft_phase_model.predict(batch_src)[0] # 3d representation
    allclose_phase(np.angle(S_complex), S)
@pytest.mark.parametrize('n_fft', [512])
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [2])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('amin', [1e-5, 1e-3])
@pytest.mark.parametrize('dynamic_range', [120.0, 80.0])
@pytest.mark.parametrize('n_mels', [40])
@pytest.mark.parametrize('mel_f_min', [0.0])
@pytest.mark.parametrize('mel_f_max', [8000])
def test_melspectrogram_correctness(
    n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max
):
    """Test the correctness of the melspectrogram layer (linear and decibel)
    against librosa's melspectrogram / power_to_db on the same signal.

    Note that the mel filterbank itself is tested separately.
    """
    def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
        # compute with kapre
        melgram_model = get_melspectrogram_layer(
            n_fft=n_fft,
            sample_rate=sr,
            n_mels=n_mels,
            mel_f_min=mel_f_min,
            mel_f_max=mel_f_max,
            win_length=win_length,
            hop_length=hop_length,
            input_data_format=data_format,
            output_data_format=data_format,
            return_decibel=return_decibel,
            input_shape=input_shape,
            db_amin=amin,
            db_dynamic_range=dynamic_range,
        )
        return melgram_model
    src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
    win_length = n_fft # test with x2
    # compute with librosa
    S_ref = librosa.feature.melspectrogram(
        src_mono,
        sr=sr,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        center=False,
        power=1.0,
        n_mels=n_mels,
        fmin=mel_f_min,
        fmax=mel_f_max,
    ).T
    # The reference is mono; replicate it across channels to match the input.
    S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1
    S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch
    if data_format == 'channels_first':
        S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq
    # melgram
    melgram_model = _get_melgram_model(
        return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0
    )
    S = melgram_model.predict(batch_src)[0] # 3d representation
    np.testing.assert_allclose(S_ref, S, atol=1e-4)
    # log melgram
    melgram_model = _get_melgram_model(
        return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range
    )
    S = melgram_model.predict(batch_src)[0] # 3d representation
    S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)
    np.testing.assert_allclose(
        S_ref_db, S, rtol=3e-3
    ) # decibel is evaluated with relative tolerance
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_log_spectrogram_runnable(data_format):
    """Smoke test: the log-frequency spectrogram layer can be constructed
    for a valid input shape, both with and without decibel scaling."""
    _, _, input_shape = get_audio(data_format=data_format, n_ch=1)
    for return_decibel in (True, False):
        get_log_frequency_spectrogram_layer(input_shape, return_decibel=return_decibel)
@pytest.mark.xfail
def test_log_spectrogram_fail():
    """The layer constructor must reject a log_n_bins value that does not
    fit the input shape (expected failure)."""
    src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
    _ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200)
def test_delta():
    """Delta layer: a linear ramp input must yield the known finite-difference
    output (0.5 at the edges, 1.0 in the middle)."""
    specgrams = np.arange(1.0, 5.0, dtype=np.float32).reshape(1, -1, 1, 1)  # (b, t, f, ch)
    model = tensorflow.keras.models.Sequential(
        [Delta(win_length=3, input_shape=(4, 1, 1), data_format='channels_last')]
    )
    expected = np.array([0.5, 1.0, 1.0, 0.5], dtype=np.float32).reshape(1, -1, 1, 1)  # (b, t, f, ch)
    np.testing.assert_allclose(model(specgrams), expected)
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_mag_phase(data_format):
    """Compare the combined magnitude+phase layer against librosa.magphase,
    stacked along the channel axis (magnitude first, then phase)."""
    n_ch = 1
    n_fft, hop_length, win_length = 512, 256, 512
    src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
    mag_phase_layer = get_stft_mag_phase(
        input_shape=input_shape,
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        input_data_format=data_format,
        output_data_format=data_format,
    )
    model = tensorflow.keras.models.Sequential()
    model.add(mag_phase_layer)
    mag_phase_kapre = model(batch_src)[0] # a 2d image shape
    ch_axis = 0 if data_format == 'channels_first' else 2 # non-batch
    mag_phase_ref = np.stack(
        librosa.magphase(
            librosa.stft(
                src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False,
            ).T
        ),
        axis=ch_axis,
    )
    np.testing.assert_equal(mag_phase_kapre.shape, mag_phase_ref.shape)
    # magnitude test
    np.testing.assert_allclose(
        np.take(mag_phase_kapre, [0,], axis=ch_axis),
        np.take(mag_phase_ref, [0,], axis=ch_axis),
        atol=2e-4,
    )
    # phase test - todo - yeah..
@pytest.mark.parametrize('waveform_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('stft_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('hop_ratio', [0.5, 0.25, 0.125])
def test_perfectly_reconstructing_stft_istft(waveform_data_format, stft_data_format, hop_ratio):
    """Round-trip test for the perfectly-reconstructing STFT/ISTFT pair:
    STFT -> ISTFT must reproduce the waveform, and ISTFT -> STFT must
    reproduce the spectrogram (after trimming the padding artifacts)."""
    n_ch = 1
    src_mono, batch_src, input_shape = get_audio(data_format=waveform_data_format, n_ch=n_ch)
    time_axis = 1 if waveform_data_format == 'channels_first' else 0 # non-batch!
    len_src = input_shape[time_axis]
    n_fft = 2048
    hop_length = int(2048 * hop_ratio)
    # Frames introduced at each edge by the pair's internal zero-padding.
    n_added_frames = int(1 / hop_ratio) - 1
    stft, istft = get_perfectly_reconstructing_stft_istft(
        stft_input_shape=input_shape,
        n_fft=n_fft,
        hop_length=hop_length,
        waveform_data_format=waveform_data_format,
        stft_data_format=stft_data_format,
    )
    # Test - [STFT -> ISTFT]
    model = tf.keras.models.Sequential([stft, istft])
    recon_waveform = model(batch_src)
    # trim off the pad_begin part
    len_pad_begin = n_fft - hop_length
    if waveform_data_format == 'channels_first':
        recon_waveform = recon_waveform[:, :, len_pad_begin : len_pad_begin + len_src]
    else:
        recon_waveform = recon_waveform[:, len_pad_begin : len_pad_begin + len_src, :]
    np.testing.assert_allclose(batch_src, recon_waveform, atol=1e-5)
    # Test - [ISTFT -> STFT]
    S = librosa.stft(src_mono, n_fft=n_fft, hop_length=hop_length).T.astype(
        np.complex64
    ) # (time, freq)
    ch_axis = 1 if stft_data_format == 'channels_first' else 3 # batch shape
    S = np.expand_dims(S, (0, ch_axis))
    model = tf.keras.models.Sequential([istft, stft])
    recon_S = model(S)
    # trim off the frames coming from zero-pad result
    # The round trip adds n frames per edge twice, hence the doubling below.
    n = n_added_frames
    n_added_frames += n
    if stft_data_format == 'channels_first':
        if n != 0:
            S = S[:, :, n:-n, :]
            recon_S = recon_S[:, :, n_added_frames:-n_added_frames, :]
    else:
        if n != 0:
            S = S[:, n:-n, :, :]
            recon_S = recon_S[:, n_added_frames:-n_added_frames, :, :]
    np.testing.assert_equal(S.shape, recon_S.shape)
    allclose_complex_numbers(S, recon_S)
def test_save_load():
    """Test saving/loading of models that have STFT, melspectrogram, and
    log-frequency spectrogram layers (plus the composed mag/phase layers).

    Each case delegates to save_load_compare, which round-trips the model
    through serialization and compares outputs with the given assertion.
    """
    src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
    # test STFT save/load
    save_load_compare(
        STFT(input_shape=input_shape, pad_begin=True), batch_src, allclose_complex_numbers
    )
    # test melspectrogram save/load
    save_load_compare(
        get_melspectrogram_layer(input_shape=input_shape, return_decibel=True),
        batch_src,
        np.testing.assert_allclose,
    )
    # test log frequency spectrogram save/load
    save_load_compare(
        get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True),
        batch_src,
        np.testing.assert_allclose,
    )
    # test stft_mag_phase
    save_load_compare(
        get_stft_mag_phase(input_shape=input_shape, return_decibel=True),
        batch_src,
        np.testing.assert_allclose,
    )
    # test stft mag
    save_load_compare(
        get_stft_magnitude_layer(input_shape=input_shape), batch_src, np.testing.assert_allclose
    )
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_input_data_format(layer):
    # Constructing with an invalid input_data_format string must raise.
    _ = layer(input_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_output_data_format(layer):
    """Constructing with an invalid output_data_format string must raise.

    Renamed from test_wrong_input_data_format: the duplicate name rebound the
    module attribute, so only one of the two tests was ever collected by
    pytest. This one exercises output_data_format, hence the new name.
    """
    _ = layer(output_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [Delta, ApplyFilterbank])
def test_wrong_data_format(layer):
    # Constructing with an invalid data_format string must raise.
    _ = layer(data_format='weird_string')
if __name__ == '__main__':
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main([__file__])
| tests/test_time_frequency.py | 12,700 | Computes the number of frames with 'same' setting
Computes the number of frames with 'valid' setting
Testing phase.
Remember that a small error in complex value may lead to a large phase difference
if the norm is very small.
Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.
test delta layer
test if log spectrogram layer works well
test if log spectrogram layer works well
Test the correctness of melspectrogram.
Note that mel filterbank is tested separated
test saving/loading of models that has stft, melspectorgrma, and log frequency.
compute with kapre test with x2 compute with librosa (time, freq) time, freq, ch=1 time, freq, ch=n_ch ch, time, freq 3d representation test Magnitude() 3d representation test Phase() 3d representation compute with kapre test with x2 compute with librosa time, freq, ch=1 time, freq, ch=n_ch ch, time, freq melgram 3d representation log melgram 3d representation decibel is evaluated with relative tolerance (b, t, f, ch) (b, t, f, ch) a 2d image shape non-batch magnitude test phase test - todo - yeah.. non-batch! Test - [STFT -> ISTFT] trim off the pad_begin part Test - [ISTFT -> STFT] (time, freq) batch shape trim off the frames coming from zero-pad result test STFT save/load test melspectrogram save/load test log frequency spectrogram save/load test stft_mag_phase test stft mag | 1,390 | en | 0.769586 |
#!/usr/bin/env python2
# Fetches the TLS cipher-suite mapping published by testssl.sh and prints one
# formatted entry per line. Python 2 only (print statement, urllib2).
# NOTE(review): sys, re, datetime, hashlib, and optparse are unused below —
# presumably leftovers; confirm before removing.
import sys
import re
import datetime
import hashlib
import optparse
import urllib2
# cheers Dirk :)
url = 'https://testssl.sh/mapping-rfc.txt'
for line in urllib2.urlopen(url):
    # Columns: code then RFC name. The '(0' prefix presumably reconstructs a
    # '0x...' hex code from an 'x...'-style first column — TODO confirm
    # against the current mapping-rfc.txt format.
    cipher = line.split()
    print cipher[1]+'(0'+cipher[0]+'),'
| resources/cipher_suite_grabber.py | 274 | !/usr/bin/env python2 cheers Dirk :) | 36 | fr | 0.156704 |
"""Ray constants used in the Python code."""
import logging
import math
import os
logger = logging.getLogger(__name__)
def env_integer(key, default):
    """Return the environment variable *key* parsed as an int, or *default*
    when the variable is unset.

    Raises ValueError if the variable is set but not a valid integer.
    """
    raw = os.environ.get(key)
    if raw is None:
        return default
    return int(raw)
def direct_call_enabled():
    """Whether direct task calls are forced on via RAY_FORCE_DIRECT
    (defaults to enabled when the variable is unset)."""
    flag = os.environ.get("RAY_FORCE_DIRECT", "1")
    return bool(int(flag))
ID_SIZE = 20
# The default maximum number of bytes to allocate to the object store unless
# overridden by the user.
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 20 * 10**9
# The default number of retries to call `put` when the object store is full.
DEFAULT_PUT_OBJECT_RETRIES = 5
# The default seconds for delay between calls to retry `put` when
# the object store is full. This delay is exponentially doubled up to
# DEFAULT_PUT_OBJECT_RETRIES times.
DEFAULT_PUT_OBJECT_DELAY = 1
# The smallest cap on the memory used by the object store that we allow.
# This must be greater than MEMORY_RESOURCE_UNIT_BYTES * 0.7
OBJECT_STORE_MINIMUM_MEMORY_BYTES = 75 * 1024 * 1024
# The default maximum number of bytes that the non-primary Redis shards are
# allowed to use unless overridden by the user.
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10**10
# The smallest cap on the memory used by Redis that we allow.
REDIS_MINIMUM_MEMORY_BYTES = 10**7
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SIMPLE = 1
DEFAULT_ACTOR_CREATION_CPU_SIMPLE = 0
# Default resource requirements for actors when some resource requirements are
# specified in .
DEFAULT_ACTOR_METHOD_CPU_SPECIFIED = 0
DEFAULT_ACTOR_CREATION_CPU_SPECIFIED = 1
# Default number of return values for each actor method.
DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS = 1
# If a remote function or actor (or some other export) has serialized size
# greater than this quantity, print an warning.
PICKLE_OBJECT_WARNING_SIZE = 10**7
# If remote functions with the same source are imported this many times, then
# print a warning.
DUPLICATE_REMOTE_FUNCTION_THRESHOLD = 100
# The maximum resource quantity that is allowed. TODO(rkn): This could be
# relaxed, but the current implementation of the node manager will be slower
# for large resource quantities due to bookkeeping of specific resource IDs.
MAX_RESOURCE_QUANTITY = 100000
# Each memory "resource" counts as this many bytes of memory.
MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 * 1024
# Number of units 1 resource can be subdivided into.
MIN_RESOURCE_GRANULARITY = 0.0001
# Fraction of plasma memory that can be reserved. It is actually 70% but this
# is set to 69% to leave some headroom.
PLASMA_RESERVABLE_MEMORY_FRACTION = 0.69


def round_to_memory_units(memory_bytes, round_up):
    """Round a byte count to the nearest whole memory unit, in bytes."""
    units = to_memory_units(memory_bytes, round_up)
    return from_memory_units(units)


def from_memory_units(memory_units):
    """Convert a count of memory units into bytes."""
    return MEMORY_RESOURCE_UNIT_BYTES * memory_units


def to_memory_units(memory_bytes, round_up):
    """Convert bytes into whole memory units.

    Raises:
        ValueError: if *memory_bytes* is smaller than one memory unit.
    """
    value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
    if value < 1:
        raise ValueError(
            "The minimum amount of memory that can be requested is {} bytes, "
            "however {} bytes was asked.".format(MEMORY_RESOURCE_UNIT_BYTES,
                                                 memory_bytes))
    if isinstance(value, float) and not value.is_integer():
        # TODO(ekl) Ray currently does not support fractional resources when
        # the quantity is greater than one. We should fix memory resources to
        # be allocated in units of bytes and not 100MB.
        rounder = math.ceil if round_up else math.floor
        value = rounder(value)
    return int(value)
# Different types of Ray errors that can be pushed to the driver.
# TODO(rkn): These should be defined in flatbuffers and must be synced with
# the existing C++ definitions.
WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
TASK_PUSH_ERROR = "task"
REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
CHECKPOINT_PUSH_ERROR = "checkpoint"
REGISTER_ACTOR_PUSH_ERROR = "register_actor"
# Identifiers for error types that can be pushed to the driver.
# These must be kept in sync with the existing C++ definitions.
WORKER_CRASH_PUSH_ERROR = "worker_crash"
WORKER_DIED_PUSH_ERROR = "worker_died"
WORKER_POOL_LARGE_ERROR = "worker_pool_large"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
INFEASIBLE_TASK_ERROR = "infeasible_task"
RESOURCE_DEADLOCK_ERROR = "resource_deadlock"
REMOVED_NODE_ERROR = "node_removed"
MONITOR_DIED_ERROR = "monitor_died"
LOG_MONITOR_DIED_ERROR = "log_monitor_died"
REPORTER_DIED_ERROR = "reporter_died"
DASHBOARD_DIED_ERROR = "dashboard_died"
RAYLET_CONNECTION_ERROR = "raylet_connection_error"
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
# The maximum number of nodes to launch in a single request.
# Multiple requests may be made for this batch size, up to
# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
# Max number of nodes to launch at a time.
AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
    "AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
# Interval at which to perform autoscaling updates.
AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
# The autoscaler will attempt to restart Ray on nodes it hasn't heard from
# in more than this interval.
AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
                                             30)
# The reporter will report its statistics this often (milliseconds).
REPORTER_UPDATE_INTERVAL_MS = env_integer("REPORTER_UPDATE_INTERVAL_MS", 2500)
# Max number of retries to AWS (default is 5, time increases exponentially)
BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
# Max number of retries to create an EC2 node (retry different subnet)
BOTO_CREATE_MAX_RETRIES = env_integer("BOTO_CREATE_MAX_RETRIES", 5)
LOGGER_FORMAT = (
    "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
LOGGER_FORMAT_HELP = "The logging format. default='{}'".format(LOGGER_FORMAT)
LOGGER_LEVEL = "info"
LOGGER_LEVEL_CHOICES = ["debug", "info", "warning", "error", "critical"]
LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
                     " 'warning', 'error', 'critical'], default='info'")
# A constant indicating that an actor doesn't need reconstructions.
NO_RECONSTRUCTION = 0
# A constant indicating that an actor should be reconstructed infinite times.
INFINITE_RECONSTRUCTION = 2**30
# Constants used to define the different process types.
PROCESS_TYPE_REAPER = "reaper"
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_RAYLET_MONITOR = "raylet_monitor"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
PROCESS_TYPE_REPORTER = "reporter"
PROCESS_TYPE_DASHBOARD = "dashboard"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_PLASMA_STORE = "plasma_store"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
# Max number of files the log monitor may hold open at a time.
LOG_MONITOR_MAX_OPEN_FILES = 200
# A constant used as object metadata to indicate the object is raw binary.
RAW_BUFFER_METADATA = b"RAW"
# A constant used as object metadata to indicate the object is pickled. This
# format is only ever used for Python inline task argument values.
PICKLE_BUFFER_METADATA = b"PICKLE"
# A constant used as object metadata to indicate the object is pickle5 format.
PICKLE5_BUFFER_METADATA = b"PICKLE5"
# Channel name for autoscaler resource requests (presumably a Redis
# pub/sub channel -- verify against the publisher/subscriber code).
AUTOSCALER_RESOURCE_REQUEST_CHANNEL = b"autoscaler_resource_request"
# The default password to prevent redis port scanning attack.
# Hex for ray.
REDIS_DEFAULT_PASSWORD = "5241590000000000"
# The default ip address to bind to.
NODE_DEFAULT_IP = "127.0.0.1"
| python/ray/ray_constants.py | 8,014 | Convert from memory units -> bytes.
Round bytes to the nearest memory unit.
Convert from bytes -> memory units.
Ray constants used in the Python code.
The default maximum number of bytes to allocate to the object store unless overridden by the user. The default number of retries to call `put` when the object store is full. The default seconds for delay between calls to retry `put` when the object store is full. This delay is exponentially doubled up to DEFAULT_PUT_OBJECT_RETRIES times. The smallest cap on the memory used by the object store that we allow. This must be greater than MEMORY_RESOURCE_UNIT_BYTES * 0.7 The default maximum number of bytes that the non-primary Redis shards are allowed to use unless overridden by the user. The smallest cap on the memory used by Redis that we allow. Default resource requirements for actors when no resource requirements are specified. Default resource requirements for actors when some resource requirements are specified in . Default number of return values for each actor method. If a remote function or actor (or some other export) has serialized size greater than this quantity, print a warning. If remote functions with the same source are imported this many times, then print a warning. The maximum resource quantity that is allowed. TODO(rkn): This could be relaxed, but the current implementation of the node manager will be slower for large resource quantities due to bookkeeping of specific resource IDs. Each memory "resource" counts as this many bytes of memory. Number of units 1 resource can be subdivided into. Fraction of plasma memory that can be reserved. It is actually 70% but this is set to 69% to leave some headroom. TODO(ekl) Ray currently does not support fractional resources when the quantity is greater than one. We should fix memory resources to be allocated in units of bytes and not 100MB. Different types of Ray errors that can be pushed to the driver. TODO(rkn): These should be defined in flatbuffers and must be synced with the existing C++ definitions. Abort autoscaling if more than this number of errors are encountered. 
This is a safety feature to prevent e.g. runaway node launches. The maximum number of nodes to launch in a single request. Multiple requests may be made for this batch size, up to the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES. Max number of nodes to launch at a time. Interval at which to perform autoscaling updates. The autoscaler will attempt to restart Ray on nodes it hasn't heard from in more than this interval. The reporter will report its statistics this often (milliseconds). Max number of retries to AWS (default is 5, time increases exponentially) Max number of retries to create an EC2 node (retry different subnet) A constant indicating that an actor doesn't need reconstructions. A constant indicating that an actor should be reconstructed infinite times. Constants used to define the different process types. A constant used as object metadata to indicate the object is raw binary. A constant used as object metadata to indicate the object is pickled. This format is only ever used for Python inline task argument values. A constant used as object metadata to indicate the object is pickle5 format. The default password to prevent redis port scanning attack. Hex for ray. The default ip address to bind to. | 3,338 | en | 0.880086 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import TransformerMixin
from bugbug.utils import numpy_to_dict
class KerasTextToSequences(BaseEstimator, TransformerMixin):
    """Scikit-learn transformer mapping raw texts to padded integer
    sequences using a Keras ``Tokenizer``.
    """

    def __init__(self, maxlen, vocab_size):
        # Every output sequence is padded/truncated to this length.
        self.maxlen = maxlen
        self.tokenizer = Tokenizer(num_words=vocab_size)

    def fit(self, x, y=None):
        """Learn the token vocabulary from the training texts."""
        self.tokenizer.fit_on_texts(x)
        return self

    def transform(self, data):
        """Encode ``data`` as integer sequences padded to ``maxlen``."""
        encoded = self.tokenizer.texts_to_sequences(data)
        return pad_sequences(encoded, maxlen=self.maxlen)
class KerasClassifier(BaseEstimator, ClassifierMixin):
    """Scikit-learn wrapper around a Keras model.

    Subclasses are expected to provide ``model_creator(X_dict, y)``,
    which returns a compiled Keras model.
    """

    def __init__(self, epochs, batch_size):
        self.epochs = epochs
        self.batch_size = batch_size

    def fit(self, X, y):
        """Build the model via ``model_creator`` and train it on X/y."""
        features = numpy_to_dict(X)
        self.model = self.model_creator(features, y)
        self.model.fit(
            features,
            y,
            epochs=self.epochs,
            batch_size=self.batch_size,
            verbose=1,
        )
        return self

    def predict_proba(self, X):
        """Return the raw model outputs for ``X``."""
        return self.model.predict(numpy_to_dict(X))

    def predict(self, X):
        """Return the index of the highest-scoring class per sample."""
        probabilities = self.predict_proba(X)
        return probabilities.argmax(axis=-1)
| bugbug/nn.py | 1,482 | -*- coding: utf-8 -*- This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. | 214 | en | 0.926349 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 16:02:16 2018

@author: ning

Experiment 1 ("PoS"): cross-validated conditional counts per participant.
For each participant three feature sets are analysed -- all six features,
the three judgement features, and the three reaction-time features -- and
one CSV per participant and feature set is written to ``saving_dir``.
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
from utils import (cv_counts)

saving_dir = '../results/cv_counts'
if not os.path.exists(saving_dir):
    os.mkdir(saving_dir)


def _run_cv_counts(df_sub, feature_names, target_name, participant,
                   experiment, n_windows=4, seed=12345):
    """Run ``cv_counts`` for windows 1..n_windows and collect the results.

    Parameters mirror ``cv_counts``; ``seed`` resets numpy's RNG before the
    run so every feature set is evaluated with identical randomness, exactly
    as the original per-section ``np.random.seed`` calls did.

    Returns a DataFrame with the per-window/fold conditional counts
    ``{target}_high_cond_{feature}_{low,high}`` for every feature.
    """
    np.random.seed(seed)
    results = dict(sub=[],
                   window=[],
                   fold=[],
                   )
    for name in feature_names:
        results['{}_high_cond_{}_low'.format(target_name, name)] = []
        results['{}_high_cond_{}_high'.format(target_name, name)] = []
    # Loop through the number of trials looking back.
    for n_back in np.arange(1, n_windows + 1):
        results = cv_counts(
            df_sub,
            feature_names,
            target_name,
            results,
            participant,
            experiment,
            window=n_back,
        )
    return pd.DataFrame(results)


# The three feature sets analysed for every participant, keyed by the
# prefix of the CSV file each result is saved to.  (This replaces three
# near-identical copy-pasted sections.)
FEATURE_SETS = [
    ('Pos_6_features', ['correct',
                        'awareness',
                        'confidence',
                        'RT_correct',
                        'RT_awareness',
                        'RT_confidence']),
    ('Pos_3_1_features', ['correct',
                          'awareness',
                          'confidence']),
    ('Pos_RT_features', ['RT_correct',
                         'RT_awareness',
                         'RT_confidence']),
]

# Exp 1
experiment = 'pos'
# Load and relabel the raw data once; it does not change per participant.
df = pd.read_csv(os.path.join(working_dir, '../data/PoSdata.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
              'blocks',
              'trials',
              'firstgabor',
              'success',
              'tilted',
              'correct',
              'RT_correct',
              'awareness',
              'RT_awareness',
              'confidence',
              'RT_confidence']

for participant in ['AC', 'CL', 'FW', 'HB', 'KK', 'LM', 'MC', 'MP1', 'MP2',
                    'NN', 'RP', 'SD', 'TJ', 'TS', 'WT']:
    # Slicing copies, so the 0/1 re-coding below does not touch ``df``.
    df_sub = df[df['participant'] == participant]
    # Make sure all the attributes are either 0 or 1.
    df_sub.loc[:, 'success'] = df_sub.loc[:, 'success'].values - 1
    df_sub.loc[:, 'awareness'] = df_sub.loc[:, 'awareness'].values - 1
    df_sub.loc[:, 'confidence'] = df_sub.loc[:, 'confidence'].values - 1
    for prefix, feature_names in FEATURE_SETS:
        temp = _run_cv_counts(df_sub,
                              feature_names,
                              'success',
                              participant,
                              experiment)
        # Save as a csv, one file per participant and feature set.
        temp.to_csv(os.path.join(
            saving_dir,
            '{} (cv_count)_{}.csv'.format(prefix, participant)),
            index=False)
| scripts/classifcation_pos_n_trials_back (cv counts).py | 5,773 | Created on Sun Jul 15 16:02:16 2018
@author: ning
-*- coding: utf-8 -*- Exp 1 make sure all the attributes are either 0 or 1 use all 6 possible features loop through the number of trials looking back this is the part that is redundent and the code is long save as a csv use success, awareness, and confidence as features use judgement features loop through the number of trials looking back this is the part that is redundent and the code is long save as a csv use reactimes as features use all 6 possible features loop through the number of trials looking back this is the part that is redundent and the code is long save as a csv | 634 | en | 0.952075 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.