code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
from piquasso._backends.sampling.state import SamplingState
from piquasso._math.validations import all_natural
from piquasso.api.errors import InvalidState
from piquasso.api.result import Result
from piquasso.api.instruction import Instruction
from theboss.boson_sampling_simulator import BosonSamplingSimulator
# The fastest implemented permanent calculator is currently Ryser-Guan
from theboss.boson_sampling_utilities.permanent_calculators.bs_permanent_calculator_interface import ( # noqa: E501
BSPermanentCalculatorInterface,
)
from theboss.boson_sampling_utilities.permanent_calculators.ryser_guan_permanent_calculator import ( # noqa: E501
RyserGuanPermanentCalculator,
)
# Fastest boson sampling algorithm generalized for bunched states
from theboss.simulation_strategies.generalized_cliffords_simulation_strategy import (
GeneralizedCliffordsSimulationStrategy,
)
from theboss.simulation_strategies.generalized_cliffords_uniform_losses_simulation_strategy import ( # noqa: E501
GeneralizedCliffordsUniformLossesSimulationStrategy,
)
# Fastest BS algorithm generalized for bunched states, but with lossy network
from theboss.simulation_strategies.lossy_networks_generalized_cliffords_simulation_strategy import ( # noqa: E501
LossyNetworksGeneralizedCliffordsSimulationStrategy,
)
from theboss.simulation_strategies.simulation_strategy_interface import (
SimulationStrategyInterface,
)
def state_vector(state: SamplingState, instruction: Instruction, shots: int) -> Result:
    """Set the initial state vector of the sampling state.

    Raises:
        InvalidState: if the state vector was already set, or if the scaled
            occupation numbers are not natural numbers.
    """
    if np.any(state.initial_state != 0):
        raise InvalidState("State vector is already set.")

    params = instruction._all_params
    initial_state = params["coefficient"] * np.array(params["occupation_numbers"])

    if not all_natural(initial_state):
        raise InvalidState(
            f"Invalid initial state specified: instruction={instruction}"
        )

    # Round to the nearest integers; `all_natural` already guaranteed these
    # values are (numerically) natural numbers.
    state.initial_state = np.rint(initial_state).astype(int)

    return Result(state=state)
def passive_linear(
    state: SamplingState, instruction: Instruction, shots: int
) -> Result:
    r"""Applies an interferometer to the circuit.

    Placing another interferometer in the network just before sampling is
    realized by multiplying the current effective interferometer matrix with
    the new interferometer matrix. The new matrix acts on the qumodes given
    as arguments and as the identity on every other mode.
    """
    passive_block = instruction._all_params["passive_block"]

    _apply_matrix_on_modes(
        state=state,
        matrix=passive_block,
        modes=instruction.modes,
    )

    return Result(state=state)
def _apply_matrix_on_modes(
state: SamplingState, matrix: np.ndarray, modes: Tuple[int, ...]
) -> None:
embedded = np.identity(len(state.interferometer), dtype=complex)
embedded[np.ix_(modes, modes)] = matrix
state.interferometer = embedded @ state.interferometer
def loss(state: SamplingState, instruction: Instruction, shots: int) -> Result:
    """Marks the state as lossy and embeds a diagonal transmissivity matrix
    on the instruction's modes."""
    state.is_lossy = True

    transmissivities = instruction._all_params["transmissivity"]

    _apply_matrix_on_modes(
        state=state,
        matrix=np.diag(transmissivities),
        modes=instruction.modes,
    )

    return Result(state=state)
def sampling(state: SamplingState, instruction: Instruction, shots: int) -> Result:
    """Draws `shots` boson sampling samples from the current state using the
    Ryser-Guan permanent calculator and the fastest applicable strategy."""
    input_state = np.array(state.initial_state)

    permanent_calculator = RyserGuanPermanentCalculator(
        matrix=state.interferometer, input_state=input_state
    )
    strategy = _get_sampling_simulation_strategy(state, permanent_calculator)

    simulator = BosonSamplingSimulator(strategy)
    raw_samples = simulator.get_classical_simulation_results(
        input_state, samples_number=shots
    )

    return Result(state=state, samples=[tuple(sample) for sample in raw_samples])
def _get_sampling_simulation_strategy(
    state: SamplingState, permanent_calculator: BSPermanentCalculatorInterface
) -> SimulationStrategyInterface:
    """Chooses the fastest applicable theboss simulation strategy.

    Lossless states use the generalized Cliffords strategy; uniformly lossy
    states (all singular values of the interferometer equal) use the
    uniform-losses variant; otherwise the general lossy-network strategy
    is returned.
    """
    if not state.is_lossy:
        return GeneralizedCliffordsSimulationStrategy(permanent_calculator)

    singular_values = np.linalg.svd(state.interferometer)[1]
    uniform_losses = np.all(np.isclose(singular_values, singular_values[0]))

    if uniform_losses:
        return GeneralizedCliffordsUniformLossesSimulationStrategy(
            permanent_calculator
        )

    return LossyNetworksGeneralizedCliffordsSimulationStrategy(permanent_calculator)
| [
"theboss.simulation_strategies.generalized_cliffords_simulation_strategy.GeneralizedCliffordsSimulationStrategy",
"theboss.simulation_strategies.lossy_networks_generalized_cliffords_simulation_strategy.LossyNetworksGeneralizedCliffordsSimulationStrategy",
"piquasso._math.validations.all_natural",
"numpy.ix_",... | [((2685, 2704), 'piquasso.api.result.Result', 'Result', ([], {'state': 'state'}), '(state=state)\n', (2691, 2704), False, 'from piquasso.api.result import Result\n'), ((3417, 3436), 'piquasso.api.result.Result', 'Result', ([], {'state': 'state'}), '(state=state)\n', (3423, 3436), False, 'from piquasso.api.result import Result\n'), ((3997, 4016), 'piquasso.api.result.Result', 'Result', ([], {'state': 'state'}), '(state=state)\n', (4003, 4016), False, 'from piquasso.api.result import Result\n'), ((4123, 4152), 'numpy.array', 'np.array', (['state.initial_state'], {}), '(state.initial_state)\n', (4131, 4152), True, 'import numpy as np\n'), ((4180, 4269), 'theboss.boson_sampling_utilities.permanent_calculators.ryser_guan_permanent_calculator.RyserGuanPermanentCalculator', 'RyserGuanPermanentCalculator', ([], {'matrix': 'state.interferometer', 'input_state': 'initial_state'}), '(matrix=state.interferometer, input_state=\n initial_state)\n', (4208, 4269), False, 'from theboss.boson_sampling_utilities.permanent_calculators.ryser_guan_permanent_calculator import RyserGuanPermanentCalculator\n'), ((4395, 4438), 'theboss.boson_sampling_simulator.BosonSamplingSimulator', 'BosonSamplingSimulator', (['simulation_strategy'], {}), '(simulation_strategy)\n', (4417, 4438), False, 'from theboss.boson_sampling_simulator import BosonSamplingSimulator\n'), ((4910, 4945), 'numpy.linalg.svd', 'np.linalg.svd', (['state.interferometer'], {}), '(state.interferometer)\n', (4923, 4945), True, 'import numpy as np\n'), ((5112, 5185), 'theboss.simulation_strategies.lossy_networks_generalized_cliffords_simulation_strategy.LossyNetworksGeneralizedCliffordsSimulationStrategy', 'LossyNetworksGeneralizedCliffordsSimulationStrategy', (['permanent_calculator'], {}), '(permanent_calculator)\n', (5163, 5185), False, 'from theboss.simulation_strategies.lossy_networks_generalized_cliffords_simulation_strategy import LossyNetworksGeneralizedCliffordsSimulationStrategy\n'), ((2173, 2205), 
'numpy.all', 'np.all', (['(state.initial_state == 0)'], {}), '(state.initial_state == 0)\n', (2179, 2205), True, 'import numpy as np\n'), ((2221, 2265), 'piquasso.api.errors.InvalidState', 'InvalidState', (['"""State vector is already set."""'], {}), "('State vector is already set.')\n", (2233, 2265), False, 'from piquasso.api.errors import InvalidState\n'), ((2430, 2458), 'numpy.array', 'np.array', (['occupation_numbers'], {}), '(occupation_numbers)\n', (2438, 2458), True, 'import numpy as np\n'), ((2471, 2497), 'piquasso._math.validations.all_natural', 'all_natural', (['initial_state'], {}), '(initial_state)\n', (2482, 2497), False, 'from piquasso._math.validations import all_natural\n'), ((2513, 2588), 'piquasso.api.errors.InvalidState', 'InvalidState', (['f"""Invalid initial state specified: instruction={instruction}"""'], {}), "(f'Invalid initial state specified: instruction={instruction}')\n", (2525, 2588), False, 'from piquasso.api.errors import InvalidState\n'), ((3630, 3650), 'numpy.ix_', 'np.ix_', (['modes', 'modes'], {}), '(modes, modes)\n', (3636, 3650), True, 'import numpy as np\n'), ((4820, 4880), 'theboss.simulation_strategies.generalized_cliffords_simulation_strategy.GeneralizedCliffordsSimulationStrategy', 'GeneralizedCliffordsSimulationStrategy', (['permanent_calculator'], {}), '(permanent_calculator)\n', (4858, 4880), False, 'from theboss.simulation_strategies.generalized_cliffords_simulation_strategy import GeneralizedCliffordsSimulationStrategy\n'), ((4961, 5008), 'numpy.isclose', 'np.isclose', (['singular_values', 'singular_values[0]'], {}), '(singular_values, singular_values[0])\n', (4971, 5008), True, 'import numpy as np\n'), ((5026, 5099), 'theboss.simulation_strategies.generalized_cliffords_uniform_losses_simulation_strategy.GeneralizedCliffordsUniformLossesSimulationStrategy', 'GeneralizedCliffordsUniformLossesSimulationStrategy', (['permanent_calculator'], {}), '(permanent_calculator)\n', (5077, 5099), False, 'from 
theboss.simulation_strategies.generalized_cliffords_uniform_losses_simulation_strategy import GeneralizedCliffordsUniformLossesSimulationStrategy\n'), ((2638, 2660), 'numpy.rint', 'np.rint', (['initial_state'], {}), '(initial_state)\n', (2645, 2660), True, 'import numpy as np\n'), ((3894, 3944), 'numpy.diag', 'np.diag', (["instruction._all_params['transmissivity']"], {}), "(instruction._all_params['transmissivity'])\n", (3901, 3944), True, 'import numpy as np\n')] |
from numpy import log, exp, allclose, sqrt
import numpy as np
from os.path import dirname, realpath, join
from arspy.ars import adaptive_rejection_sampling
# Path template for reference-sample files: e.g. data_file("gaussian")
# resolves to "<this dir>/reference_data/ars_data/ars_gaussian.npy".
data_file = "{}/ars_{{}}.npy".format(
    join(dirname(realpath(__file__)), "reference_data", "ars_data")
).format
def gaussian(x, sigma=1):
    """Unnormalized Gaussian log-density, deliberately evaluated as
    log(exp(.)) to exercise ARS on a round-tripped exponent."""
    exponent = -x ** 2 / sigma
    return log(exp(exponent))
def half_gaussian(x, sigma=3):
    """Gaussian log-density restricted to x <= 0: positive x is penalized
    by scaling the (negative) log-density with a huge factor."""
    penalty = 1 * (x <= 0) + 1e300 * (x > 0)
    return log(exp(-x ** 2 / sigma)) * penalty
def relativistic_momentum_logpdf(p, m=1., c=1.):
    """Log-density (up to a constant) of the relativistic momentum
    distribution: -m c^2 sqrt(p^2 / (m^2 c^2) + 1)."""
    normalized_sq = p ** 2 / (m ** 2 * c ** 2)
    return -m * c ** 2 * sqrt(normalized_sq + 1)
# Test-case registry: each entry names the log-pdf to sample from, the
# initial ARS bracket [a, b], the support `domain`, the number of samples
# to draw, and the path to the reference samples produced by the Julia
# implementation (loaded in _run below).
tests = {
    "1d-gaussian": {"name": "1d-gaussian",
                    "data": data_file("gaussian"),
                    "func": gaussian,
                    "a": -2, "b": 2,
                    "domain": (float("-inf"), float("inf")),
                    "n_samples": 20},
    "1d-half-gaussian": {"name": "1d-half-gaussian",
                         "data": data_file("half_gaussian"),
                         "func": half_gaussian,
                         "a": -2, "b": 0,
                         "domain": [float("-inf"), 0],
                         "n_samples": 20},
    "relativistic_monte_carlo_logpdf": {
        "name": "relativistic_momentum_logpdf",
        "data": data_file("relativistic_logpdf"),
        "func": relativistic_momentum_logpdf,
        "a": -10.0, "b": 10.0,
        "domain": [float("-inf"), float("inf")],
        "n_samples": 20
    }
}
def _run(test_name):
    """Runs adaptive rejection sampling for the named test case and compares
    the drawn samples against the stored reference result that was computed
    by the Julia implementation."""
    config = tests[test_name]

    python_result = adaptive_rejection_sampling(
        logpdf=config["func"],
        a=config["a"],
        b=config["b"],
        domain=config["domain"],
        n_samples=config["n_samples"],
        seed=1,
    )

    julia_result = np.load(config["data"])

    assert allclose(julia_result, python_result, atol=3e-01)
def test_gaussian():
    # ARS samples from the 1d Gaussian must match the Julia reference.
    _run("1d-gaussian")
def test_half_gaussian():
    # ARS samples from the half Gaussian must match the Julia reference.
    _run("1d-half-gaussian")
def test_relativistic_monte_carlo_logpdf():
    # ARS samples from the relativistic momentum log-pdf must match the
    # Julia reference.
    _run("relativistic_monte_carlo_logpdf")
| [
"numpy.load",
"numpy.allclose",
"os.path.realpath",
"numpy.exp",
"arspy.ars.adaptive_rejection_sampling",
"numpy.sqrt"
] | [((1704, 1804), 'arspy.ars.adaptive_rejection_sampling', 'adaptive_rejection_sampling', ([], {'logpdf': 'logpdf', 'a': 'a', 'b': 'b', 'domain': 'domain', 'n_samples': 'n_samples', 'seed': '(1)'}), '(logpdf=logpdf, a=a, b=b, domain=domain,\n n_samples=n_samples, seed=1)\n', (1731, 1804), False, 'from arspy.ars import adaptive_rejection_sampling\n'), ((1898, 1925), 'numpy.load', 'np.load', (["input_dict['data']"], {}), "(input_dict['data'])\n", (1905, 1925), True, 'import numpy as np\n'), ((1938, 1985), 'numpy.allclose', 'allclose', (['julia_result', 'python_result'], {'atol': '(0.3)'}), '(julia_result, python_result, atol=0.3)\n', (1946, 1985), False, 'from numpy import log, exp, allclose, sqrt\n'), ((318, 338), 'numpy.exp', 'exp', (['(-x ** 2 / sigma)'], {}), '(-x ** 2 / sigma)\n', (321, 338), False, 'from numpy import log, exp, allclose, sqrt\n'), ((521, 557), 'numpy.sqrt', 'sqrt', (['(p ** 2 / (m ** 2 * c ** 2) + 1)'], {}), '(p ** 2 / (m ** 2 * c ** 2) + 1)\n', (525, 557), False, 'from numpy import log, exp, allclose, sqrt\n'), ((388, 408), 'numpy.exp', 'exp', (['(-x ** 2 / sigma)'], {}), '(-x ** 2 / sigma)\n', (391, 408), False, 'from numpy import log, exp, allclose, sqrt\n'), ((215, 233), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (223, 233), False, 'from os.path import dirname, realpath, join\n')] |
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from skimage import io
import matplotlib.pylab as plt
import torch
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from model.enc_net import enc_net
from model.utils import calculate_l1_loss
from data.data_loader import CamVidDataset, loader
from model.utils import smooth_in, smooth_out
# Fix the RNG seed so training runs are reproducible.
torch.manual_seed(555)
data_root_dir = './data'
label_table_name = 'label_color'
# Lookup table mapping each class index to an (r, g, b) color, used by
# restoration() below.
# NOTE(review): assumes './data/label_color' is a CSV readable by pandas
# with 'r', 'g', 'b' columns — confirm against the data file.
color_data = pd.read_csv(Path(data_root_dir, label_table_name))
def main(args):
    """Builds the ENCNet model, optionally restores a checkpoint, then runs
    training followed by evaluation.

    Args:
        args: parsed command-line arguments (see the argparse setup below
            the `__main__` guard).
    """
    model = enc_net(args.n_class)
    if Path(args.resume_model).exists():
        print("load model:", args.resume_model)
        model.load_state_dict(torch.load(args.resume_model))

    # setup optimizer
    optimizer = optim.Adam(
        model.parameters(), lr=args.lr, betas=(args.beta1, 0.999))

    # Use context managers so the pointer files are closed (the original
    # bare open() calls leaked the file handles).
    with open(args.train_image_pointer_path) as f:
        train_image_names = [line.rstrip() for line in f]
    with open(args.test_image_pointer_path) as f:
        test_image_names = [line.rstrip() for line in f]

    train_dataset = CamVidDataset(train_image_names, args.root_dir)
    test_dataset = CamVidDataset(test_image_names, args.root_dir)
    train_loader = loader(train_dataset, args.batch_size)
    # Evaluation runs one image at a time, in deterministic order.
    test_loader = loader(test_dataset, 1, shuffle=False)

    train(args, model, optimizer, train_loader)
    test(args, model, test_loader)
def train(args, model, optimizer, data_loader):
    """Trains the model for ``args.epochs`` epochs, checkpointing after each.

    The loss is NLL on the log-softmaxed segmentation output plus two MSE
    terms tying the auxiliary outputs (se2, se1) to a per-image binary
    vector of which classes are present in the target.
    """
    model.train()
    for epoch in range(args.epochs):
        for i, (data, target) in enumerate(data_loader):
            model.zero_grad()
            optimizer.zero_grad()
            # Model returns the segmentation map plus two auxiliary
            # class-presence predictions.
            output, se2, se1 = model(data)
            n_batch = output.shape[0]
            loss = F.nll_loss(F.log_softmax(output), target)
            # loss += calculate_l1_loss(output, target)
            # Binary indicator per image: 1 if class c appears in the
            # target mask. NOTE(review): the 32 here hard-codes the class
            # count instead of using args.n_class — confirm intended.
            exist_class = [[1 if c in target[i_batch].numpy() else 0 for c in range(32)]
                           for i_batch in range(n_batch)]
            exist_class = torch.FloatTensor(exist_class)
            loss += F.mse_loss(se2, exist_class)
            loss += F.mse_loss(se1, exist_class)
            # with torch.no_grad():
            #     l_noise = smooth_in(model)
            loss.backward()
            # with torch.no_grad():
            #     smooth_out(model, l_noise)
            optimizer.step()
            print('[{}/{}][{}/{}] Loss: {:.4f}'.format(
                epoch, args.epochs, i,
                len(data_loader), loss.item()))
        # do checkpointing
        torch.save(model.state_dict(),
                   '{}/encnet_ckpt.pth'.format(args.out_dir))
def test(args, model, data_loader):
    """Evaluates the model on ``data_loader``.

    Prints the average loss and pixel accuracy, and writes the predicted
    label maps to disk as color images via ``restoration``.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for i_batch, (data, target) in enumerate(data_loader):
            output, se2, se1 = model(data)
            # sum up batch loss; reduction='sum' replaces the removed
            # `size_average=False` argument (same behavior). torch.mean of
            # the resulting scalar is a no-op, kept for parity.
            # NOTE(review): unlike train(), no log_softmax is applied to
            # `output` before nll_loss here — confirm this is intended.
            test_loss += torch.mean(F.nll_loss(
                output, target, reduction='sum')).item()
            # get the index of the max log-probability
            pred = output.argmax(1)
            correct += pred.eq(target.view_as(pred)).sum().item()
            restoration(pred.numpy(), i_batch, args.n_class)
    test_loss /= len(data_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
          .format(test_loss, correct, len(data_loader.dataset),
                  100. * correct / len(data_loader.dataset)))
def restoration(result_labels, i_batch, n_class,
                output_img_path='./results/images'):
    """Converts predicted label maps back to RGB images and saves them.

    Args:
        result_labels: array of shape (n_images, h, w) holding class indices.
        i_batch: batch index, used to build the output file name.
        n_class: number of classes to colorize.
        output_img_path: directory the result images are written into.
    """
    for labels in result_labels:
        h, w = labels.shape
        # Convert the output label map into an RGB image using the
        # module-level color table.
        img = np.zeros((h, w, 3))
        for category in range(n_class):
            idx = np.where(labels == category)  # np.where returns a tuple of index arrays
            if len(idx[0]) > 0:
                # `.iloc` replaces the `.ix` indexer, which was removed
                # from pandas in 1.0 and would raise AttributeError here.
                color = color_data.iloc[category]
                img[idx[0], idx[1], :] = [color['r'], color['g'], color['b']]
        img = img.astype(np.uint8)
        io.imsave(output_img_path+'/test_result_{}.jpg'.format(str(i_batch).zfill(5)), img)
        io.imshow(img)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', default='./data/CamSeq01', help='path to dataset')
    parser.add_argument('--n-class', type=int, default=32, help='number of class')
    parser.add_argument('--train-image-pointer-path', default='./data/train_image_pointer', help='path to train image pointer')
    parser.add_argument('--test-image-pointer-path', default='./data/test_image_pointer', help='path to test image pointer')
    parser.add_argument('--resume-model', default='./results/_encnet_ckpt.pth', help='path to trained model')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument('--batch-size', type=int, default=16, help='input batch size')
    parser.add_argument('--image-size', type=int, default=256, help='the height / width of the input image to network')
    parser.add_argument('--epochs', type=int, default=200, help='number of epochs to train for')
    parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
    parser.add_argument('--out-dir', default='./results', help='folder to output images and model checkpoints')
    args = parser.parse_args()
    # Ensure the checkpoint/output directory exists before training starts.
    # (The stray trailing comma that turned this call into a 1-tuple
    # expression has been removed.)
    Path(args.out_dir).mkdir(parents=True, exist_ok=True)
    main(args)
| [
"data.data_loader.loader",
"data.data_loader.CamVidDataset",
"argparse.ArgumentParser",
"torch.manual_seed",
"torch.load",
"torch.nn.functional.mse_loss",
"numpy.zeros",
"torch.FloatTensor",
"pathlib.Path",
"numpy.where",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"sk... | [((444, 466), 'torch.manual_seed', 'torch.manual_seed', (['(555)'], {}), '(555)\n', (461, 466), False, 'import torch\n'), ((555, 592), 'pathlib.Path', 'Path', (['data_root_dir', 'label_table_name'], {}), '(data_root_dir, label_table_name)\n', (559, 592), False, 'from pathlib import Path\n'), ((628, 649), 'model.enc_net.enc_net', 'enc_net', (['args.n_class'], {}), '(args.n_class)\n', (635, 649), False, 'from model.enc_net import enc_net\n'), ((1331, 1378), 'data.data_loader.CamVidDataset', 'CamVidDataset', (['train_image_names', 'args.root_dir'], {}), '(train_image_names, args.root_dir)\n', (1344, 1378), False, 'from data.data_loader import CamVidDataset, loader\n'), ((1399, 1445), 'data.data_loader.CamVidDataset', 'CamVidDataset', (['test_image_names', 'args.root_dir'], {}), '(test_image_names, args.root_dir)\n', (1412, 1445), False, 'from data.data_loader import CamVidDataset, loader\n'), ((1466, 1504), 'data.data_loader.loader', 'loader', (['train_dataset', 'args.batch_size'], {}), '(train_dataset, args.batch_size)\n', (1472, 1504), False, 'from data.data_loader import CamVidDataset, loader\n'), ((1524, 1562), 'data.data_loader.loader', 'loader', (['test_dataset', '(1)'], {'shuffle': '(False)'}), '(test_dataset, 1, shuffle=False)\n', (1530, 1562), False, 'from data.data_loader import CamVidDataset, loader\n'), ((4595, 4620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4618, 4620), False, 'import argparse\n'), ((3017, 3032), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3030, 3032), False, 'import torch\n'), ((4057, 4076), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {}), '((h, w, 3))\n', (4065, 4076), True, 'import numpy as np\n'), ((4512, 4526), 'skimage.io.imshow', 'io.imshow', (['img'], {}), '(img)\n', (4521, 4526), False, 'from skimage import io\n'), ((660, 683), 'pathlib.Path', 'Path', (['args.resume_model'], {}), '(args.resume_model)\n', (664, 683), False, 'from pathlib import Path\n'), ((774, 803), 'torch.load', 
'torch.load', (['args.resume_model'], {}), '(args.resume_model)\n', (784, 803), False, 'import torch\n'), ((2267, 2297), 'torch.FloatTensor', 'torch.FloatTensor', (['exist_class'], {}), '(exist_class)\n', (2284, 2297), False, 'import torch\n'), ((2321, 2349), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['se2', 'exist_class'], {}), '(se2, exist_class)\n', (2331, 2349), True, 'import torch.nn.functional as F\n'), ((2371, 2399), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['se1', 'exist_class'], {}), '(se1, exist_class)\n', (2381, 2399), True, 'import torch.nn.functional as F\n'), ((4137, 4165), 'numpy.where', 'np.where', (['(labels == category)'], {}), '(labels == category)\n', (4145, 4165), True, 'import numpy as np\n'), ((2000, 2021), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {}), '(output)\n', (2013, 2021), True, 'import torch.nn.functional as F\n'), ((5912, 5930), 'pathlib.Path', 'Path', (['args.out_dir'], {}), '(args.out_dir)\n', (5916, 5930), False, 'from pathlib import Path\n'), ((3212, 3258), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'size_average': '(False)'}), '(output, target, size_average=False)\n', (3222, 3258), True, 'import torch.nn.functional as F\n')] |
# Copyright (c) <NAME>, <NAME>, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Useful Tools."""
from typing import Dict, Optional, Union
import numpy as np
from sklearn.utils import check_random_state
import torch
def check_confidence_interval_arguments(
    alpha: float = 0.05,
    n_bootstrap_samples: int = 10000,
    random_state: Optional[int] = None,
) -> Optional[ValueError]:
    """Check confidence interval arguments.

    Parameters
    ----------
    alpha: float, default=0.05
        Significant level of confidence intervals.

    n_bootstrap_samples: int, default=10000
        Number of resampling performed in the bootstrap procedure.

    random_state: int, default=None
        Controls the random seed in bootstrap sampling.

    Raises
    ----------
    ValueError
        When `alpha` is not a float in (0, 1), `n_bootstrap_samples` is not
        a positive integer, or `random_state` is neither None nor an integer.
        (The previous docstring wrongly documented a returned confidence
        interval dict; this function only validates and returns None.)
    """
    if not (isinstance(alpha, float) and (0.0 < alpha < 1.0)):
        raise ValueError(
            f"alpha must be a positive float (< 1.0), but {alpha} is given"
        )
    if not (isinstance(n_bootstrap_samples, int) and n_bootstrap_samples > 0):
        raise ValueError(
            f"n_bootstrap_samples must be a positive integer, but {n_bootstrap_samples} is given"
        )
    if random_state is not None and not isinstance(random_state, int):
        raise ValueError(
            f"random_state must be an integer, but {random_state} is given"
        )
def estimate_confidence_interval_by_bootstrap(
    samples: np.ndarray,
    alpha: float = 0.05,
    n_bootstrap_samples: int = 10000,
    random_state: Optional[int] = None,
) -> Dict[str, float]:
    """Estimate confidence interval by nonparametric bootstrap-like procedure.

    Parameters
    ----------
    samples: array-like
        Empirical observed samples to be used to estimate cumulative distribution function.

    alpha: float, default=0.05
        Significant level of confidence intervals.

    n_bootstrap_samples: int, default=10000
        Number of resampling performed in the bootstrap procedure.

    random_state: int, default=None
        Controls the random seed in bootstrap sampling.

    Returns
    ----------
    estimated_confidence_interval: Dict[str, float]
        Dictionary storing the estimated mean and upper-lower confidence bounds.
    """
    check_confidence_interval_arguments(
        alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state
    )
    random_ = check_random_state(random_state)
    sample_size = samples.shape[0]
    # Means of `n_bootstrap_samples` resamples drawn with replacement.
    boot_means = [
        np.mean(random_.choice(samples, size=sample_size))
        for _ in np.arange(n_bootstrap_samples)
    ]
    lower_bound = np.percentile(boot_means, 100 * (alpha / 2))
    upper_bound = np.percentile(boot_means, 100 * (1.0 - alpha / 2))
    return {
        "mean": np.mean(boot_means),
        f"{100 * (1. - alpha)}% CI (lower)": lower_bound,
        f"{100 * (1. - alpha)}% CI (upper)": upper_bound,
    }
def convert_to_action_dist(
    n_actions: int,
    selected_actions: np.ndarray,
) -> np.ndarray:
    """Convert selected actions (output of `run_bandit_simulation`) to distribution over actions.

    Parameters
    ----------
    n_actions: int
        Number of actions.

    selected_actions: array-like, shape (n_rounds, len_list)
        Sequence of actions selected by evaluation policy
        at each round in offline bandit simulation.

    Returns
    ----------
    action_dist: array-like, shape (n_rounds, n_actions, len_list)
        Action choice probabilities (can be deterministic).
    """
    n_rounds, len_list = selected_actions.shape
    action_dist = np.zeros((n_rounds, n_actions, len_list))
    round_indices = np.arange(n_rounds)
    # Put probability mass 1 on the action chosen at each (round, position).
    for pos in range(len_list):
        action_dist[round_indices, selected_actions[:, pos], pos] = 1
    return action_dist
def check_bandit_feedback_inputs(
    context: np.ndarray,
    action: np.ndarray,
    reward: np.ndarray,
    expected_reward: Optional[np.ndarray] = None,
    position: Optional[np.ndarray] = None,
    pscore: Optional[np.ndarray] = None,
    action_context: Optional[np.ndarray] = None,
) -> Optional[ValueError]:
    """Check inputs for bandit learning or simulation.

    Parameters
    -----------
    context: array-like, shape (n_rounds, dim_context)
        Context vectors in each round, i.e., :math:`x_t`.

    action: array-like, shape (n_rounds,)
        Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

    reward: array-like, shape (n_rounds,)
        Observed rewards (or outcome) in each round, i.e., :math:`r_t`.

    expected_reward: array-like, shape (n_rounds, n_actions), default=None
        Expected rewards (or outcome) in each round, i.e., :math:`\\mathbb{E}[r_t]`.

    position: array-like, shape (n_rounds,), default=None
        Positions of each round in the given logged bandit feedback.

    pscore: array-like, shape (n_rounds,), default=None
        Propensity scores, the probability of selecting each action by behavior policy,
        in the given logged bandit feedback.

    action_context: array-like, shape (n_actions, dim_action_context)
        Context vectors characterizing each action.

    Raises
    -----------
    ValueError
        When any input has the wrong type, dimensionality, size, or value range.
    """
    # --- required arrays: type and dimensionality ---
    if not isinstance(context, np.ndarray):
        raise ValueError("context must be ndarray")
    if context.ndim != 2:
        raise ValueError("context must be 2-dimensional")
    if not isinstance(action, np.ndarray):
        raise ValueError("action must be ndarray")
    if action.ndim != 1:
        raise ValueError("action must be 1-dimensional")
    if not isinstance(reward, np.ndarray):
        raise ValueError("reward must be ndarray")
    if reward.ndim != 1:
        raise ValueError("reward must be 1-dimensional")
    # actions index into reward/context tables, so must be non-negative ints
    if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):
        raise ValueError("action elements must be non-negative integers")
    # --- optional expected_reward: shape consistency with required arrays ---
    if expected_reward is not None:
        if not isinstance(expected_reward, np.ndarray):
            raise ValueError("expected_reward must be ndarray")
        if expected_reward.ndim != 2:
            raise ValueError("expected_reward must be 2-dimensional")
        if not (
            context.shape[0]
            == action.shape[0]
            == reward.shape[0]
            == expected_reward.shape[0]
        ):
            raise ValueError(
                "context, action, reward, and expected_reward must be the same size."
            )
        if action.max() >= expected_reward.shape[1]:
            raise ValueError(
                "action elements must be smaller than the size of the second dimension of expected_reward"
            )
    # --- optional pscore: positive propensities, matching length ---
    if pscore is not None:
        if not isinstance(pscore, np.ndarray):
            raise ValueError("pscore must be ndarray")
        if pscore.ndim != 1:
            raise ValueError("pscore must be 1-dimensional")
        if not (
            context.shape[0] == action.shape[0] == reward.shape[0] == pscore.shape[0]
        ):
            raise ValueError(
                "context, action, reward, and pscore must be the same size."
            )
        if np.any(pscore <= 0):
            raise ValueError("pscore must be positive")
    # --- position: when given, non-negative ints of matching length;
    #     otherwise only the lengths of the required arrays are checked ---
    if position is not None:
        if not isinstance(position, np.ndarray):
            raise ValueError("position must be ndarray")
        if position.ndim != 1:
            raise ValueError("position must be 1-dimensional")
        if not (
            context.shape[0] == action.shape[0] == reward.shape[0] == position.shape[0]
        ):
            raise ValueError(
                "context, action, reward, and position must be the same size."
            )
        if not (np.issubdtype(position.dtype, np.integer) and position.min() >= 0):
            raise ValueError("position elements must be non-negative integers")
    else:
        if not (context.shape[0] == action.shape[0] == reward.shape[0]):
            raise ValueError("context, action, and reward must be the same size.")
    # --- optional action_context: every action must index into it ---
    if action_context is not None:
        if not isinstance(action_context, np.ndarray):
            raise ValueError("action_context must be ndarray")
        if action_context.ndim != 2:
            raise ValueError("action_context must be 2-dimensional")
        if action.max() >= action_context.shape[0]:
            raise ValueError(
                "action elements must be smaller than the size of the first dimension of action_context"
            )
def check_ope_inputs(
    action_dist: np.ndarray,
    position: Optional[np.ndarray] = None,
    action: Optional[np.ndarray] = None,
    reward: Optional[np.ndarray] = None,
    pscore: Optional[np.ndarray] = None,
    estimated_rewards_by_reg_model: Optional[np.ndarray] = None,
) -> Optional[ValueError]:
    """Check inputs for bandit learning or simulation.

    Parameters
    -----------
    action_dist: array-like, shape (n_rounds, n_actions, len_list)
        Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

    position: array-like, shape (n_rounds,), default=None
        Positions of each round in the given logged bandit feedback.

    action: array-like, shape (n_rounds,), default=None
        Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

    reward: array-like, shape (n_rounds,), default=None
        Observed rewards (or outcome) in each round, i.e., :math:`r_t`.

    pscore: array-like, shape (n_rounds,), default=None
        Propensity scores, the probability of selecting each action by behavior policy,
        in the given logged bandit feedback.

    estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None
        Expected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

    Raises
    -----------
    ValueError
        When any input has the wrong type, dimensionality, size, or value range.
    """
    # action_dist: 3-dimensional and normalized over the action axis
    if not isinstance(action_dist, np.ndarray):
        raise ValueError("action_dist must be ndarray")
    if action_dist.ndim != 3:
        raise ValueError(
            f"action_dist.ndim must be 3-dimensional, but is {action_dist.ndim}"
        )
    if not np.allclose(action_dist.sum(axis=1), 1):
        raise ValueError("action_dist must be a probability distribution")
    # position: required (implicitly all-zero) unless len_list == 1
    if position is not None:
        if not isinstance(position, np.ndarray):
            raise ValueError("position must be ndarray")
        if position.ndim != 1:
            raise ValueError("position must be 1-dimensional")
        if not (position.shape[0] == action_dist.shape[0]):
            raise ValueError(
                "the first dimension of position and the first dimension of action_dist must be the same"
            )
        if not (np.issubdtype(position.dtype, np.integer) and position.min() >= 0):
            raise ValueError("position elements must be non-negative integers")
        if position.max() >= action_dist.shape[2]:
            raise ValueError(
                "position elements must be smaller than the third dimension of action_dist"
            )
    elif action_dist.shape[2] > 1:
        raise ValueError(
            "position elements must be given when the third dimension of action_dist is greater than 1"
        )
    # estimated_rewards_by_reg_model: must mirror action_dist's shape
    if estimated_rewards_by_reg_model is not None:
        if not isinstance(estimated_rewards_by_reg_model, np.ndarray):
            raise ValueError("estimated_rewards_by_reg_model must be ndarray")
        if estimated_rewards_by_reg_model.shape != action_dist.shape:
            raise ValueError(
                "estimated_rewards_by_reg_model.shape must be the same as action_dist.shape"
            )
    # action, reward: validated together since they must be paired
    if action is not None or reward is not None:
        if not isinstance(action, np.ndarray):
            raise ValueError("action must be ndarray")
        if action.ndim != 1:
            raise ValueError("action must be 1-dimensional")
        if not isinstance(reward, np.ndarray):
            raise ValueError("reward must be ndarray")
        if reward.ndim != 1:
            raise ValueError("reward must be 1-dimensional")
        if not (action.shape[0] == reward.shape[0]):
            raise ValueError("action and reward must be the same size.")
        if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):
            raise ValueError("action elements must be non-negative integers")
        if action.max() >= action_dist.shape[1]:
            raise ValueError(
                "action elements must be smaller than the second dimension of action_dist"
            )
    # pscore
    # NOTE(review): this branch reads action.shape and reward.shape, so
    # passing pscore without action/reward raises AttributeError rather
    # than a clean ValueError — confirm whether that is intended.
    if pscore is not None:
        if not isinstance(pscore, np.ndarray):
            raise ValueError("pscore must be ndarray")
        if pscore.ndim != 1:
            raise ValueError("pscore must be 1-dimensional")
        if not (action.shape[0] == reward.shape[0] == pscore.shape[0]):
            raise ValueError("action, reward, and pscore must be the same size.")
        if np.any(pscore <= 0):
            raise ValueError("pscore must be positive")
def check_ope_inputs_tensor(
    action_dist: torch.Tensor,
    position: Optional[torch.Tensor] = None,
    action: Optional[torch.Tensor] = None,
    reward: Optional[torch.Tensor] = None,
    pscore: Optional[torch.Tensor] = None,
    estimated_rewards_by_reg_model: Optional[torch.Tensor] = None,
) -> Optional[ValueError]:
    """Check inputs for bandit learning or simulation.
    This is intended for being used with NNPolicyLearner.
    Parameters
    -----------
    action_dist: Tensor, shape (n_rounds, n_actions, len_list)
        Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
    position: Tensor, shape (n_rounds,), default=None
        Positions of each round in the given logged bandit feedback.
    action: Tensor, shape (n_rounds,), default=None
        Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.
    reward: Tensor, shape (n_rounds,), default=None
        Observed rewards (or outcome) in each round, i.e., :math:`r_t`.
    pscore: Tensor, shape (n_rounds,), default=None
        Propensity scores, the probability of selecting each action by behavior policy,
        in the given logged bandit feedback.
    estimated_rewards_by_reg_model: Tensor, shape (n_rounds, n_actions, len_list), default=None
        Expected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
    Raises
    ------
    ValueError
        If any of the given inputs fails validation; returns None otherwise.
    """
    # action_dist: must be a 3-d Tensor whose action axis (axis 1) sums to
    # one for every (round, position) pair, i.e. a probability distribution.
    if not isinstance(action_dist, torch.Tensor):
        raise ValueError("action_dist must be Tensor")
    if action_dist.ndim != 3:
        raise ValueError(
            f"action_dist.ndim must be 3-dimensional, but is {action_dist.ndim}"
        )
    action_dist_sum = action_dist.sum(axis=1)
    action_dist_ones = torch.ones_like(action_dist_sum)
    if not torch.allclose(action_dist_sum, action_dist_ones):
        raise ValueError("action_dist must be a probability distribution")
    # position: optional 1-d int64 tensor of per-round slot indices.
    if position is not None:
        if not isinstance(position, torch.Tensor):
            raise ValueError("position must be Tensor")
        if position.ndim != 1:
            raise ValueError("position must be 1-dimensional")
        if not (position.shape[0] == action_dist.shape[0]):
            raise ValueError(
                "the first dimension of position and the first dimension of action_dist must be the same"
            )
        if not (position.dtype == torch.int64 and position.min() >= 0):
            raise ValueError("position elements must be non-negative integers")
        # each position must index into the len_list axis of action_dist
        if position.max() >= action_dist.shape[2]:
            raise ValueError(
                "position elements must be smaller than the third dimension of action_dist"
            )
    elif action_dist.shape[2] > 1:
        # position may only be omitted when len_list == 1
        raise ValueError(
            "position elements must be given when the third dimension of action_dist is greater than 1"
        )
    # estimated_rewards_by_reg_model: optional tensor, same shape as action_dist.
    if estimated_rewards_by_reg_model is not None:
        if not isinstance(estimated_rewards_by_reg_model, torch.Tensor):
            raise ValueError("estimated_rewards_by_reg_model must be Tensor")
        if estimated_rewards_by_reg_model.shape != action_dist.shape:
            raise ValueError(
                "estimated_rewards_by_reg_model.shape must be the same as action_dist.shape"
            )
    # action, reward: if either is given, both must be valid 1-d tensors
    # of equal length, and actions must index into the action axis.
    if action is not None or reward is not None:
        if not isinstance(action, torch.Tensor):
            raise ValueError("action must be Tensor")
        if action.ndim != 1:
            raise ValueError("action must be 1-dimensional")
        if not isinstance(reward, torch.Tensor):
            raise ValueError("reward must be Tensor")
        if reward.ndim != 1:
            raise ValueError("reward must be 1-dimensional")
        if not (action.shape[0] == reward.shape[0]):
            raise ValueError("action and reward must be the same size.")
        if not (action.dtype == torch.int64 and action.min() >= 0):
            raise ValueError("action elements must be non-negative integers")
        if action.max() >= action_dist.shape[1]:
            raise ValueError(
                "action elements must be smaller than the second dimension of action_dist"
            )
        # pscore: optional 1-d tensor of strictly positive propensity scores,
        # same length as action/reward (only checked when action/reward given).
        if pscore is not None:
            if not isinstance(pscore, torch.Tensor):
                raise ValueError("pscore must be Tensor")
            if pscore.ndim != 1:
                raise ValueError("pscore must be 1-dimensional")
            if not (action.shape[0] == reward.shape[0] == pscore.shape[0]):
                raise ValueError("action, reward, and pscore must be the same size.")
            if torch.any(pscore <= 0):
                raise ValueError("pscore must be positive")
def sigmoid(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Calculate sigmoid function in a numerically stable way.

    Evaluates ``1 / (1 + exp(-x))`` without overflowing ``np.exp`` for
    large-magnitude negative inputs: where ``x < 0`` the mathematically
    equivalent form ``exp(x) / (1 + exp(x))`` is used instead, so the
    exponential is only ever taken of a non-positive number.

    Parameters
    ----------
    x: float or ndarray
        Input value(s).

    Returns
    -------
    float or ndarray
        Sigmoid of ``x``, with the same shape as the input (a scalar input
        yields a scalar output).
    """
    x = np.asarray(x)
    # exp of a non-positive argument can underflow but never overflow
    z = np.exp(-np.abs(x))
    # x >= 0: 1 / (1 + e^{-x});  x < 0: e^{x} / (1 + e^{x})
    out = np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
    # `[()]` unwraps a 0-d array back to a scalar; it is a no-op for n-d arrays
    return out[()]
def softmax(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Calculate softmax function."""
    # Subtract the row-wise maximum before exponentiating; this is the
    # standard numerical-stability trick and leaves the result unchanged.
    shifted = x - np.max(x, axis=1, keepdims=True)
    exp_shifted = np.exp(shifted)
    # Normalize each row so that it sums to one.
    return exp_shifted / np.sum(exp_shifted, axis=1, keepdims=True)
| [
"torch.ones_like",
"sklearn.utils.check_random_state",
"numpy.sum",
"torch.any",
"numpy.zeros",
"numpy.ones",
"numpy.percentile",
"numpy.any",
"numpy.max",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"torch.allclose",
"numpy.issubdtype"
] | [((2598, 2630), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (2616, 2630), False, 'from sklearn.utils import check_random_state\n'), ((2644, 2674), 'numpy.arange', 'np.arange', (['n_bootstrap_samples'], {}), '(n_bootstrap_samples)\n', (2653, 2674), True, 'import numpy as np\n'), ((2779, 2825), 'numpy.percentile', 'np.percentile', (['boot_samples', '(100 * (alpha / 2))'], {}), '(boot_samples, 100 * (alpha / 2))\n', (2792, 2825), True, 'import numpy as np\n'), ((2844, 2896), 'numpy.percentile', 'np.percentile', (['boot_samples', '(100 * (1.0 - alpha / 2))'], {}), '(boot_samples, 100 * (1.0 - alpha / 2))\n', (2857, 2896), True, 'import numpy as np\n'), ((3757, 3798), 'numpy.zeros', 'np.zeros', (['(n_rounds, n_actions, len_list)'], {}), '((n_rounds, n_actions, len_list))\n', (3765, 3798), True, 'import numpy as np\n'), ((3814, 3833), 'numpy.arange', 'np.arange', (['len_list'], {}), '(len_list)\n', (3823, 3833), True, 'import numpy as np\n'), ((15164, 15196), 'torch.ones_like', 'torch.ones_like', (['action_dist_sum'], {}), '(action_dist_sum)\n', (15179, 15196), False, 'import torch\n'), ((18456, 18469), 'numpy.exp', 'np.exp', (['(x - b)'], {}), '(x - b)\n', (18462, 18469), True, 'import numpy as np\n'), ((2926, 2947), 'numpy.mean', 'np.mean', (['boot_samples'], {}), '(boot_samples)\n', (2933, 2947), True, 'import numpy as np\n'), ((7353, 7372), 'numpy.any', 'np.any', (['(pscore <= 0)'], {}), '(pscore <= 0)\n', (7359, 7372), True, 'import numpy as np\n'), ((13264, 13283), 'numpy.any', 'np.any', (['(pscore <= 0)'], {}), '(pscore <= 0)\n', (13270, 13283), True, 'import numpy as np\n'), ((15208, 15257), 'torch.allclose', 'torch.allclose', (['action_dist_sum', 'action_dist_ones'], {}), '(action_dist_sum, action_dist_ones)\n', (15222, 15257), False, 'import torch\n'), ((18063, 18085), 'torch.any', 'torch.any', (['(pscore <= 0)'], {}), '(pscore <= 0)\n', (18072, 18085), False, 'import torch\n'), ((18407, 18424), 
'numpy.max', 'np.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (18413, 18424), True, 'import numpy as np\n'), ((18488, 18513), 'numpy.sum', 'np.sum', (['numerator'], {'axis': '(1)'}), '(numerator, axis=1)\n', (18494, 18513), True, 'import numpy as np\n'), ((5992, 6031), 'numpy.issubdtype', 'np.issubdtype', (['action.dtype', 'np.integer'], {}), '(action.dtype, np.integer)\n', (6005, 6031), True, 'import numpy as np\n'), ((18277, 18287), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (18283, 18287), True, 'import numpy as np\n'), ((3921, 3940), 'numpy.arange', 'np.arange', (['n_rounds'], {}), '(n_rounds)\n', (3930, 3940), True, 'import numpy as np\n'), ((7915, 7956), 'numpy.issubdtype', 'np.issubdtype', (['position.dtype', 'np.integer'], {}), '(position.dtype, np.integer)\n', (7928, 7956), True, 'import numpy as np\n'), ((10987, 11028), 'numpy.issubdtype', 'np.issubdtype', (['position.dtype', 'np.integer'], {}), '(position.dtype, np.integer)\n', (11000, 11028), True, 'import numpy as np\n'), ((12540, 12579), 'numpy.issubdtype', 'np.issubdtype', (['action.dtype', 'np.integer'], {}), '(action.dtype, np.integer)\n', (12553, 12579), True, 'import numpy as np\n'), ((3991, 4013), 'numpy.ones', 'np.ones', (['n_rounds', 'int'], {}), '(n_rounds, int)\n', (3998, 4013), True, 'import numpy as np\n')] |
import numpy as np
from streamEmbedding import StreamEmbedding
class RandomSums(StreamEmbedding):
    """Streaming embedding that accumulates each incoming vector, with a
    uniformly random sign, into a uniformly chosen row of the sketch
    (a CountSketch-style random-sums projection).
    """

    def __init__(self, n, d):
        super().__init__(n, d)
        # Candidate signs, applied to incoming vectors with equal probability.
        self.signs = [1.0, -1.0]

    def append(self, vector):
        """Add ``vector``, multiplied by a random sign, to a random sketch row."""
        row = np.random.randint(self.d)
        sign = np.random.choice(self.signs)
        self._sketch[row, :] += sign * vector
| [
"numpy.random.randint",
"numpy.random.choice"
] | [((240, 265), 'numpy.random.randint', 'np.random.randint', (['self.d'], {}), '(self.d)\n', (257, 265), True, 'import numpy as np\n'), ((281, 309), 'numpy.random.choice', 'np.random.choice', (['self.signs'], {}), '(self.signs)\n', (297, 309), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import src.main_2d as main
import src.systemO1 as system
import utilities as util
# System
ndim = 2  # spatial dimension of the problem
system_type = "waveO1"  # identifier of the PDE system (see src.systemO1)
xDiscretisation_type = "dG"  # spatial discretisation: discontinuous Galerkin
tIntegrators = ["dG", "Crank-Nicolson"]  # available time integrators
meshes = ["quasiUniform", "bisectionRefined"]  # available mesh families
def call_runFG(stabParamsType):
    """Assemble the full-grid run configuration and launch the 2d solver.

    ``stabParamsType`` selects the stabilisation-parameter variant passed
    through to the scheme.
    """
    spatial_degree = 4
    temporal_degree = 1
    config = {'ndim': ndim,
              'system': system_type,
              'test case': "test1_smooth_squareDomain",
              'deg_x_v': spatial_degree,
              'deg_x_sigma': spatial_degree - 1,
              'deg_t': temporal_degree,
              'time integrator': tIntegrators[0],
              'spatial discretisation': xDiscretisation_type,
              'mesh type': meshes[0],
              'stab params type': stabParamsType,
              'save xt sol': False,
              'write output': False,
              'output dir': "output/waveO1/",
              'output filename': "runFG_test3",
              'error type': "L2L2",
              'bool measure signal': True,
              'bool write signal': True,
              'signal outFile': "output/scattering_signal.txt",
              'dump sol': True,
              'dump sol subdir': "test3/",
              'dump sol at time': np.array([0.1, 0.2, 0.3, 0.4, 0.5])}
    util.print_config(config)
    # Run the full-grid scheme at refinement levels 4 (space) and 6 (time).
    system.runFG_2d(config, main.schemeFG_2d, 4, 6)
if __name__ == "__main__":
    # Stabilisation-parameter variant used for this run.
    call_runFG(4)
# End of file
| [
"numpy.array",
"utilities.print_config",
"src.systemO1.runFG_2d"
] | [((1278, 1300), 'utilities.print_config', 'util.print_config', (['cfg'], {}), '(cfg)\n', (1295, 1300), True, 'import utilities as util\n'), ((1328, 1374), 'src.systemO1.runFG_2d', 'system.runFG_2d', (['cfg', 'main.schemeFG_2d', 'lx', 'lt'], {}), '(cfg, main.schemeFG_2d, lx, lt)\n', (1343, 1374), True, 'import src.systemO1 as system\n'), ((1224, 1259), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4, 0.5]'], {}), '([0.1, 0.2, 0.3, 0.4, 0.5])\n', (1232, 1259), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from time import time
import matplotlib.pyplot as plt
from quantfin.statistics import marchenko_pastur, detone, cov2corr
from quantfin.portfolio import Markowitz, BlackLitterman, HRP
# User defined parameters
ew_com = 21 * 3  # EWM center of mass: roughly one quarter of business days
# fixed parameters
file_path = r'/Users/gustavoamarante/Dropbox/CQF/Final Project/'  # Mac
# file_path = r'/Users/gusamarante/Dropbox/CQF/Final Project/'  # Macbook
tic = time()  # wall-clock start, reported at the end of the script
# =========================
# ===== READ THE DATA =====
# =========================
# Read Bloomberg Tickers and create dictionaries for renaming.
# Each dict is inverted so it maps Bloomberg ticker -> currency label,
# which is the direction needed by DataFrame.rename(axis=1) below.
df_tickers = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',
                           index_col=0, sheet_name='Tickers')
tr_dict = df_tickers['Total Return Index (UBS)'].to_dict()
tr_dict = {v: k for k, v in tr_dict.items()}
fwd_dict = df_tickers['Forward 3m (in bps)'].to_dict()
fwd_dict = {v: k for k, v in fwd_dict.items()}
spot_dict = df_tickers['Spot'].to_dict()
spot_dict = {v: k for k, v in spot_dict.items()}
ppp_dict = df_tickers['PPP'].to_dict()
ppp_dict = {v: k for k, v in ppp_dict.items()}
# Read Total Return Index
df_tr = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',
                      index_col=0, sheet_name='Total Return')
df_tr = df_tr.rename(tr_dict, axis=1)
# Read fwds (3-month forward points, quoted in bps)
df_fwd = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',
                       index_col=0, sheet_name='FWD 3M')
df_fwd = df_fwd.rename(fwd_dict, axis=1)
# Read Spot
df_spot = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',
                        index_col=0, sheet_name='Spot')
df_spot = df_spot.rename(spot_dict, axis=1)
# Read PPP
df_ppp = pd.read_excel(file_path + r'Data - BBG Data Values.xlsx',
                       index_col=0, sheet_name='PPP')
df_ppp = df_ppp.rename(ppp_dict, axis=1)
# Read Libor (quoted in percent; divided by 100 to decimal form below)
df_libor = pd.read_excel(file_path + r'Data - LIBOR.xlsx',
                         index_col=0, sheet_name='LIBOR', na_values=['#N/A'])
df_libor = df_libor.fillna(method='ffill') / 100
# ========================
# ===== COMPUTATIONS =====
# ========================
# exponentially weighted covariance/correlation of daily returns
df_cov = df_tr.pct_change(1).ewm(ew_com, min_periods=ew_com).cov()
df_cov = df_cov.dropna(how='all') * 3 * 21  # covariances from daily to quarterly
df_corr = df_tr.pct_change(1).ewm(ew_com, min_periods=ew_com).corr()
df_corr = df_corr.dropna(how='all')
# Carry: implied 3m carry from forward points (forwards are quoted in bps,
# hence the division by 10000 to bring them to price units)
df_carry = (df_spot + df_fwd/10000) / df_spot - 1
# Value: PPP deviation from spot
# NOTE(review): meaning of the 0.0445 scaling factor is not evident from
# this file — confirm against the project write-up.
df_value = (1 - df_ppp / df_spot) * 0.0445
df_value = df_value.dropna(axis=1, how='all')
# Momentum: trailing 3-month (63 business days) total return
df_mom = df_tr.pct_change(21 * 3)
# ============================
# ===== STATIC PORTFOLIO =====
# ============================
# date for the static portfolio (last date with a LIBOR observation)
last_date = df_libor.index[-1]
print(last_date)
# 3-month volatility, taken from the diagonal of the quarterly covariance
vols = pd.Series(data=np.sqrt(df_cov.loc[last_date].values.diagonal()),
                index=df_tr.columns, name='Vol')
# Dataframe that is going to hold the weights of different methods for comparison.
df_weights = pd.DataFrame(index=df_tr.columns)
# ----- equal weighted -----
df_weights['Equal Weighted'] = 1 / df_tr.shape[1]
# ----- inverse volatility (weights proportional to 1/vol) -----
aux = 1 / vols
df_weights['Inverse Volatility'] = aux / aux.sum()
# ----- Hierarchical Risk Parity -----
hrp = HRP(cov=df_cov.loc[last_date])
df_weights['Hierarchical Risk Parity'] = hrp.weights
# ----- Detoned HRP (market mode removed from the correlation matrix) -----
corr_detoned = detone(df_corr.loc[last_date])
dhrp = HRP(cov=df_cov.loc[last_date], corr=corr_detoned)
df_weights['Detoned Hierarchical Risk Parity'] = dhrp.weights
# ----- Black-Litterman + Markowitz -----
# generate the matrices of views: P picks the assets, v holds the view values
P = pd.DataFrame()
v = pd.Series()
# add carry views (one absolute view per currency with a carry signal)
for ccy in df_tr.columns:
    try:
        v.loc[f'{ccy} carry'] = df_carry.loc[last_date, ccy]
        P.loc[f'{ccy} carry', ccy] = 1
    except KeyError:  # If a currency does not have a carry signal, skips this view.
        continue
# add value views
for ccy in df_tr.columns:
    try:
        v.loc[f'{ccy} value'] = df_value.loc[last_date, ccy]
        P.loc[f'{ccy} value', ccy] = 1
    except KeyError:  # If a currency does not have a value signal, skips this view.
        continue
P = P.fillna(0)
v = v.to_frame('Views')
# denoise the covariance with the Marchenko-Pastur filter
mp_corr, _, _ = marchenko_pastur(df_corr.loc[last_date],
                                 T=21 * 3, N=df_tr.shape[1])
# rebuild a covariance from the denoised correlation and the vols
mp_cov = pd.DataFrame(data=np.diag(vols) @ mp_corr @ np.diag(vols),
                      index=vols.index, columns=vols.index)
bl = BlackLitterman(sigma=mp_cov,
                    estimation_error=1 / (21 * 3),
                    views_p=P,
                    views_v=v,
                    w_equilibrium=df_weights['Inverse Volatility'].to_frame(),
                    avg_risk_aversion=1.2,
                    mu_historical=df_mom.loc[last_date].to_frame('Historical'),
                    mu_shrink=0.99,  # needs to be tuned
                    overall_confidence=100)  # needs to be tuned
# Markowitz optimization on the Black-Litterman posterior moments
vol_bl = pd.Series(data=np.sqrt(np.diag(bl.sigma_bl)), index=bl.sigma_bl.index)
corr_bl = cov2corr(bl.sigma_bl)
mkw = Markowitz(mu=bl.mu_bl,
                sigma=vol_bl,
                corr=corr_bl,
                rf=(1 + df_libor.loc[last_date, 'US 3m LIBOR']) ** 0.25 - 1,
                risk_aversion=1.2)
# NOTE(review): "Marchanko" is a typo for "Marchenko" in the column label
# below; left unchanged because it feeds the chart legend.
df_weights['Marchanko-Pastur + Black-Litterman'] = mkw.risky_weights
# === END ===
toc = time()
print(round(toc - tic, 1), 'seconds')  # elapsed wall-clock time
# ===== CHART =====
# chart weights: one horizontal bar group per currency, one bar per method
df_plot = df_weights.sort_index(ascending=False)
df_plot.plot(kind='barh', figsize=(6, 10), width=0.8)
plt.grid(axis='x')
plt.axvline(0, color='black', linewidth=1)
plt.tight_layout()
plt.savefig(file_path + r'figures/Static Weights.pdf', pad_inches=0)
plt.show()
# Chart signals: carry, value and momentum readings as of last_date
df_plot = pd.concat([df_carry.loc[last_date].rename('Carry'),
                     df_value.loc[last_date].rename('Value'),
                     df_mom.loc[last_date].rename('Momentum')], axis=1)
df_plot = df_plot.sort_index(ascending=False)
df_plot.plot(kind='barh', figsize=(6, 10), width=0.8)
plt.grid(axis='x')
plt.axvline(0, color='black', linewidth=1)
plt.tight_layout()
plt.savefig(file_path + r'figures/Static Signals.pdf', pad_inches=0)
plt.show()
| [
"quantfin.portfolio.HRP",
"pandas.DataFrame",
"quantfin.statistics.detone",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"quantfin.statistics.marchenko_pastur",
"quantfin.statistics.cov2corr",
"time.time",
"pandas.read_excel",
"quantfin.portfolio.Marko... | [((438, 444), 'time.time', 'time', ([], {}), '()\n', (442, 444), False, 'from time import time\n'), ((604, 699), 'pandas.read_excel', 'pd.read_excel', (["(file_path + 'Data - BBG Data Values.xlsx')"], {'index_col': '(0)', 'sheet_name': '"""Tickers"""'}), "(file_path + 'Data - BBG Data Values.xlsx', index_col=0,\n sheet_name='Tickers')\n", (617, 699), True, 'import pandas as pd\n'), ((1145, 1245), 'pandas.read_excel', 'pd.read_excel', (["(file_path + 'Data - BBG Data Values.xlsx')"], {'index_col': '(0)', 'sheet_name': '"""Total Return"""'}), "(file_path + 'Data - BBG Data Values.xlsx', index_col=0,\n sheet_name='Total Return')\n", (1158, 1245), True, 'import pandas as pd\n'), ((1325, 1419), 'pandas.read_excel', 'pd.read_excel', (["(file_path + 'Data - BBG Data Values.xlsx')"], {'index_col': '(0)', 'sheet_name': '"""FWD 3M"""'}), "(file_path + 'Data - BBG Data Values.xlsx', index_col=0,\n sheet_name='FWD 3M')\n", (1338, 1419), True, 'import pandas as pd\n'), ((1504, 1596), 'pandas.read_excel', 'pd.read_excel', (["(file_path + 'Data - BBG Data Values.xlsx')"], {'index_col': '(0)', 'sheet_name': '"""Spot"""'}), "(file_path + 'Data - BBG Data Values.xlsx', index_col=0,\n sheet_name='Spot')\n", (1517, 1596), True, 'import pandas as pd\n'), ((1683, 1774), 'pandas.read_excel', 'pd.read_excel', (["(file_path + 'Data - BBG Data Values.xlsx')"], {'index_col': '(0)', 'sheet_name': '"""PPP"""'}), "(file_path + 'Data - BBG Data Values.xlsx', index_col=0,\n sheet_name='PPP')\n", (1696, 1774), True, 'import pandas as pd\n'), ((1861, 1965), 'pandas.read_excel', 'pd.read_excel', (["(file_path + 'Data - LIBOR.xlsx')"], {'index_col': '(0)', 'sheet_name': '"""LIBOR"""', 'na_values': "['#N/A']"}), "(file_path + 'Data - LIBOR.xlsx', index_col=0, sheet_name=\n 'LIBOR', na_values=['#N/A'])\n", (1874, 1965), True, 'import pandas as pd\n'), ((3041, 3074), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df_tr.columns'}), '(index=df_tr.columns)\n', (3053, 
3074), True, 'import pandas as pd\n'), ((3301, 3331), 'quantfin.portfolio.HRP', 'HRP', ([], {'cov': 'df_cov.loc[last_date]'}), '(cov=df_cov.loc[last_date])\n', (3304, 3331), False, 'from quantfin.portfolio import Markowitz, BlackLitterman, HRP\n'), ((3427, 3457), 'quantfin.statistics.detone', 'detone', (['df_corr.loc[last_date]'], {}), '(df_corr.loc[last_date])\n', (3433, 3457), False, 'from quantfin.statistics import marchenko_pastur, detone, cov2corr\n'), ((3465, 3514), 'quantfin.portfolio.HRP', 'HRP', ([], {'cov': 'df_cov.loc[last_date]', 'corr': 'corr_detoned'}), '(cov=df_cov.loc[last_date], corr=corr_detoned)\n', (3468, 3514), False, 'from quantfin.portfolio import Markowitz, BlackLitterman, HRP\n'), ((3654, 3668), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3666, 3668), True, 'import pandas as pd\n'), ((3673, 3684), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (3682, 3684), True, 'import pandas as pd\n'), ((4281, 4349), 'quantfin.statistics.marchenko_pastur', 'marchenko_pastur', (['df_corr.loc[last_date]'], {'T': '(21 * 3)', 'N': 'df_tr.shape[1]'}), '(df_corr.loc[last_date], T=21 * 3, N=df_tr.shape[1])\n', (4297, 4349), False, 'from quantfin.statistics import marchenko_pastur, detone, cov2corr\n'), ((5075, 5096), 'quantfin.statistics.cov2corr', 'cov2corr', (['bl.sigma_bl'], {}), '(bl.sigma_bl)\n', (5083, 5096), False, 'from quantfin.statistics import marchenko_pastur, detone, cov2corr\n'), ((5104, 5239), 'quantfin.portfolio.Markowitz', 'Markowitz', ([], {'mu': 'bl.mu_bl', 'sigma': 'vol_bl', 'corr': 'corr_bl', 'rf': "((1 + df_libor.loc[last_date, 'US 3m LIBOR']) ** 0.25 - 1)", 'risk_aversion': '(1.2)'}), "(mu=bl.mu_bl, sigma=vol_bl, corr=corr_bl, rf=(1 + df_libor.loc[\n last_date, 'US 3m LIBOR']) ** 0.25 - 1, risk_aversion=1.2)\n", (5113, 5239), False, 'from quantfin.portfolio import Markowitz, BlackLitterman, HRP\n'), ((5391, 5397), 'time.time', 'time', ([], {}), '()\n', (5395, 5397), False, 'from time import time\n'), ((5576, 5594), 
'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (5584, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5595, 5637), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""', 'linewidth': '(1)'}), "(0, color='black', linewidth=1)\n", (5606, 5637), True, 'import matplotlib.pyplot as plt\n'), ((5638, 5656), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5654, 5656), True, 'import matplotlib.pyplot as plt\n'), ((5657, 5724), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file_path + 'figures/Static Weights.pdf')"], {'pad_inches': '(0)'}), "(file_path + 'figures/Static Weights.pdf', pad_inches=0)\n", (5668, 5724), True, 'import matplotlib.pyplot as plt\n'), ((5726, 5736), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5734, 5736), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6068), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (6058, 6068), True, 'import matplotlib.pyplot as plt\n'), ((6069, 6111), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""', 'linewidth': '(1)'}), "(0, color='black', linewidth=1)\n", (6080, 6111), True, 'import matplotlib.pyplot as plt\n'), ((6112, 6130), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6128, 6130), True, 'import matplotlib.pyplot as plt\n'), ((6131, 6198), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file_path + 'figures/Static Signals.pdf')"], {'pad_inches': '(0)'}), "(file_path + 'figures/Static Signals.pdf', pad_inches=0)\n", (6142, 6198), True, 'import matplotlib.pyplot as plt\n'), ((6200, 6210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6208, 6210), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4450), 'numpy.diag', 'np.diag', (['vols'], {}), '(vols)\n', (4444, 4450), True, 'import numpy as np\n'), ((5017, 5037), 'numpy.diag', 'np.diag', (['bl.sigma_bl'], {}), '(bl.sigma_bl)\n', (5024, 5037), True, 'import numpy as 
np\n'), ((4411, 4424), 'numpy.diag', 'np.diag', (['vols'], {}), '(vols)\n', (4418, 4424), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import sys
import xml.dom.minidom as dom
from scipy.io.wavfile import read, write
import configparser
import numpy as np
import os
# Skeleton of a Hydrogen drumkit.xml; {content} receives the concatenated
# per-instrument entries rendered from `template` below, {name} and
# {license} come from the [drumkit] section of the config.
basstemplate = '''
<drumkit_info>
<name>{name}</name>
<author>kwt</author>
<info><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li {{ white-space: pre-wrap; }}
</style></head><body style=" font-family:'Lucida Grande'; font-size:10pt; font-weight:400; font-style:normal;">
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html></info>
<license>{license}</license>
<instrumentList>{content}
</instrumentList>
</drumkit_info>
'''
# One <instrument> entry: {id}/{id2} is the MIDI note, {instr} the
# instrument name (also used as the wav filename).
template = '''
<instrument>
<id>{id}</id>
<name>{instr}</name>
<volume>1</volume>
<isMuted>false</isMuted>
<pan_L>1</pan_L>
<pan_R>1</pan_R>
<randomPitchFactor>0</randomPitchFactor>
<gain>1</gain>
<filterActive>false</filterActive>
<filterCutoff>1</filterCutoff>
<filterResonance>0</filterResonance>
<Attack>0</Attack>
<Decay>0</Decay>
<Sustain>1</Sustain>
<Release>100256</Release>
<muteGroup>-1</muteGroup>
<midiOutChannel>0</midiOutChannel>
<midiOutNote>{id2}</midiOutNote>
<layer>
<filename>{instr}.wav</filename>
<min>0</min>
<max>1</max>
<gain>1</gain>
<pitch>0</pitch>
</layer>
</instrument>
'''
def writehydrogen(config):
    """Convert a DrumGizmo kit (described by *config*) to a Hydrogen drumkit.

    Reads the DrumGizmo MIDI map and kit definition, renders one
    <instrument> entry per MIDI note, mixes each instrument's sample down
    to stereo via generatesample(), and writes drumkit.xml into the
    configured destination directory.
    """
    print(config.get('drumkit', 'name'))
    basedir = config.get('drumkit', 'basedir')
    midi = basedir + '/' + config.get('drumkit', 'mididef')
    kit = basedir + '/' + config.get('drumkit', 'kitdef')
    destdir = config.get('drumkit', 'destdir')
    # Parse both XML inputs; `with` guarantees the handles are closed.
    with open(midi) as f:
        midimap = dom.parseString(f.read())
    with open(kit) as f:
        drumkit = dom.parseString(f.read())
    items = []
    for e in midimap.getElementsByTagName('map'):
        ins = e.getAttribute('instr')
        note = e.getAttribute('note')
        items.append((note, template.format(id=note, id2=note, instr=ins)))
        # Locate this instrument's definition file inside the kit.
        insfile = None
        for i in drumkit.getElementsByTagName('instrument'):
            if i.getAttribute('name') == ins:
                insfile = i.getAttribute('file')
                break
        assert insfile is not None
        instrdef = '{}/{}'.format(basedir, insfile)
        generatesample(instrdef, ins, insfile.rsplit('/', 1)[0], config)
    # Sort numerically by MIDI note; a plain string sort would order
    # "10" before "2" (note attributes are numeric MIDI note numbers).
    items.sort(key=lambda x: int(x[0]))
    out = basstemplate.format(content='\n'.join(i[1] for i in items),
                              name=config.get('drumkit', 'name'),
                              license=config.get('drumkit', 'license'))
    with open(destdir + '/drumkit.xml', 'w') as f:
        f.write(out)
def generatesample(instrdef, instr, insdir, config):
    """Mix one instrument's multi-microphone sample down to a stereo wav.

    Reads the last <sample> entry of the DrumGizmo instrument file
    *instrdef*, pans each microphone channel according to the
    [mic_settings] section of *config*, and writes <destdir>/<instr>.wav.

    Pan convention (as in the original loop): pan == 0 splits the channel
    equally; pan > 0 sends `pan` left and `1 - pan` right; pan < 0 is the
    mirror image.
    """
    print('generating {}'.format(instr))
    destdir = config.get('drumkit', 'destdir')
    basedir = config.get('drumkit', 'basedir')
    with open(instrdef) as f:
        xml = dom.parseString(f.read())
    samples = xml.getElementsByTagName('sample')
    assert len(samples) >= 1
    sample = samples[-1]  # keep original behavior: use the last sample entry
    audiofile = sample.getElementsByTagName('audiofile')[0].getAttribute('file')
    wavfile = '{}/{}/{}'.format(basedir, insdir, audiofile)
    rate, data = read(wavfile)
    mics = [k.strip() for k in config.get('mic_settings', 'mics').split(',')]
    # Vectorized mix: accumulate each mic column into the two output
    # channels instead of looping sample by sample.
    leftchannel = np.zeros(data.shape[0])
    rightchannel = np.zeros(data.shape[0])
    for i, mic in enumerate(mics):
        pan = float(config.get('mic_settings', mic))
        if pan == 0:
            left, right = 0.5, 0.5
        elif pan > 0.0:
            left, right = pan, 1 - pan
        else:
            left, right = 1 + pan, -pan
        channel = data[:, i]
        leftchannel += channel * left
        rightchannel += channel * right
    result = np.column_stack((leftchannel, rightchannel))
    path = destdir + '/' + instr + '.wav'
    write(path, rate, result)
# CLI entry point: with no argument print usage plus an example config,
# otherwise parse the config file and run the conversion.
if len(sys.argv) < 2:
    print('Usage: {} CONFIGFILE'.format(sys.argv[0]))
    print('''Example:
[drumkit]
name = SomeDrumkit
license = CC BY-SA
basedir = /path/to/drumgizmo/SomeDrumKit
mididef = midimap.xml
kitdef = drumkit.xml
destdir = /path/to/outdir
[mic_settings]
mics = Amb L, Amb R, Hihat, Kick L, Kick R, Overhead L, Overhead R, Ride, SnareBottom, SnareTop, Tom1, Tom2, Floor Tom1, Floor Tom2
Amb L = 1.0
Amb R = -1.0
Hihat = -0.7
Kick L = 0.0
Kick R = 0.0
Overhead L = 1.0
Overhead R = -1.0
Ride = 0.7
Snare Bottom = 0.0
Snare Top = 0.0
Tom1 = -0.2
Tom2 = 0.2
Floor Tom1 = 0.3
Fllor Tom2 = 0.4
''')
else:
    config = configparser.ConfigParser()
    config.read(sys.argv[1])
    writehydrogen(config)
| [
"scipy.io.wavfile.write",
"configparser.ConfigParser",
"numpy.array",
"scipy.io.wavfile.read"
] | [((3656, 3669), 'scipy.io.wavfile.read', 'read', (['wavfile'], {}), '(wavfile)\n', (3660, 3669), False, 'from scipy.io.wavfile import read, write\n'), ((4535, 4560), 'scipy.io.wavfile.write', 'write', (['path', 'rate', 'result'], {}), '(path, rate, result)\n', (4540, 4560), False, 'from scipy.io.wavfile import read, write\n'), ((5191, 5218), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (5216, 5218), False, 'import configparser\n'), ((4450, 4487), 'numpy.array', 'np.array', (['[leftchannel, rightchannel]'], {}), '([leftchannel, rightchannel])\n', (4458, 4487), True, 'import numpy as np\n')] |
#-*-coding:utf-8-*-
# date:2020-04-25
# Author: X.L.Eric
# function: inference
import os
import argparse
import torch
import torch.nn as nn
from data_iter.datasets import letterbox
import numpy as np
import time
import datetime
import os
import math
from datetime import datetime
import cv2
import torch.nn.functional as F
from models.resnet_50 import resnet50
from models.my_model import MY_Net
from utils.common_utils import *
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' Project Landmarks Test')
parser.add_argument('--test_model', type=str, default = './model_exp/2020-09-07_14-09-44/model_epoch-5.pth',
help = 'test_model') # 模型路径
parser.add_argument('--model', type=str, default = 'MY_Net',
help = 'model : resnet_50,MY_Net') # 模型类型
parser.add_argument('--num_classes', type=int , default = 196,
help = 'num_classes') # 分类类别个数
parser.add_argument('--GPUS', type=str, default = '0',
help = 'GPUS') # GPU选择
parser.add_argument('--test_path', type=str, default = './datasets/test_expand_datasets/',
help = 'test_path') # 测试集路径
parser.add_argument('--img_size', type=tuple , default = (256,256),
help = 'img_size') # 输入模型图片尺寸
parser.add_argument('--fix_res', type=bool , default = False,
help = 'fix_resolution') # 输入模型样本图片是否保证图像分辨率的长宽比
parser.add_argument('--vis', type=bool , default = True,
help = 'vis') # 是否可视化图片
print('\n/******************* {} ******************/\n'.format(parser.description))
#--------------------------------------------------------------------------
ops = parser.parse_args()# 解析添加参数
#--------------------------------------------------------------------------
print('----------------------------------')
unparsed = vars(ops) # parse_args()方法的返回值为namespace,用vars()内建函数化为字典
for key in unparsed.keys():
print('{} : {}'.format(key,unparsed[key]))
#---------------------------------------------------------------------------
os.environ['CUDA_VISIBLE_DEVICES'] = ops.GPUS
test_path = ops.test_path # 测试图片文件夹路径
#---------------------------------------------------------------- 构建模型
print('use model : %s'%(ops.model))
if ops.model == 'resnet_50':
model_ = resnet50(num_classes = ops.num_classes)
else:
model_ = MY_Net(num_classes = ops.num_classes)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
model_ = model_.to(device)
model_.eval() # 设置为前向推断模式
# print(model_)# 打印模型结构
# 加载测试模型
if os.access(ops.test_model,os.F_OK):# checkpoint
chkpt = torch.load(ops.test_model, map_location=device)
model_.load_state_dict(chkpt)
print('load test model : {}'.format(ops.test_model))
#---------------------------------------------------------------- 预测图片
font = cv2.FONT_HERSHEY_SIMPLEX
with torch.no_grad():
idx = 0
for file in os.listdir(ops.test_path):
if '.jpg' not in file:
continue
idx += 1
print('{}) image : {}'.format(idx,file))
img = cv2.imread(ops.test_path + file)
img_width = img.shape[1]
img_height = img.shape[0]
# 输入图片预处理
if ops.fix_res:
img_ = letterbox(img,size_=ops.img_size[0],mean_rgb = (128,128,128))
else:
img_ = cv2.resize(img, (ops.img_size[1],ops.img_size[0]), interpolation = cv2.INTER_CUBIC)
img_ = img_.astype(np.float32)
img_ = (img_-128.)/256.
img_ = img_.transpose(2, 0, 1)
img_ = torch.from_numpy(img_)
img_ = img_.unsqueeze_(0)
if use_cuda:
img_ = img_.cuda() # (bs, 3, h, w)
pre_ = model_(img_.float())
# print(pre_.size())
output = pre_.cpu().detach().numpy()
output = np.squeeze(output)
# print(output.shape)
dict_landmarks = draw_landmarks(img,output,draw_circle = False)
draw_contour(img,dict_landmarks)
if ops.vis:
cv2.namedWindow('image',0)
cv2.imshow('image',img)
if cv2.waitKey(10) == 27 :
break
cv2.destroyAllWindows()
print('well done ')
| [
"os.listdir",
"data_iter.datasets.letterbox",
"torch.from_numpy",
"argparse.ArgumentParser",
"cv2.waitKey",
"torch.load",
"cv2.imshow",
"cv2.imread",
"cv2.namedWindow",
"torch.cuda.is_available",
"torch.device",
"numpy.squeeze",
"models.resnet_50.resnet50",
"cv2.destroyAllWindows",
"torc... | [((475, 537), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""" Project Landmarks Test"""'}), "(description=' Project Landmarks Test')\n", (498, 537), False, 'import argparse\n'), ((2412, 2437), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2435, 2437), False, 'import torch\n'), ((2452, 2497), 'torch.device', 'torch.device', (["('cuda:0' if use_cuda else 'cpu')"], {}), "('cuda:0' if use_cuda else 'cpu')\n", (2464, 2497), False, 'import torch\n'), ((2609, 2643), 'os.access', 'os.access', (['ops.test_model', 'os.F_OK'], {}), '(ops.test_model, os.F_OK)\n', (2618, 2643), False, 'import os\n'), ((4323, 4346), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4344, 4346), False, 'import cv2\n'), ((2291, 2328), 'models.resnet_50.resnet50', 'resnet50', ([], {'num_classes': 'ops.num_classes'}), '(num_classes=ops.num_classes)\n', (2299, 2328), False, 'from models.resnet_50 import resnet50\n'), ((2358, 2393), 'models.my_model.MY_Net', 'MY_Net', ([], {'num_classes': 'ops.num_classes'}), '(num_classes=ops.num_classes)\n', (2364, 2393), False, 'from models.my_model import MY_Net\n'), ((2672, 2719), 'torch.load', 'torch.load', (['ops.test_model'], {'map_location': 'device'}), '(ops.test_model, map_location=device)\n', (2682, 2719), False, 'import torch\n'), ((2940, 2955), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2953, 2955), False, 'import torch\n'), ((2993, 3018), 'os.listdir', 'os.listdir', (['ops.test_path'], {}), '(ops.test_path)\n', (3003, 3018), False, 'import os\n'), ((3172, 3204), 'cv2.imread', 'cv2.imread', (['(ops.test_path + file)'], {}), '(ops.test_path + file)\n', (3182, 3204), False, 'import cv2\n'), ((3683, 3705), 'torch.from_numpy', 'torch.from_numpy', (['img_'], {}), '(img_)\n', (3699, 3705), False, 'import torch\n'), ((3966, 3984), 'numpy.squeeze', 'np.squeeze', (['output'], {}), '(output)\n', (3976, 3984), True, 'import numpy as np\n'), ((3353, 3416), 
'data_iter.datasets.letterbox', 'letterbox', (['img'], {'size_': 'ops.img_size[0]', 'mean_rgb': '(128, 128, 128)'}), '(img, size_=ops.img_size[0], mean_rgb=(128, 128, 128))\n', (3362, 3416), False, 'from data_iter.datasets import letterbox\n'), ((3456, 3543), 'cv2.resize', 'cv2.resize', (['img', '(ops.img_size[1], ops.img_size[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (ops.img_size[1], ops.img_size[0]), interpolation=cv2.\n INTER_CUBIC)\n', (3466, 3543), False, 'import cv2\n'), ((4182, 4209), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', '(0)'], {}), "('image', 0)\n", (4197, 4209), False, 'import cv2\n'), ((4225, 4249), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (4235, 4249), False, 'import cv2\n'), ((4268, 4283), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4279, 4283), False, 'import cv2\n')] |
import logging
import numpy as np
import pybullet as p
from gym.envs.balance_bot.balancebot_env import BalancebotEnv
logger = logging.getLogger(__name__)  # module-level logger for this environment
class BalancebotEnvNoise(BalancebotEnv):
    """BalancebotEnv variant that corrupts observations with Gaussian noise.

    A per-episode offset (drawn in ``_reset``) is added to the first
    observation component on top of the per-step sensor noise.
    """

    def _add_sensor_noise(self, observation):
        """Return *observation* with per-step Gaussian noise applied.

        Noise std-devs are 0.05 for components 0 and 2 and 0.01 for
        component 1; ``self.pitch_offset`` is additionally added to
        component 0.  (Previously this expression was duplicated verbatim
        in ``_compute_observation`` and ``_reset``.)
        """
        return np.array([observation[0] + np.random.normal(0, 0.05) + self.pitch_offset,
                         observation[1] + np.random.normal(0, 0.01),
                         observation[2] + np.random.normal(0, 0.05)])

    def _compute_observation(self):
        observation = super(BalancebotEnvNoise, self)._compute_observation()
        return self._add_sensor_noise(observation)

    def _reset(self):
        # Draw a fresh constant offset for the new episode before resetting.
        self.pitch_offset = np.random.normal(0, 0.1)
        observation = super(BalancebotEnvNoise, self)._reset()
        return self._add_sensor_noise(observation)
| [
"numpy.random.normal",
"logging.getLogger"
] | [((129, 156), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'import logging\n'), ((571, 595), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (587, 595), True, 'import numpy as np\n'), ((434, 459), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (450, 459), True, 'import numpy as np\n'), ((493, 518), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (509, 518), True, 'import numpy as np\n'), ((779, 804), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (795, 804), True, 'import numpy as np\n'), ((838, 863), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (854, 863), True, 'import numpy as np\n'), ((355, 380), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (371, 380), True, 'import numpy as np\n'), ((700, 725), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (716, 725), True, 'import numpy as np\n')] |
"""Test the DAP handler, which forms the core of the client."""
import sys
from netCDF4 import Dataset
import tempfile
import os
import numpy as np
from six.moves import zip
from pydap.handlers.netcdf import NetCDFHandler
from pydap.handlers.dap import DAPHandler
from pydap.wsgi.ssf import ServerSideFunctions
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestNetCDFHandler(unittest.TestCase):
    """Test that the handler creates the correct dataset from a URL."""

    # Rows of (index, temperature, station) written into the fixture file.
    data = [(10, 15.2, 'Diamond_St'),
            (11, 13.1, 'Blacktail_Loop'),
            (12, 13.3, 'Platinum_St'),
            (13, 12.1, 'Kodiak_Trail')]

    def setUp(self):
        """Create WSGI apps"""
        # Allocate a temporary NetCDF file; close the raw descriptor so the
        # netCDF4 library can reopen the path itself.
        fd, self.test_file = tempfile.mkstemp(suffix='.nc')
        os.close(fd)
        columns = zip(*self.data)
        with Dataset(self.test_file, 'w') as ncfile:
            ncfile.createDimension('index', None)
            index_var = ncfile.createVariable('index', '<i4', ('index',))
            index_var[:] = next(columns)
            temp_var = ncfile.createVariable('temperature', '<f8', ('index',))
            temp_var[:] = next(columns)
            station_var = ncfile.createVariable('station', 'S40', ('index',))
            for row, name in enumerate(next(columns)):
                station_var[row] = name

    def test_handler_direct(self):
        """Test that dataset has the correct data proxies for grids."""
        dataset = NetCDFHandler(self.test_file).dataset
        dtype = [('index', '<i4'),
                 ('temperature', '<f8'),
                 ('station', 'S40')]
        retrieved = list(zip(dataset['index'][:],
                             dataset['temperature'].array[:],
                             dataset['station'].array[:]))
        np.testing.assert_array_equal(np.array(retrieved, dtype=dtype),
                                      np.array(self.data, dtype=dtype))

    def tearDown(self):
        # Drop the fixture file created in setUp.
        os.remove(self.test_file)
class TestNetCDFHandlerServer(unittest.TestCase):
    """Test that the handler creates the correct dataset from a URL."""
    # Rows of (index, temperature, station) written into the fixture file.
    data = [(10, 15.2, 'Diamond_St'),
            (11, 13.1, 'Blacktail_Loop'),
            (12, 13.3, 'Platinum_St'),
            (13, 12.1, 'Kodiak_Trail')]
    def setUp(self):
        """Create WSGI apps"""
        # Create tempfile:
        fileno, self.test_file = tempfile.mkstemp(suffix='.nc')
        # must close file number:
        os.close(fileno)
        with Dataset(self.test_file, 'w') as output:
            output.createDimension('index', None)
            temp = output.createVariable('index', '<i4', ('index',))
            # zip(*self.data) yields the columns: indices, temperatures, stations.
            split_data = zip(*self.data)
            temp[:] = next(split_data)
            temp = output.createVariable('temperature', '<f8', ('index',))
            temp[:] = next(split_data)
            temp = output.createVariable('station', 'S40', ('index',))
            for item_id, item in enumerate(next(split_data)):
                temp[item_id] = item
    def test_open(self):
        """Test that NetCDFHandler can be read through open_url."""
        # Serve the file through pydap's WSGI stack and read it back with the
        # DAP client handler, then compare against the original rows.
        handler = NetCDFHandler(self.test_file)
        application = ServerSideFunctions(handler)
        dataset = DAPHandler("http://localhost:8001/", application).dataset
        dtype = [('index', '<i4'),
                 ('temperature', '<f8'),
                 ('station', 'S40')]
        retrieved_data = list(zip(dataset['index'][:],
                                  dataset['temperature'].array[:],
                                  dataset['station'].array[:]))
        np.testing.assert_array_equal(np.array(retrieved_data, dtype=dtype),
                                      np.array(self.data, dtype=dtype))
    def tearDown(self):
        # Drop the fixture file created in setUp.
        os.remove(self.test_file)
| [
"netCDF4.Dataset",
"os.remove",
"pydap.wsgi.ssf.ServerSideFunctions",
"tempfile.mkstemp",
"six.moves.zip",
"pydap.handlers.netcdf.NetCDFHandler",
"os.close",
"numpy.array",
"pydap.handlers.dap.DAPHandler"
] | [((795, 825), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".nc"""'}), "(suffix='.nc')\n", (811, 825), False, 'import tempfile\n'), ((868, 884), 'os.close', 'os.close', (['fileno'], {}), '(fileno)\n', (876, 884), False, 'import os\n'), ((2066, 2091), 'os.remove', 'os.remove', (['self.test_file'], {}), '(self.test_file)\n', (2075, 2091), False, 'import os\n'), ((2490, 2520), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".nc"""'}), "(suffix='.nc')\n", (2506, 2520), False, 'import tempfile\n'), ((2563, 2579), 'os.close', 'os.close', (['fileno'], {}), '(fileno)\n', (2571, 2579), False, 'import os\n'), ((3228, 3257), 'pydap.handlers.netcdf.NetCDFHandler', 'NetCDFHandler', (['self.test_file'], {}), '(self.test_file)\n', (3241, 3257), False, 'from pydap.handlers.netcdf import NetCDFHandler\n'), ((3280, 3308), 'pydap.wsgi.ssf.ServerSideFunctions', 'ServerSideFunctions', (['handler'], {}), '(handler)\n', (3299, 3308), False, 'from pydap.wsgi.ssf import ServerSideFunctions\n'), ((3866, 3891), 'os.remove', 'os.remove', (['self.test_file'], {}), '(self.test_file)\n', (3875, 3891), False, 'import os\n'), ((898, 926), 'netCDF4.Dataset', 'Dataset', (['self.test_file', '"""w"""'], {}), "(self.test_file, 'w')\n", (905, 926), False, 'from netCDF4 import Dataset\n'), ((1082, 1097), 'six.moves.zip', 'zip', (['*self.data'], {}), '(*self.data)\n', (1085, 1097), False, 'from six.moves import zip\n'), ((1547, 1576), 'pydap.handlers.netcdf.NetCDFHandler', 'NetCDFHandler', (['self.test_file'], {}), '(self.test_file)\n', (1560, 1576), False, 'from pydap.handlers.netcdf import NetCDFHandler\n'), ((1728, 1819), 'six.moves.zip', 'zip', (["dataset['index'][:]", "dataset['temperature'].array[:]", "dataset['station'].array[:]"], {}), "(dataset['index'][:], dataset['temperature'].array[:], dataset['station'\n ].array[:])\n", (1731, 1819), False, 'from six.moves import zip\n'), ((1922, 1959), 'numpy.array', 'np.array', (['retrieved_data'], {'dtype': 'dtype'}), 
'(retrieved_data, dtype=dtype)\n', (1930, 1959), True, 'import numpy as np\n'), ((1999, 2031), 'numpy.array', 'np.array', (['self.data'], {'dtype': 'dtype'}), '(self.data, dtype=dtype)\n', (2007, 2031), True, 'import numpy as np\n'), ((2593, 2621), 'netCDF4.Dataset', 'Dataset', (['self.test_file', '"""w"""'], {}), "(self.test_file, 'w')\n", (2600, 2621), False, 'from netCDF4 import Dataset\n'), ((2777, 2792), 'six.moves.zip', 'zip', (['*self.data'], {}), '(*self.data)\n', (2780, 2792), False, 'from six.moves import zip\n'), ((3327, 3376), 'pydap.handlers.dap.DAPHandler', 'DAPHandler', (['"""http://localhost:8001/"""', 'application'], {}), "('http://localhost:8001/', application)\n", (3337, 3376), False, 'from pydap.handlers.dap import DAPHandler\n'), ((3528, 3619), 'six.moves.zip', 'zip', (["dataset['index'][:]", "dataset['temperature'].array[:]", "dataset['station'].array[:]"], {}), "(dataset['index'][:], dataset['temperature'].array[:], dataset['station'\n ].array[:])\n", (3531, 3619), False, 'from six.moves import zip\n'), ((3722, 3759), 'numpy.array', 'np.array', (['retrieved_data'], {'dtype': 'dtype'}), '(retrieved_data, dtype=dtype)\n', (3730, 3759), True, 'import numpy as np\n'), ((3799, 3831), 'numpy.array', 'np.array', (['self.data'], {'dtype': 'dtype'}), '(self.data, dtype=dtype)\n', (3807, 3831), True, 'import numpy as np\n')] |
# semantic segmentation with Unet
# heavily inspired on https://github.com/zhixuhao/unet
# uses isbi dataset
import os
import matplotlib.pyplot as plt
import skimage.io as skimage_io
import skimage.transform as skimage_transform
import random as r
import numpy as np
import datetime
import tensorflow as tf
print("Tensorflow {}".format(tf.__version__))
print("GPU devices: {}".format(tf.config.list_physical_devices('GPU')))
# import PIL.Image as PImage
# --- Dataset layout -------------------------------------------------------
datasetPath = "membrane"
trainFolder = "train"
valFolder = "aug-val"
testFolder = "test"
modelsPath = "models"
# --- Split sizes (-1 keeps every example) ---------------------------------
trainSize = -1  # -1 for all
valSize = -1  # -1 for all
testSize = -1  # -1 for all
# --- Image / training hyper-parameters ------------------------------------
exampleSize = (512, 512)
inputSize = (256, 256)
maskSize = (256, 256)
batchSize = 4
epochs = 100
learning_rate = 1e-4
numClasses = 2
showImages = False
# Model file and prediction output directory encode epochs and learning rate.
modelFileName = "unet_membrane_TCV" + "E" + str(epochs) + "LR" + str(learning_rate) + ".hdf5"
resultsPath = "membrane/test/predict" + "E" + str(epochs) + "LR" + str(learning_rate)
# Augmentation parameters consumed by augmentImage(): crop offsets, right-angle
# rotations and axis flips.
augmentation_args = dict(
    width_shift_range=range(256),
    height_shift_range=range(256),
    rotation_range=[0, 90, 180, 270],
    horizontal_flip=True,
    vertical_flip=True
)
def prepareDataset(datasetPath, trainFolder, valFolder, testFolder):
    """Collect file paths for the train/validation/test splits.

    Returns ``(trainSetX, trainSetY, valSetX, valSetY, testSetX)`` where the
    X lists hold image paths and the Y lists hold the matching label paths.
    The training pairs are shuffled (order controlled by the module-level
    random seed); masks are paired by file name after the shuffle.
    """
    trainImagesPath = os.path.join(datasetPath, trainFolder, "image")
    trainMasksPath = os.path.join(datasetPath, trainFolder, "label")
    trainSetX = []
    for entry in os.scandir(trainImagesPath):
        imagePath = entry.path
        trainSetX.append(imagePath)
        if (showImages):
            # Debug view: show each training image next to its mask.
            image = skimage_io.imread(imagePath)
            maskPath = os.path.join(trainMasksPath, os.path.basename(imagePath))
            mask = skimage_io.imread(maskPath)
            plt.figure(figsize=(6, 3))
            plt.subplot(1, 2, 1)
            plt.grid(False)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(image, cmap='gray')
            plt.xlabel("Image - {}".format(os.path.basename(imagePath)))
            plt.subplot(1, 2, 2)
            plt.grid(False)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(mask, cmap='gray')
            plt.xlabel("Mask")
            plt.show()
    r.shuffle(trainSetX)
    # A mask shares its image's base name, so pair them after shuffling.
    trainSetY = [os.path.join(trainMasksPath, os.path.basename(p)) for p in trainSetX]
    valSetX = [e.path for e in os.scandir(os.path.join(datasetPath, valFolder, "image"))]
    valSetY = [e.path for e in os.scandir(os.path.join(datasetPath, valFolder, "label"))]
    testSetX = [e.path for e in os.scandir(os.path.join(datasetPath, testFolder, "image"))]
    return trainSetX, trainSetY, valSetX, valSetY, testSetX
def normalizeMask(mask, num_class=2):
    """One-hot encode a {0, 255}-valued mask.

    The mask is first scaled to {0, 1}; the result has shape
    ``mask.shape + (num_class,)`` with a 1.0 in the channel matching each
    pixel's label and 0.0 elsewhere.
    """
    labels = mask / 255
    one_hot = np.zeros(mask.shape + (num_class,))
    for channel in range(num_class):
        one_hot[labels == channel, channel] = 1.0
    return one_hot
class BatchLossHistory(tf.keras.callbacks.Callback):
    """Keras callback that records loss and accuracy after every batch.

    Values come from the ``logs`` dict Keras passes to ``on_batch_end``;
    missing keys are recorded as ``None``.
    """

    def on_train_begin(self, logs=None):
        # Reset the per-batch records at the start of every fit() call.
        # (The old signature used the mutable default `logs={}`.)
        self.batch_losses = []
        self.batch_accuracies = []

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.batch_losses.append(logs.get('loss'))
        self.batch_accuracies.append(logs.get('accuracy'))
def normalizeChannel(channel):
    """Map 8-bit pixel values in [0, 255] to roughly [-1, 1]."""
    # Algebraically identical to (channel - 128.0) / 128.0.
    return channel / 128.0 - 1.0
def getImageChannels(tile):
    """Read the image at path *tile* as grayscale and return a one-element
    list holding its normalized channel.

    NOTE(review): skimage's ``as_gray=True`` may return floats in [0, 1],
    while normalizeChannel assumes a 0-255 range — verify the value range
    matches the training data.
    """
    channel0 = skimage_io.imread(tile, as_gray=True)
    channel0 = normalizeChannel(channel0)
    return [channel0]
def augmentImage(image, inputSize, mask, maskSize, aug_dict):
    """Randomly crop, rotate and flip an image (list of channels) and its mask.

    The same crop offsets, rotation and flips are applied to every channel and
    to the mask so they stay aligned.  When a parameter is absent from
    *aug_dict* the deterministic fallback is a center crop / no transform.
    Relies on the module-level `random` state for reproducibility.
    """
    # Horizontal crop offset: random when configured, else centered.
    if 'width_shift_range' in aug_dict:
        cropx = r.sample(aug_dict['width_shift_range'], 1)[0]
    else:
        cropx = (int)((image[0].shape[1] - inputSize[1]) / 2)
    # Vertical crop offset.
    if 'height_shift_range' in aug_dict:
        cropy = r.sample(aug_dict['height_shift_range'], 1)[0]
    else:
        cropy = (int)((image[0].shape[0] - inputSize[0]) / 2)
    # Rotation angle in degrees (one of the configured values).
    if 'rotation_range' in aug_dict:
        rotation = r.sample(aug_dict['rotation_range'], 1)[0]
    else:
        rotation = 0
    # Coin-flip each enabled axis flip.
    if 'horizontal_flip' in aug_dict and aug_dict['horizontal_flip']:
        do_horizontal_flip = r.sample([False,True], 1)[0]
    else:
        do_horizontal_flip = False
    if 'vertical_flip' in aug_dict and aug_dict['vertical_flip']:
        do_vertical_flip = r.sample([False, True], 1)[0]
    else:
        do_vertical_flip = False
    # The mask window is centered inside the (possibly larger) input window,
    # so offset the crop origin by half the size difference.
    maskOffsety = int((inputSize[0]-maskSize[0])/2)
    maskOffsetx = int((inputSize[1]-maskSize[1])/2)
    mask = mask[maskOffsety+cropy:maskOffsety+cropy+maskSize[0], maskOffsetx+cropx:maskOffsetx+cropx+maskSize[1]]
    if rotation:
        mask = skimage_transform.rotate(mask, rotation)
    if do_horizontal_flip:
        mask = mask[:, ::-1]
    if do_vertical_flip:
        mask = mask[::-1, :]
    # Apply the identical crop/rotate/flip to every image channel in place.
    for i in range(len(image)):
        channel = image[i]
        channel = channel[cropy:cropy+inputSize[0], cropx:cropx+inputSize[1]]
        if rotation:
            channel = skimage_transform.rotate(channel, rotation)
        if do_horizontal_flip:
            channel = channel[:, ::-1]
        if do_vertical_flip:
            channel = channel[::-1, :]
        image[i] = channel
    return image, mask
def trainGenerator(batch_size, trainSetX, trainSetY, aug_dict, inputSize=(256, 256), inputChannels=1, maskSize=(256, 256), numClasses=2):
    """Endlessly yield (images, masks) batches for Keras ``fit``.

    Each epoch walks trainSetX/trainSetY once; every example is loaded,
    one-hot encoded (mask) and augmented via augmentImage.  The last batch
    of an epoch may be shorter than *batch_size*.  Yields nothing when
    *batch_size* <= 0.
    """
    if batch_size > 0:
        while 1:
            iTile = 0
            nBatches = int(np.ceil(len(trainSetX)/batch_size))
            for batchID in range(nBatches):
                # Preallocate a full-size batch; trimmed below if the epoch
                # runs out of tiles.
                images = np.zeros(((batch_size,) + inputSize + (inputChannels,))) # 1 channel
                masks = np.zeros(((batch_size,) + maskSize + (numClasses,)))
                iTileInBatch = 0
                while iTileInBatch<batch_size:
                    if iTile < len(trainSetX):
                        # print(iTile, "/", len(trainSetX), ";", iTileInBatch, "/", batch_size, ";", trainSetX[iTile], trainSetY[iTile])
                        image = getImageChannels(trainSetX[iTile])
                        mask = skimage_io.imread(trainSetY[iTile], as_gray=True)
                        mask = normalizeMask(mask)
                        image, mask = augmentImage(image, inputSize, mask, maskSize, aug_dict)
                        for i in range(len(image)):
                            images[iTileInBatch, :, :, i] = image[i]
                        masks[iTileInBatch, :, :, :] = mask
                        iTile = iTile + 1
                        iTileInBatch = iTileInBatch + 1
                    else:
                        # Short final batch: keep only the tiles actually filled.
                        images = images[0:iTileInBatch,:,:,:]
                        masks = masks[0:iTileInBatch,:,:,:]
                        break
                yield (images, masks)
def _unet_double_conv(x, filters, do_batch_normalization):
    """Apply two 3x3 'same' convolutions (optional batch-norm), each ReLU-activated."""
    for _ in range(2):
        x = tf.keras.layers.Conv2D(filters, 3, padding='same',
                                   kernel_initializer='he_normal')(x)
        if do_batch_normalization:
            x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation('relu')(x)
    return x


def _unet_upsample(x, filters, do_batch_normalization, use_transpose_convolution):
    """Double the spatial resolution of x, ending with a ReLU activation."""
    if use_transpose_convolution:
        up = tf.keras.layers.Conv2DTranspose(filters, (2, 2), strides=(2, 2))(x)
    else:
        # Upsampling followed by a 2x2 convolution, as in the original U-Net paper.
        up = tf.keras.layers.Conv2D(filters, 2, padding='same',
                                    kernel_initializer='he_normal')(
            tf.keras.layers.UpSampling2D(size=(2, 2))(x))
    if do_batch_normalization:
        up = tf.keras.layers.BatchNormalization()(up)
    return tf.keras.layers.Activation('relu')(up)


def unetCustom(pretrained_weights=None, inputSize=(256, 256, 1), numClass=2, do_batch_normalization=False, use_transpose_convolution=False):
    """Build (and optionally load weights into) a compiled U-Net model.

    Args:
        pretrained_weights: optional path of an HDF5 weights file to load.
        inputSize: input tensor shape (height, width, channels).
        numClass: number of output classes (softmax channels).
        do_batch_normalization: insert BatchNormalization after each conv.
        use_transpose_convolution: use Conv2DTranspose instead of
            UpSampling2D + Conv2D on the expanding path.

    Returns:
        A compiled tf.keras Model (Adam with the module-level learning_rate,
        categorical cross-entropy loss, accuracy metric).
    """
    inputs = tf.keras.layers.Input(inputSize)
    # Contracting path: double-conv blocks with 2x2 max-pooling in between.
    conv1 = _unet_double_conv(inputs, 64, do_batch_normalization)
    pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _unet_double_conv(pool1, 128, do_batch_normalization)
    pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _unet_double_conv(pool2, 256, do_batch_normalization)
    pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _unet_double_conv(pool3, 512, do_batch_normalization)
    drop4 = tf.keras.layers.Dropout(0.5)(conv4)
    pool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(drop4)
    # Bottleneck.
    conv5 = _unet_double_conv(pool4, 1024, do_batch_normalization)
    drop5 = tf.keras.layers.Dropout(0.5)(conv5)
    # Expanding path: upsample, concatenate the skip connection, double-conv.
    up6 = _unet_upsample(drop5, 512, do_batch_normalization, use_transpose_convolution)
    conv6 = _unet_double_conv(tf.keras.layers.concatenate([drop4, up6], axis=3),
                              512, do_batch_normalization)
    up7 = _unet_upsample(conv6, 256, do_batch_normalization, use_transpose_convolution)
    conv7 = _unet_double_conv(tf.keras.layers.concatenate([conv3, up7], axis=3),
                              256, do_batch_normalization)
    up8 = _unet_upsample(conv7, 128, do_batch_normalization, use_transpose_convolution)
    conv8 = _unet_double_conv(tf.keras.layers.concatenate([conv2, up8], axis=3),
                              128, do_batch_normalization)
    up9 = _unet_upsample(conv8, 64, do_batch_normalization, use_transpose_convolution)
    conv9 = _unet_double_conv(tf.keras.layers.concatenate([conv1, up9], axis=3),
                              64, do_batch_normalization)
    # Per-pixel class probabilities.
    conv10 = tf.keras.layers.Conv2D(numClass, 1, activation='softmax', kernel_initializer ='he_normal')(conv9)
    model = tf.keras.models.Model(inputs=inputs, outputs=conv10)
    # NOTE: `lr=` is the legacy Keras argument name; newer TF versions spell it
    # `learning_rate=` — kept as-is to match the file's TF version.
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    if (pretrained_weights):
        model.load_weights(pretrained_weights)
    return model
def do_center_crop(image, newSize):
    """Center-crop every channel of *image* (a list of 2-D arrays) in place.

    BUG FIX: the previous version recomputed the crop bounds from
    ``image[0].shape`` inside the loop, *after* ``image[0]`` had already been
    replaced by its cropped version — corrupting every channel after the
    first.  The original dimensions are now captured once up front.

    Args:
        image: list of 2-D channel arrays, all the same shape.
        newSize: (height, width) target; odd size differences keep one extra
            row/column, matching the original truncating-division behavior.

    Returns:
        The same list, with each channel replaced by its centered crop.
    """
    height = image[0].shape[0]
    width = image[0].shape[1]
    cropy = (int)((height - newSize[0]) / 2)
    cropx = (int)((width - newSize[1]) / 2)
    for i in range(len(image)):
        image[i] = image[i][cropy:height - cropy, cropx:width - cropx]
    return image
def testGenerator(testSetX, inputSize=(256, 256), inputChannels=1):
    """Yield one network-ready batch of size 1 per test image path."""
    for tilePath in testSetX:
        # Load, normalize and center-crop the image's channels.
        channels = do_center_crop(getImageChannels(tilePath), inputSize)
        stacked = np.zeros(inputSize + (inputChannels,))
        for channelIdx, channel in enumerate(channels):
            stacked[:, :, channelIdx] = channel
        # Wrap in a leading batch dimension of 1 for model.predict().
        yield np.array([stacked])
def do_center_crop_channel(image, newSize):
    """Return the centered ``newSize`` crop of a single 2-D image.

    Odd size differences keep one extra row/column (truncating division).
    """
    margin_y = (int)((image.shape[0] - newSize[0]) / 2)
    margin_x = (int)((image.shape[1] - newSize[1]) / 2)
    return image[margin_y:image.shape[0] - margin_y, margin_x:image.shape[1] - margin_x]
def saveResults(testSetX, results, resultsPath):
    """Write one predicted-mask PNG per test image into *resultsPath*.

    *results* is the model output, one per path in *testSetX* (same order).
    When the module-level `showImages` flag is set, each input image is also
    displayed next to its predicted mask.
    """
    for i,item in enumerate(results):
        filename = testSetX[i]
        # Collapse the per-class softmax output to a label image, then scale
        # labels {0, 1} to {0, 255} for an 8-bit PNG.
        mask_predict = np.argmax(item, axis=-1)
        mask_predict = mask_predict.astype(np.uint8)
        mask_predict = mask_predict * 255
        skimage_io.imsave(os.path.join(resultsPath, os.path.basename(filename) + "_predict.png"), mask_predict)
        if (showImages):
            imagePath = filename
            image = skimage_io.imread(imagePath)
            image = do_center_crop_channel(image, newSize=(256, 256))
            plt.figure(figsize=(6, 3))
            plt.subplot(1, 2, 1)
            plt.grid(False)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(image, cmap='gray')
            plt.xlabel("Image - {}".format(os.path.basename(imagePath)))
            plt.subplot(1, 2, 2)
            plt.grid(False)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(mask_predict, cmap='gray')
            plt.xlabel("Predicted Mask")
            plt.show()
def moving_average(a, n=3):
    """Return the simple moving average of *a* over windows of length ``n``."""
    csum = np.cumsum(a, dtype=float)
    # Window sums: cumulative sum at each window end minus the sum just
    # before the window start.  Copy to avoid mutating overlapping views.
    window_sums = csum[n - 1:].copy()
    window_sums[1:] -= csum[:-n]
    return window_sums / n
def main():
    """Train the U-Net on the membrane dataset, predict the test set, and
    plot the training curves.  All configuration comes from module globals."""
    # Fixed seed so prepareDataset's shuffle (and augmentation) is reproducible.
    r.seed(1)
    trainSetX, trainSetY, valSetX, valSetY, testSetX = prepareDataset(datasetPath=datasetPath, trainFolder=trainFolder,
                                                                      valFolder=valFolder, testFolder=testFolder)
    batch_history = BatchLossHistory()
    # Optionally truncate each split (-1 keeps everything).
    if trainSize > 0:
        trainSetX = trainSetX[0:trainSize]
        trainSetY = trainSetY[0:trainSize]
    if valSize > 0:
        valSetX = valSetX[0:valSize]
        valSetY = valSetY[0:valSize]
    if testSize > 0:
        testSetX = testSetX[0:testSize]
    # Training uses augmentation; validation uses an empty aug dict (center crop only).
    trainGene = trainGenerator(batchSize, trainSetX, trainSetY, augmentation_args, inputSize=inputSize, inputChannels=1,
                               maskSize=maskSize, numClasses=numClasses)
    valGene = trainGenerator(batchSize, valSetX, valSetY, dict(), inputSize=inputSize, inputChannels=1,
                             maskSize=maskSize, numClasses=numClasses)
    modelFilePath = os.path.join(modelsPath, modelFileName)
    model = unetCustom(inputSize=(256, 256, 1),
                       numClass=2,
                       do_batch_normalization=False,
                       use_transpose_convolution=False)
    # Keep only the weights with the best validation loss.
    model_checkpoint = tf.keras.callbacks.ModelCheckpoint(modelFilePath, monitor='val_loss', verbose=1, save_best_only=True)
    log_dir = os.path.join("logs", "fit", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    Ntrain = len(trainSetX)
    stepsPerEpoch = np.ceil(Ntrain / batchSize)
    Nval = len(valSetX)
    validationSteps = np.ceil(Nval / batchSize)
    history = model.fit(trainGene,
                        steps_per_epoch=stepsPerEpoch,
                        epochs=epochs,
                        callbacks=[model_checkpoint,
                                   batch_history,
                                   tensorboard_callback,
                                   ],
                        validation_data=valGene,
                        validation_steps=validationSteps)
    # load best model
    model = unetCustom(pretrained_weights=modelFilePath,
                       inputSize=(256, 256, 1),
                       numClass=2,
                       do_batch_normalization=False,
                       use_transpose_convolution=False)
    testGene = testGenerator(testSetX, inputSize=inputSize, inputChannels=1)
    NTest=len(testSetX)
    # NOTE(review): testSteps is computed but never passed to predict().
    testSteps = np.ceil(NTest / batchSize)
    results = model.predict(testGene, verbose=1)
    if not os.path.exists(resultsPath):
        os.makedirs(resultsPath)
    saveResults(testSetX, results, resultsPath)
    # Per-epoch accuracy (train vs validation).
    plt.subplot(2, 2, 1)
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='lower right')
    #plt.show()
    # Plot training & validation loss values
    plt.subplot(2, 2, 2)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper right')
    # Per-batch accuracy, smoothed with a 5-wide moving average.
    plt.subplot(2, 2, 3)
    plt.plot(moving_average(batch_history.batch_accuracies, 5))
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Batch')
    plt.legend(['Train'], loc='lower right')
    # Plot training & validation loss values
    plt.subplot(2, 2, 4)
    plt.plot(moving_average(batch_history.batch_losses, 5))
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Batch')
    plt.legend(['Train'], loc='upper right')
    plt.show()
if __name__ == '__main__':
    # Run training/prediction only when executed as a script.
    main()
| [
"matplotlib.pyplot.title",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.argmax",
"random.sample",
"random.shuffle",
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.concatenate",
"skimage.transform.rotate",
"os.path.join",
"matplotlib.pyplot.xl... | [((1352, 1399), 'os.path.join', 'os.path.join', (['datasetPath', 'trainFolder', '"""image"""'], {}), "(datasetPath, trainFolder, 'image')\n", (1364, 1399), False, 'import os\n'), ((1421, 1468), 'os.path.join', 'os.path.join', (['datasetPath', 'trainFolder', '"""label"""'], {}), "(datasetPath, trainFolder, 'label')\n", (1433, 1468), False, 'import os\n'), ((1490, 1517), 'os.scandir', 'os.scandir', (['trainImagesPath'], {}), '(trainImagesPath)\n', (1500, 1517), False, 'import os\n'), ((2306, 2326), 'random.shuffle', 'r.shuffle', (['trainSetX'], {}), '(trainSetX)\n', (2315, 2326), True, 'import random as r\n'), ((2498, 2543), 'os.path.join', 'os.path.join', (['datasetPath', 'valFolder', '"""image"""'], {}), "(datasetPath, valFolder, 'image')\n", (2510, 2543), False, 'import os\n'), ((2564, 2589), 'os.scandir', 'os.scandir', (['valImagesPath'], {}), '(valImagesPath)\n', (2574, 2589), False, 'import os\n'), ((2704, 2749), 'os.path.join', 'os.path.join', (['datasetPath', 'valFolder', '"""label"""'], {}), "(datasetPath, valFolder, 'label')\n", (2716, 2749), False, 'import os\n'), ((2770, 2794), 'os.scandir', 'os.scandir', (['valMasksPath'], {}), '(valMasksPath)\n', (2780, 2794), False, 'import os\n'), ((2910, 2956), 'os.path.join', 'os.path.join', (['datasetPath', 'testFolder', '"""image"""'], {}), "(datasetPath, testFolder, 'image')\n", (2922, 2956), False, 'import os\n'), ((2977, 3003), 'os.scandir', 'os.scandir', (['testImagesPath'], {}), '(testImagesPath)\n', (2987, 3003), False, 'import os\n'), ((3236, 3271), 'numpy.zeros', 'np.zeros', (['(mask.shape + (num_class,))'], {}), '(mask.shape + (num_class,))\n', (3244, 3271), True, 'import numpy as np\n'), ((3790, 3827), 'skimage.io.imread', 'skimage_io.imread', (['tile'], {'as_gray': '(True)'}), '(tile, as_gray=True)\n', (3807, 3827), True, 'import skimage.io as skimage_io\n'), ((7281, 7313), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['inputSize'], {}), '(inputSize)\n', (7302, 
7313), True, 'import tensorflow as tf\n'), ((10571, 10620), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[drop4, up6]'], {'axis': '(3)'}), '([drop4, up6], axis=3)\n', (10598, 10620), True, 'import tensorflow as tf\n'), ((11542, 11591), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[conv3, up7]'], {'axis': '(3)'}), '([conv3, up7], axis=3)\n', (11569, 11591), True, 'import tensorflow as tf\n'), ((12513, 12562), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[conv2, up8]'], {'axis': '(3)'}), '([conv2, up8], axis=3)\n', (12540, 12562), True, 'import tensorflow as tf\n'), ((13482, 13531), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[conv1, up9]'], {'axis': '(3)'}), '([conv1, up9], axis=3)\n', (13509, 13531), True, 'import tensorflow as tf\n'), ((14150, 14202), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'inputs', 'outputs': 'conv10'}), '(inputs=inputs, outputs=conv10)\n', (14171, 14202), True, 'import tensorflow as tf\n'), ((16511, 16536), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (16520, 16536), True, 'import numpy as np\n'), ((16615, 16624), 'random.seed', 'r.seed', (['(1)'], {}), '(1)\n', (16621, 16624), True, 'import random as r\n'), ((17553, 17592), 'os.path.join', 'os.path.join', (['modelsPath', 'modelFileName'], {}), '(modelsPath, modelFileName)\n', (17565, 17592), False, 'import os\n'), ((17808, 17913), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['modelFilePath'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(modelFilePath, monitor='val_loss',\n verbose=1, save_best_only=True)\n", (17842, 17913), True, 'import tensorflow as tf\n'), ((18030, 18095), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)'}), '(log_dir=log_dir, 
histogram_freq=1)\n', (18060, 18095), True, 'import tensorflow as tf\n'), ((18145, 18172), 'numpy.ceil', 'np.ceil', (['(Ntrain / batchSize)'], {}), '(Ntrain / batchSize)\n', (18152, 18172), True, 'import numpy as np\n'), ((18219, 18244), 'numpy.ceil', 'np.ceil', (['(Nval / batchSize)'], {}), '(Nval / batchSize)\n', (18226, 18244), True, 'import numpy as np\n'), ((19071, 19097), 'numpy.ceil', 'np.ceil', (['(NTest / batchSize)'], {}), '(NTest / batchSize)\n', (19078, 19097), True, 'import numpy as np\n'), ((19273, 19293), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (19284, 19293), True, 'import matplotlib.pyplot as plt\n'), ((19298, 19335), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (19306, 19335), True, 'import matplotlib.pyplot as plt\n'), ((19340, 19381), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (19348, 19381), True, 'import matplotlib.pyplot as plt\n'), ((19386, 19413), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {}), "('Model accuracy')\n", (19395, 19413), True, 'import matplotlib.pyplot as plt\n'), ((19418, 19440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (19428, 19440), True, 'import matplotlib.pyplot as plt\n'), ((19445, 19464), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (19455, 19464), True, 'import matplotlib.pyplot as plt\n'), ((19469, 19517), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test']"], {'loc': '"""lower right"""'}), "(['Train', 'Test'], loc='lower right')\n", (19479, 19517), True, 'import matplotlib.pyplot as plt\n'), ((19584, 19604), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (19595, 19604), True, 'import matplotlib.pyplot as plt\n'), ((19609, 19642), 'matplotlib.pyplot.plot', 'plt.plot', 
(["history.history['loss']"], {}), "(history.history['loss'])\n", (19617, 19642), True, 'import matplotlib.pyplot as plt\n'), ((19647, 19684), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (19655, 19684), True, 'import matplotlib.pyplot as plt\n'), ((19689, 19712), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (19698, 19712), True, 'import matplotlib.pyplot as plt\n'), ((19717, 19735), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (19727, 19735), True, 'import matplotlib.pyplot as plt\n'), ((19740, 19759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (19750, 19759), True, 'import matplotlib.pyplot as plt\n'), ((19764, 19812), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test']"], {'loc': '"""upper right"""'}), "(['Train', 'Test'], loc='upper right')\n", (19774, 19812), True, 'import matplotlib.pyplot as plt\n'), ((19818, 19838), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (19829, 19838), True, 'import matplotlib.pyplot as plt\n'), ((19907, 19934), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {}), "('Model accuracy')\n", (19916, 19934), True, 'import matplotlib.pyplot as plt\n'), ((19939, 19961), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (19949, 19961), True, 'import matplotlib.pyplot as plt\n'), ((19966, 19985), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (19976, 19985), True, 'import matplotlib.pyplot as plt\n'), ((19990, 20030), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train']"], {'loc': '"""lower right"""'}), "(['Train'], loc='lower right')\n", (20000, 20030), True, 'import matplotlib.pyplot as plt\n'), ((20081, 20101), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (20092, 20101), True, 
'import matplotlib.pyplot as plt\n'), ((20166, 20189), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (20175, 20189), True, 'import matplotlib.pyplot as plt\n'), ((20194, 20212), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (20204, 20212), True, 'import matplotlib.pyplot as plt\n'), ((20217, 20236), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (20227, 20236), True, 'import matplotlib.pyplot as plt\n'), ((20241, 20281), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train']"], {'loc': '"""upper right"""'}), "(['Train'], loc='upper right')\n", (20251, 20281), True, 'import matplotlib.pyplot as plt\n'), ((20287, 20297), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20295, 20297), True, 'import matplotlib.pyplot as plt\n'), ((386, 424), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (417, 424), True, 'import tensorflow as tf\n'), ((5026, 5066), 'skimage.transform.rotate', 'skimage_transform.rotate', (['mask', 'rotation'], {}), '(mask, rotation)\n', (5050, 5066), True, 'import skimage.transform as skimage_transform\n'), ((7326, 7403), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(64, 3, padding='same', kernel_initializer='he_normal')\n", (7348, 7403), True, 'import tensorflow as tf\n'), ((7519, 7553), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7545, 7553), True, 'import tensorflow as tf\n'), ((7573, 7650), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(64, 3, padding='same', kernel_initializer='he_normal')\n", (7595, 7650), True, 'import tensorflow as tf\n'), ((7765, 7799), 'tensorflow.keras.layers.Activation', 
'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (7791, 7799), True, 'import tensorflow as tf\n'), ((7819, 7865), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7847, 7865), True, 'import tensorflow as tf\n'), ((7885, 7963), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(128, 3, padding='same', kernel_initializer='he_normal')\n", (7907, 7963), True, 'import tensorflow as tf\n'), ((8078, 8112), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (8104, 8112), True, 'import tensorflow as tf\n'), ((8132, 8210), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(128, 3, padding='same', kernel_initializer='he_normal')\n", (8154, 8210), True, 'import tensorflow as tf\n'), ((8325, 8359), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (8351, 8359), True, 'import tensorflow as tf\n'), ((8379, 8425), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (8407, 8425), True, 'import tensorflow as tf\n'), ((8445, 8523), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(256, 3, padding='same', kernel_initializer='he_normal')\n", (8467, 8523), True, 'import tensorflow as tf\n'), ((8638, 8672), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (8664, 8672), True, 'import tensorflow as tf\n'), ((8692, 8770), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), 
"(256, 3, padding='same', kernel_initializer='he_normal')\n", (8714, 8770), True, 'import tensorflow as tf\n'), ((8885, 8919), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (8911, 8919), True, 'import tensorflow as tf\n'), ((8939, 8985), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (8967, 8985), True, 'import tensorflow as tf\n'), ((9005, 9083), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(512)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(512, 3, padding='same', kernel_initializer='he_normal')\n", (9027, 9083), True, 'import tensorflow as tf\n'), ((9198, 9232), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (9224, 9232), True, 'import tensorflow as tf\n'), ((9252, 9330), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(512)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(512, 3, padding='same', kernel_initializer='he_normal')\n", (9274, 9330), True, 'import tensorflow as tf\n'), ((9445, 9479), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (9471, 9479), True, 'import tensorflow as tf\n'), ((9499, 9527), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (9522, 9527), True, 'import tensorflow as tf\n'), ((9547, 9593), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (9575, 9593), True, 'import tensorflow as tf\n'), ((9614, 9693), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(1024)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(1024, 3, padding='same', kernel_initializer='he_normal')\n", (9636, 9693), True, 'import tensorflow as tf\n'), ((9808, 9842), 
'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (9834, 9842), True, 'import tensorflow as tf\n'), ((9862, 9941), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(1024)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(1024, 3, padding='same', kernel_initializer='he_normal')\n", (9884, 9941), True, 'import tensorflow as tf\n'), ((10056, 10090), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (10082, 10090), True, 'import tensorflow as tf\n'), ((10110, 10138), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (10133, 10138), True, 'import tensorflow as tf\n'), ((10518, 10552), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (10544, 10552), True, 'import tensorflow as tf\n'), ((10634, 10712), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(512)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(512, 3, padding='same', kernel_initializer='he_normal')\n", (10656, 10712), True, 'import tensorflow as tf\n'), ((10828, 10862), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (10854, 10862), True, 'import tensorflow as tf\n'), ((10882, 10960), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(512)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(512, 3, padding='same', kernel_initializer='he_normal')\n", (10904, 10960), True, 'import tensorflow as tf\n'), ((11075, 11109), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (11101, 11109), True, 'import tensorflow as tf\n'), ((11489, 11523), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (11515, 11523), True, 'import 
tensorflow as tf\n'), ((11605, 11683), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(256, 3, padding='same', kernel_initializer='he_normal')\n", (11627, 11683), True, 'import tensorflow as tf\n'), ((11799, 11833), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (11825, 11833), True, 'import tensorflow as tf\n'), ((11853, 11931), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(256, 3, padding='same', kernel_initializer='he_normal')\n", (11875, 11931), True, 'import tensorflow as tf\n'), ((12046, 12080), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (12072, 12080), True, 'import tensorflow as tf\n'), ((12460, 12494), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (12486, 12494), True, 'import tensorflow as tf\n'), ((12576, 12654), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(128, 3, padding='same', kernel_initializer='he_normal')\n", (12598, 12654), True, 'import tensorflow as tf\n'), ((12770, 12804), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (12796, 12804), True, 'import tensorflow as tf\n'), ((12824, 12902), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(128, 3, padding='same', kernel_initializer='he_normal')\n", (12846, 12902), True, 'import tensorflow as tf\n'), ((13017, 13051), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (13043, 13051), True, 'import tensorflow as 
tf\n'), ((13429, 13463), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (13455, 13463), True, 'import tensorflow as tf\n'), ((13545, 13622), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(64, 3, padding='same', kernel_initializer='he_normal')\n", (13567, 13622), True, 'import tensorflow as tf\n'), ((13738, 13772), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (13764, 13772), True, 'import tensorflow as tf\n'), ((13792, 13869), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(64, 3, padding='same', kernel_initializer='he_normal')\n", (13814, 13869), True, 'import tensorflow as tf\n'), ((13984, 14018), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (14010, 14018), True, 'import tensorflow as tf\n'), ((14039, 14132), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['numClass', '(1)'], {'activation': '"""softmax"""', 'kernel_initializer': '"""he_normal"""'}), "(numClass, 1, activation='softmax',\n kernel_initializer='he_normal')\n", (14061, 14132), True, 'import tensorflow as tf\n'), ((15027, 15065), 'numpy.zeros', 'np.zeros', (['(inputSize + (inputChannels,))'], {}), '(inputSize + (inputChannels,))\n', (15035, 15065), True, 'import numpy as np\n'), ((15153, 15168), 'numpy.array', 'np.array', (['[img]'], {}), '([img])\n', (15161, 15168), True, 'import numpy as np\n'), ((15561, 15585), 'numpy.argmax', 'np.argmax', (['item'], {'axis': '(-1)'}), '(item, axis=-1)\n', (15570, 15585), True, 'import numpy as np\n'), ((19158, 19185), 'os.path.exists', 'os.path.exists', (['resultsPath'], {}), '(resultsPath)\n', (19172, 19185), False, 'import os\n'), ((19195, 19219), 'os.makedirs', 'os.makedirs', 
(['resultsPath'], {}), '(resultsPath)\n', (19206, 19219), False, 'import os\n'), ((1663, 1691), 'skimage.io.imread', 'skimage_io.imread', (['imagePath'], {}), '(imagePath)\n', (1680, 1691), True, 'import skimage.io as skimage_io\n'), ((1792, 1819), 'skimage.io.imread', 'skimage_io.imread', (['maskPath'], {}), '(maskPath)\n', (1809, 1819), True, 'import skimage.io as skimage_io\n'), ((1832, 1858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (1842, 1858), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1891), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1882, 1891), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1919), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1912, 1919), True, 'import matplotlib.pyplot as plt\n'), ((1932, 1946), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1942, 1946), True, 'import matplotlib.pyplot as plt\n'), ((1959, 1973), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1969, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2016), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (1996, 2016), True, 'import matplotlib.pyplot as plt\n'), ((2102, 2122), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2113, 2122), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2150), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2143, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2163, 2177), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2173, 2177), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2204), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2200, 2204), True, 'import matplotlib.pyplot as plt\n'), ((2217, 2246), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask'], {'cmap': '"""gray"""'}), "(mask, 
cmap='gray')\n", (2227, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2277), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mask"""'], {}), "('Mask')\n", (2269, 2277), True, 'import matplotlib.pyplot as plt\n'), ((2290, 2300), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2298, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2440), 'os.path.basename', 'os.path.basename', (['trainExample'], {}), '(trainExample)\n', (2426, 2440), False, 'import os\n'), ((4012, 4054), 'random.sample', 'r.sample', (["aug_dict['width_shift_range']", '(1)'], {}), "(aug_dict['width_shift_range'], 1)\n", (4020, 4054), True, 'import random as r\n'), ((4187, 4230), 'random.sample', 'r.sample', (["aug_dict['height_shift_range']", '(1)'], {}), "(aug_dict['height_shift_range'], 1)\n", (4195, 4230), True, 'import random as r\n'), ((4362, 4401), 'random.sample', 'r.sample', (["aug_dict['rotation_range']", '(1)'], {}), "(aug_dict['rotation_range'], 1)\n", (4370, 4401), True, 'import random as r\n'), ((4535, 4561), 'random.sample', 'r.sample', (['[False, True]', '(1)'], {}), '([False, True], 1)\n', (4543, 4561), True, 'import random as r\n'), ((4702, 4728), 'random.sample', 'r.sample', (['[False, True]', '(1)'], {}), '([False, True], 1)\n', (4710, 4728), True, 'import random as r\n'), ((5358, 5401), 'skimage.transform.rotate', 'skimage_transform.rotate', (['channel', 'rotation'], {}), '(channel, rotation)\n', (5382, 5401), True, 'import skimage.transform as skimage_transform\n'), ((7463, 7499), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (7497, 7499), True, 'import tensorflow as tf\n'), ((7709, 7745), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (7743, 7745), True, 'import tensorflow as tf\n'), ((8022, 8058), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (8056, 8058), True, 'import tensorflow as 
tf\n'), ((8269, 8305), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (8303, 8305), True, 'import tensorflow as tf\n'), ((8582, 8618), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (8616, 8618), True, 'import tensorflow as tf\n'), ((8829, 8865), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (8863, 8865), True, 'import tensorflow as tf\n'), ((9142, 9178), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (9176, 9178), True, 'import tensorflow as tf\n'), ((9389, 9425), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (9423, 9425), True, 'import tensorflow as tf\n'), ((9752, 9788), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (9786, 9788), True, 'import tensorflow as tf\n'), ((10000, 10036), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (10034, 10036), True, 'import tensorflow as tf\n'), ((10195, 10255), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(512)', '(2, 2)'], {'strides': '(2, 2)'}), '(512, (2, 2), strides=(2, 2))\n', (10226, 10255), True, 'import tensorflow as tf\n'), ((10287, 10365), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(512)', '(2)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(512, 2, padding='same', kernel_initializer='he_normal')\n", (10309, 10365), True, 'import tensorflow as tf\n'), ((10466, 10502), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (10500, 10502), True, 'import tensorflow as tf\n'), ((10772, 10808), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (10806, 10808), True, 
'import tensorflow as tf\n'), ((11019, 11055), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (11053, 11055), True, 'import tensorflow as tf\n'), ((11166, 11226), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(256)', '(2, 2)'], {'strides': '(2, 2)'}), '(256, (2, 2), strides=(2, 2))\n', (11197, 11226), True, 'import tensorflow as tf\n'), ((11258, 11336), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(2)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(256, 2, padding='same', kernel_initializer='he_normal')\n", (11280, 11336), True, 'import tensorflow as tf\n'), ((11437, 11473), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (11471, 11473), True, 'import tensorflow as tf\n'), ((11743, 11779), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (11777, 11779), True, 'import tensorflow as tf\n'), ((11990, 12026), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (12024, 12026), True, 'import tensorflow as tf\n'), ((12137, 12197), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(128)', '(2, 2)'], {'strides': '(2, 2)'}), '(128, (2, 2), strides=(2, 2))\n', (12168, 12197), True, 'import tensorflow as tf\n'), ((12229, 12307), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(2)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(128, 2, padding='same', kernel_initializer='he_normal')\n", (12251, 12307), True, 'import tensorflow as tf\n'), ((12408, 12444), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (12442, 12444), True, 'import tensorflow as tf\n'), ((12714, 12750), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', 
([], {}), '()\n', (12748, 12750), True, 'import tensorflow as tf\n'), ((12961, 12997), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (12995, 12997), True, 'import tensorflow as tf\n'), ((13108, 13167), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(64)', '(2, 2)'], {'strides': '(2, 2)'}), '(64, (2, 2), strides=(2, 2))\n', (13139, 13167), True, 'import tensorflow as tf\n'), ((13199, 13276), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(2)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), "(64, 2, padding='same', kernel_initializer='he_normal')\n", (13221, 13276), True, 'import tensorflow as tf\n'), ((13377, 13413), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (13411, 13413), True, 'import tensorflow as tf\n'), ((13682, 13718), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (13716, 13718), True, 'import tensorflow as tf\n'), ((13928, 13964), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (13962, 13964), True, 'import tensorflow as tf\n'), ((14232, 14274), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (14256, 14274), True, 'import tensorflow as tf\n'), ((15871, 15899), 'skimage.io.imread', 'skimage_io.imread', (['imagePath'], {}), '(imagePath)\n', (15888, 15899), True, 'import skimage.io as skimage_io\n'), ((15983, 16009), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (15993, 16009), True, 'import matplotlib.pyplot as plt\n'), ((16022, 16042), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (16033, 16042), True, 'import matplotlib.pyplot as plt\n'), ((16055, 16070), 'matplotlib.pyplot.grid', 'plt.grid', 
(['(False)'], {}), '(False)\n', (16063, 16070), True, 'import matplotlib.pyplot as plt\n'), ((16083, 16097), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (16093, 16097), True, 'import matplotlib.pyplot as plt\n'), ((16110, 16124), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (16120, 16124), True, 'import matplotlib.pyplot as plt\n'), ((16137, 16167), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (16147, 16167), True, 'import matplotlib.pyplot as plt\n'), ((16253, 16273), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (16264, 16273), True, 'import matplotlib.pyplot as plt\n'), ((16286, 16301), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (16294, 16301), True, 'import matplotlib.pyplot as plt\n'), ((16314, 16328), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (16324, 16328), True, 'import matplotlib.pyplot as plt\n'), ((16341, 16355), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (16351, 16355), True, 'import matplotlib.pyplot as plt\n'), ((16368, 16405), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask_predict'], {'cmap': '"""gray"""'}), "(mask_predict, cmap='gray')\n", (16378, 16405), True, 'import matplotlib.pyplot as plt\n'), ((16418, 16446), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted Mask"""'], {}), "('Predicted Mask')\n", (16428, 16446), True, 'import matplotlib.pyplot as plt\n'), ((16459, 16469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16467, 16469), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1771), 'os.path.basename', 'os.path.basename', (['imagePath'], {}), '(imagePath)\n', (1760, 1771), False, 'import os\n'), ((5924, 5978), 'numpy.zeros', 'np.zeros', (['((batch_size,) + inputSize + (inputChannels,))'], {}), '((batch_size,) + inputSize + (inputChannels,))\n', (5932, 5978), True, 'import numpy as np\n'), ((6017, 
6067), 'numpy.zeros', 'np.zeros', (['((batch_size,) + maskSize + (numClasses,))'], {}), '((batch_size,) + maskSize + (numClasses,))\n', (6025, 6067), True, 'import numpy as np\n'), ((10370, 10411), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (10398, 10411), True, 'import tensorflow as tf\n'), ((11341, 11382), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (11369, 11382), True, 'import tensorflow as tf\n'), ((12312, 12353), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (12340, 12353), True, 'import tensorflow as tf\n'), ((13281, 13322), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (13309, 13322), True, 'import tensorflow as tf\n'), ((17952, 17975), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17973, 17975), False, 'import datetime\n'), ((2060, 2087), 'os.path.basename', 'os.path.basename', (['imagePath'], {}), '(imagePath)\n', (2076, 2087), False, 'import os\n'), ((15733, 15759), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (15749, 15759), False, 'import os\n'), ((16211, 16238), 'os.path.basename', 'os.path.basename', (['imagePath'], {}), '(imagePath)\n', (16227, 16238), False, 'import os\n'), ((6433, 6482), 'skimage.io.imread', 'skimage_io.imread', (['trainSetY[iTile]'], {'as_gray': '(True)'}), '(trainSetY[iTile], as_gray=True)\n', (6450, 6482), True, 'import skimage.io as skimage_io\n')] |
import numpy as np
from numpy.random import choice
from .abstract_sampler import AbstractSampler
from ..dataset import AbstractDataset
class WeightedRandomSampler(AbstractSampler):
    """
    Draws batch indices at random, optionally biased by per-sample
    weights (see ``numpy.random.choice``).
    """
    def __init__(self, indices, weights=None):
        """
        Parameters
        ----------
        indices : list
            list of classes each sample belongs to. List index corresponds to
            data index and the value at a certain index indicates the
            corresponding class
        weights : Any or None
            sampling weights; for more details see numpy.random.choice
            (parameter ``p``)
        """
        super().__init__(indices)
        # Sampling happens over positions 0..len(indices)-1, not class labels.
        self._indices = [position for position in range(len(indices))]
        self._weights = weights
        self._global_index = 0
    @classmethod
    def from_dataset(cls, dataset: AbstractDataset, **kwargs):
        """
        Classmethod to initialize the sampler from a given dataset
        Parameters
        ----------
        dataset : AbstractDataset
            the given dataset
        Returns
        -------
        AbstractSampler
            The initialized sampler
        """
        # Collect the per-sample class labels and hand them to the constructor.
        return cls([sample['label'] for sample in dataset], **kwargs)
    def _get_indices(self, n_indices):
        """
        Actual Sampling
        Parameters
        ----------
        n_indices : int
            number of indices to return
        Returns
        -------
        list
            list of sampled indices
        Raises
        ------
        StopIteration
            If maximal number of samples is reached
        ValueError
            if weights or cum_weights don't match the population
        """
        # The parent class may clamp the requested batch size.
        batch_size = self._check_batchsize(n_indices)
        return choice(self._indices, size=batch_size, p=self._weights)
    def __len__(self):
        return len(self._indices)
class WeightedPrevalenceRandomSampler(WeightedRandomSampler):
    def __init__(self, indices):
        """
        Implements random per-class sampling and ensures uniform sampling
        of all classes

        Parameters
        ----------
        indices : array-like
            list of classes each sample belongs to. List index corresponds to
            data index and the value at a certain index indicates the
            corresponding class
        """
        labels = np.asarray(indices)
        classes, classes_count = np.unique(labels, return_counts=True)
        # Each class should be drawn with equal total probability; every
        # sample of a class therefore gets weight target_prob / class_count,
        # so the weights sum to exactly 1 as required by numpy.random.choice.
        target_prob = 1 / classes.shape[0]
        # Build the weight vector via a class->weight lookup instead of the
        # old in-place `weights[weights == c] = ...` remapping, which (a) used
        # np.float (removed in NumPy 1.24) and (b) could collide when an
        # already-assigned weight value equals a not-yet-processed class label.
        class_weight = {c: target_prob / n for c, n in zip(classes, classes_count)}
        weights = np.array([class_weight[label] for label in labels], dtype=float)
        super().__init__(indices, weights=weights)
| [
"numpy.array",
"numpy.unique",
"numpy.random.choice"
] | [((1824, 1878), 'numpy.random.choice', 'choice', (['self._indices'], {'size': 'n_indices', 'p': 'self._weights'}), '(self._indices, size=n_indices, p=self._weights)\n', (1830, 1878), False, 'from numpy.random import choice\n'), ((2514, 2552), 'numpy.unique', 'np.unique', (['indices'], {'return_counts': '(True)'}), '(indices, return_counts=True)\n', (2523, 2552), True, 'import numpy as np\n'), ((2446, 2463), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (2454, 2463), True, 'import numpy as np\n')] |
# BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>'s Research Lab
import numpy as np
from sklearn.preprocessing import KBinsDiscretizer
from data_generators.tnkde import TruncatedNormalKernelDensity
from pomegranate import *
class BayesianSampleGenerator():
def fit(self, data, discrete_features=None, bandwidth=1.0,
num_discretization_bins=4, pseudocount=1.0):
"""Fit the Chow Liu Bayesian Sampler on the data.
Parameters
----------
data : array_like, shape (n_samples, n_features)
List of data points to train from.
discrete_features : array_like, shape (n_features)
Array with true for discrete features and false for continuous
features. If None all features are treated as continuous.
bandwidth : double
Bandwidth of the truncated kde to use for the continuous features.
num_discretization_bins : int
Number of bins used to discretize the continuous features. Uses less
bins for features where the bin width is too narrow.
pseudocount : double
Pseudocount to use in the bayesian network.
"""
if bandwidth <= 0:
raise ValueError("Bandwidth must be positive.")
if discrete_features != None and \
len(discrete_features) != data.shape[1]:
raise ValueError("Discrete features array and data arrays"
"shape don't match.")
if num_discretization_bins < 0:
raise ValueError("Number of descretization bins can't be negetive.")
if num_discretization_bins == 0:
for bool in discrete_features:
if bool:
raise ValueError("Number of descretization bins can't be"
"zero if there is a continuous feature.")
if pseudocount < 0:
raise ValueError("Pseudocount can't be negative.")
if discrete_features == None:
discrete_features = [False] * data.shape[1]
self.num_features_ = data.shape[1]
self.discrete_features_ = discrete_features
self.num_discretization_bins_ = num_discretization_bins
discretized_data = np.array(data, copy=True)
continuous_data = data[:, np.invert(discrete_features)]
discretizer = KBinsDiscretizer(n_bins=num_discretization_bins,
encode='ordinal', strategy='quantile')
discretizer.fit(continuous_data)
discretized_data[:, np.invert(discrete_features)] = \
discretizer.transform(continuous_data)
self.discretizer_ = discretizer
self.model_ = BayesianNetwork.from_samples(discretized_data,
algorithm='chow-liu', n_jobs=-1, pseudocount=pseudocount)
self.model_.bake()
# Table for bin edges
bins = discretizer.bin_edges_
# Kdes for continuous data.
self.tnkdes_ = []
i = 0
for k in range(self.num_features_):
if discrete_features[k]:
continue
bins[i][0] = -np.inf
bins[i][len(bins[i]) - 1] = np.inf
bin_kdes = []
# loop of boundary
for j in range(len(bins[i]) - 1):
# Bound for this bin.
lower_bound = bins[i][j]
upper_bound = bins[i][j+1]
# Create a kde using the data in the current bin.
current_feature_data = data[:, k]
cur_bin_data = current_feature_data[discretized_data[:, k] == j]
kde = TruncatedNormalKernelDensity(bandwidth=bandwidth,
lowerbound=lower_bound, upperbound=upper_bound)
kde.fit(cur_bin_data)
bin_kdes.append(kde)
i = i + 1
self.tnkdes_.append(bin_kdes)
def sample(self, num_samples=1, burnin_period=100, constraints={}):
"""Get new samples similar to the training data.
Parameters
----------
num_samples : int, optional
Number of samples to return. Defaults to 1.
burnin_period : int, optional
Burn-in period before drawing each instance from the gibbs sampler.
Defaults to 100.
constraints : dict, optional
Evidence to set constant while samples are generated. The format of
the disctionry enteries are 'string representation of feature
number' : val. For example: {'0' : 6.0, '5' : -4.5}
"""
if num_samples <= 0:
raise ValueError("Number of samples requested must be positive.")
if burnin_period < 0:
raise ValueError("Burn-in period can't be negative.")
if constraints == None:
constraints = {}
original_constraints = constraints.copy()
constraints_array = np.array([0] * self.num_features_)
i = 0
for key in constraints:
constraints_array[int(key)] = constraints[key]
constraints_array[np.invert(self.discrete_features_)] = \
self.discretizer_.transform(
constraints_array[np.invert(self.discrete_features_)].reshape(1, -1)
)[0]
for key in constraints:
constraints[key] = constraints_array[int(key)]
# Get samples from the bayesian net. We still need to sample the kdes
# for the continuos data.
sample_table = np.array(self.model_.sample(n=num_samples,
burnin=burnin_period, evidence=constraints))
# Loop over all continuos features.
i = 0
for k in range(self.num_features_):
if self.discrete_features_[k]:
continue
# Loop over all bins for the feature.
for j in range(len(self.tnkdes_[i])):
current_feature_samples = sample_table[:,k]
num_samples_in_bin = \
current_feature_samples[current_feature_samples == j].shape[0]
# Add number of bins to avoid collisions in future iterations.
# This is subtracted below.
current_feature_samples[current_feature_samples == j] = \
self.tnkdes_[i][j].sample(n_samples = num_samples_in_bin) + \
self.num_discretization_bins_
sample_table[:,k] = current_feature_samples
i = i + 1
# Subtract number of bins added above to avoid collisions.
sample_table[:, k] -= self.num_discretization_bins_
for key in original_constraints:
if not self.discrete_features_[int(key)]:
sample_table[:, int(key)] = original_constraints[key]
return sample_table
| [
"data_generators.tnkde.TruncatedNormalKernelDensity",
"sklearn.preprocessing.KBinsDiscretizer",
"numpy.array",
"numpy.invert"
] | [((2297, 2322), 'numpy.array', 'np.array', (['data'], {'copy': '(True)'}), '(data, copy=True)\n', (2305, 2322), True, 'import numpy as np\n'), ((2410, 2502), 'sklearn.preprocessing.KBinsDiscretizer', 'KBinsDiscretizer', ([], {'n_bins': 'num_discretization_bins', 'encode': '"""ordinal"""', 'strategy': '"""quantile"""'}), "(n_bins=num_discretization_bins, encode='ordinal', strategy\n ='quantile')\n", (2426, 2502), False, 'from sklearn.preprocessing import KBinsDiscretizer\n'), ((5076, 5110), 'numpy.array', 'np.array', (['([0] * self.num_features_)'], {}), '([0] * self.num_features_)\n', (5084, 5110), True, 'import numpy as np\n'), ((5260, 5294), 'numpy.invert', 'np.invert', (['self.discrete_features_'], {}), '(self.discrete_features_)\n', (5269, 5294), True, 'import numpy as np\n'), ((2357, 2385), 'numpy.invert', 'np.invert', (['discrete_features'], {}), '(discrete_features)\n', (2366, 2385), True, 'import numpy as np\n'), ((2604, 2632), 'numpy.invert', 'np.invert', (['discrete_features'], {}), '(discrete_features)\n', (2613, 2632), True, 'import numpy as np\n'), ((3746, 3847), 'data_generators.tnkde.TruncatedNormalKernelDensity', 'TruncatedNormalKernelDensity', ([], {'bandwidth': 'bandwidth', 'lowerbound': 'lower_bound', 'upperbound': 'upper_bound'}), '(bandwidth=bandwidth, lowerbound=lower_bound,\n upperbound=upper_bound)\n', (3774, 3847), False, 'from data_generators.tnkde import TruncatedNormalKernelDensity\n'), ((5367, 5401), 'numpy.invert', 'np.invert', (['self.discrete_features_'], {}), '(self.discrete_features_)\n', (5376, 5401), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
import boto3
import os
import zipfile
import rasterio as rio
import numpy as np
# Indicators derived from the daily min/max temperature variables (tasmin/tasmax).
temp_indicators = [
    'annual_tasmin',
    'annual_tasmax',
    'tavg-tasmin_tasmax',
    'hdd65f-tasmin_tasmax',
    'cdd65f-tasmin_tasmax',
    'frostfree_tasmin',
    'gt-q99_tasmax'
]
# Indicators derived from the daily precipitation variable (pr).
pr_indicators = [
    'dryspells_pr',
    'annual_pr',
    'gt-q99_pr',
]
# Ensemble statistics: median, quartiles, and inter-quartile range.
ensembles = [
    'q50',
    'q25',
    'q75',
    'iqr'
]
# Emission scenarios.
scenarios = [
    'rcp45',
    'rcp85'
]
# Source downscaled climate datasets.
datasets = [
    'nexgddp',
    'loca'
]
# Decade centers; each raster covers a 30-year window (center +/- 15 years),
# see _gen_all_args below.
startyear = 2000
endyear = 2080
# S3 locations: working bucket/prefix come from the environment; the archive
# prefix is where finished products are published.
BUCKET = os.getenv('GDDP_BUCKET')
PREFIX = os.getenv('GDDP_PREFIX') or 'tmp/nex-gddp'
ARCHIVE_PREFIX = 'prepdata/nex-gddp'
# Local working directory for downloaded/derived rasters.
DATA_DIR = 'prep_share'
def k2c(k):
    """Convert a temperature (scalar or array) from Kelvin to degrees Celsius."""
    celsius = k - 273.15
    return celsius
def kgs2mmyr(kgs):
    """Convert a precipitation flux from kg/m^2/s to mm/year."""
    per_day = kgs * 86400          # seconds per day
    return per_day * 365           # days per year
def raster_template(e, ch, i, s, y1, y2, d):
    """Build the GeoTIFF filename for one ensemble/change/indicator/scenario
    combination over the window y1-y2; a falsy dataset omits the suffix."""
    stem = f'{e}-{ch}-{i}_{s}_ens_{y1}-{y2}'
    suffix = f'_{d}.tif' if d else '.tif'
    return stem + suffix
def url_template(rast):
    """Return the public S3 URL for *rast* under the archive prefix."""
    base = f'https://s3.amazonaws.com/md.cc/{ARCHIVE_PREFIX}'
    return os.path.join(base, rast)
def _gen_all_args():
    """Yield every (ensemble, change, indicator, scenario, y1, y2, dataset)
    tuple the pipeline produces, including the historical baselines
    (q50/abs over 1960-1990) for each dataset."""
    for dataset in datasets:
        for scenario in scenarios:
            for center in range(startyear, endyear + 1, 10):
                lo = center - 15
                hi = center + 15
                for ens in ensembles:
                    for ind in temp_indicators:
                        for change in ('abs', 'diff'):
                            yield (ens, change, ind, scenario, lo, hi, dataset)
                    for ind in pr_indicators:
                        for change in ('abs', 'ch'):
                            yield (ens, change, ind, scenario, lo, hi, dataset)
        for ind in temp_indicators + pr_indicators:
            yield ('q50', 'abs', ind, 'historical', 1960, 1990, dataset)
def main():
    """Print every raster filename the pipeline produces."""
    for combo in _gen_all_args():
        print(raster_template(*combo))
def baselines():
    """Print the historical-baseline raster filename for every indicator and dataset."""
    for ds in datasets:
        for ind in temp_indicators + pr_indicators:
            print(raster_template('q50', 'abs', ind, 'historical', 1960, 1990, ds))
def csv():
    """Print a CSV index of the full product catalogue, one row per raster."""
    print('indicator, scenario, year, change, ensemble, dataset, tiff, url')
    for e, ch, i, s, y1, y2, d in _gen_all_args():
        rast = raster_template(e, ch, i, s, y1, y2, d)
        row = [i, s, f'{y1}-{y2}', ch, e, d, rast, url_template(rast)]
        print(', '.join(row))
def move_to_archive():
    """Copy every generated raster from the working prefix (PREFIX) to the
    public archive prefix (ARCHIVE_PREFIX) within the same S3 bucket.
    Failures are reported and skipped (best effort)."""
    session = boto3.session.Session(
        aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
        aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')
    )
    client = session.resource('s3')
    bucket = client.Bucket(BUCKET)
    for e, ch, i, s, y1, y2, d in _gen_all_args():
        rast = raster_template(e, ch, i, s, y1, y2, d)
        rast0 = os.path.join(PREFIX, rast)  # source key
        rast1 = os.path.join(ARCHIVE_PREFIX, rast)  # destination key
        try:
            bucket.Object(rast1).copy_from(CopySource=os.path.join(BUCKET, rast0))
            print(f'copy from {rast0} to {rast1}')
        except Exception as ex:
            # Best effort: report and continue with the remaining rasters.
            print(ex, rast0)
            pass
def download():
    """Download every archived raster from S3 into DATA_DIR, skipping files
    that already exist locally. Failures are reported and skipped."""
    session = boto3.session.Session(
        aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
        aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')
    )
    client = session.resource('s3')
    bucket = client.Bucket(BUCKET)
    os.makedirs(DATA_DIR, exist_ok=True)
    for args in _gen_all_args():
        rast = raster_template(*args)
        obj = os.path.join(ARCHIVE_PREFIX, rast)  # S3 key
        fname = os.path.join(DATA_DIR, rast)        # local destination
        if os.path.isfile(fname):
            print(f'{fname} exists; skipping.')
        else:
            try:
                bucket.download_file(obj, fname)
                print(f'downloading {obj}')
            except Exception as ex:
                # Best effort: report the failure and continue.
                print(ex, obj)
                pass
def download_zip():
    """Download every raster and bundle them into zip archives.

    Rasters are grouped into '{DATA_DIR}/{dataset}_{indicator}.zip' archives,
    stored under 'scenario/ensemble/' inside each zip. Historical rasters are
    additionally collected flat into 'baselines.zip'.
    """
    zips = {}
    download()
    for e, ch, i, s, y1, y2, d in _gen_all_args():
        rast = raster_template(e, ch, i, s, y1, y2, d)
        zipname = os.path.join(DATA_DIR, f'{d}_{i}.zip')
        write_opts = {
            'filename': os.path.join(DATA_DIR, rast),
            'arcname': os.path.join(s, e, rast)
        }
        zips.setdefault(zipname, []).append(write_opts)
        if s == 'historical':
            # Fix: use a *copy* for the baselines entry. The original code
            # mutated the same dict that had already been queued for the
            # per-indicator zip above, flattening that entry's arcname too.
            baseline_opts = dict(write_opts, arcname=rast)
            # NOTE(review): 'baselines.zip' is not joined with DATA_DIR, so it
            # lands in the working directory -- confirm this is intended.
            zips.setdefault('baselines.zip', []).append(baseline_opts)
    for zipname, opts_list in zips.items():
        with zipfile.ZipFile(zipname, 'w', compression=zipfile.ZIP_DEFLATED) as z:
            for i, opts in enumerate(opts_list):
                z.write(**opts)
                print(f'{zipname}: {i+1}/{len(opts_list)}', end='\r')
            print('')
def fix_loca_projection():
    """Ensure every LOCA raster in DATA_DIR has a CRS, assigning EPSG:4326
    in place where it is missing."""
    download()
    count = 0
    for e, ch, i, s, y1, y2, d in _gen_all_args():
        if d == 'loca':
            rast = raster_template(e, ch, i, s, y1, y2, d)
            fname = os.path.join(DATA_DIR, rast)
            # 'r+' opens the raster for in-place metadata updates.
            with rio.open(fname, 'r+') as dst:
                if dst.crs is None:
                    dst.crs = 'EPSG:4326'
                    count += 1
                    print(f'fixed proj for {count} rasters', end='\r')
    print('All LOCA rasts have CRS')
def fix_loca_mask():
    """Apply a nodata mask to every LOCA raster in place.

    The mask is derived from a historical reference raster: cells holding the
    sentinel fill value (>= 1e30) or NaN are set to NaN in every LOCA raster.
    """
    download()
    # Derive the nodata mask from a historical reference raster.
    rast = raster_template('q50', 'abs', 'annual_tasmax', 'historical', 1960, 1990, 'loca')
    fname = os.path.join(DATA_DIR, rast)
    with rio.open(fname, 'r') as src:
        mask = src.read()
        # Fix: `mask == np.nan` is always False (NaN never compares equal);
        # np.isnan is required to actually catch NaN cells.
        mask = (mask >= 1e30) | np.isnan(mask)
    print(f"masksize: {mask.sum()/mask.size}")
    for e, ch, i, s, y1, y2, d in _gen_all_args():
        if d == 'loca':
            rast = raster_template(e, ch, i, s, y1, y2, d)
            fname = os.path.join(DATA_DIR, rast)
            print(f'masking {rast}')
            with rio.open(fname, 'r+') as dst:
                arr = dst.read()
                arr[mask] = np.nan
                dst.write(arr)
def stack_and_scale():
    """Stack the q25/q50/q75 ensemble rasters of each product into one
    3-band GeoTIFF, converting units where applicable, and bundle the
    results into one zip archive per dataset.

    Unit conversions (applied to 'abs' products only): Kelvin -> degC for
    the absolute temperature indicators, kg/m^2/s -> mm/yr for absolute
    annual precipitation. Existing outputs are not recomputed.
    """
    download()
    stacks = {}
    zips = {}
    # Build the work list: one stacked output per (change, indicator,
    # scenario, window, dataset), fed by the three quartile rasters.
    for d in datasets:
        for s in scenarios:
            for y in range(startyear, endyear+1, 10):
                y1 = y-15
                y2 = y+15
                for i in temp_indicators:
                    for ch in ['abs', 'diff']:
                        outargs = ('stacked', ch, i, s, y1, y2, d)
                        stacks[outargs] = [raster_template(e, ch, i, s, y1, y2, d) for e in ('q25', 'q50', 'q75')]
                for i in pr_indicators:
                    for ch in ['abs', 'ch']:
                        outargs = ('stacked', ch, i, s, y1, y2, d)
                        stacks[outargs] = [raster_template(e, ch, i, s, y1, y2, d) for e in ('q25', 'q50', 'q75')]
    for outargs, rasts in stacks.items():
        # The first element of outargs is always 'stacked'; rebuild the prefix
        # below because a unit-conversion tag may be appended to it.
        (_, ch, i, s, y1, y2, d) = outargs
        pfx = 'stacked'
        conv_degC = i in ['annual_tasmin','annual_tasmax','tavg-tasmin_tasmax'] and ch == 'abs'
        conv_mmyr = i in ['annual_pr'] and ch == 'abs'
        if conv_degC:
            pfx += '-degC'
        elif conv_mmyr:
            pfx += '-mmyr'
        outrast = raster_template(pfx, ch, i, s, y1, y2, d)
        outfile = os.path.join(DATA_DIR, outrast)
        if not os.path.isfile(outfile):
            # Concatenate the three quartile rasters along the band axis.
            arr = None
            profile = None
            for rast in rasts:
                fname = os.path.join(DATA_DIR, rast)
                with rio.open(fname, 'r') as src:
                    _arr = src.read()
                    if arr is None:
                        arr = _arr
                        profile = src.profile
                    else:
                        # Fix: was `arr = arr = np.concatenate(...)` -- a
                        # duplicated assignment target.
                        arr = np.concatenate((arr, _arr), axis=0)
            if conv_degC:
                arr = k2c(arr)
            elif conv_mmyr:
                arr = kgs2mmyr(arr)
            print(f' writing {outrast}')
            profile["driver"] = "GTiff"
            profile["count"] = arr.shape[0]
            profile['crs'] = 'EPSG:4326'
            with rio.open(outfile, 'w', **profile) as dst:
                dst.write(arr.astype(profile['dtype']))
        else:
            print(f'{outfile} exists, skipping')
        zipname = os.path.join(DATA_DIR, f'prep_share_{d}.zip')
        write_opts = {
            'filename': os.path.join(DATA_DIR, outrast),
            'arcname': outrast
        }
        if zipname in zips:
            zips[zipname].append(write_opts)
        else:
            zips[zipname] = [write_opts]
    for zipname, opts_list in zips.items():
        with zipfile.ZipFile(zipname, 'w', compression=zipfile.ZIP_DEFLATED) as z:
            for i, opts in enumerate(opts_list):
                z.write(**opts)
                print(f'{zipname}: {i+1}/{len(opts_list)}', end='\r')
            print('')
if __name__ == '__main__':
    # Command dispatch: the first CLI argument selects a task; anything
    # else (or no argument) falls back to printing the full file listing.
    commands = {
        'csv': csv,
        'archive': move_to_archive,
        'baselines': baselines,
        'zipdl': download_zip,
        'stackscale': stack_and_scale,
        'locaproj': fix_loca_projection,
        'locamask': fix_loca_mask,
    }
    if len(sys.argv) > 1 and sys.argv[1] in commands:
        commands[sys.argv[1]]()
    else:
        main()
| [
"rasterio.open",
"zipfile.ZipFile",
"os.makedirs",
"os.path.isfile",
"os.path.join",
"os.getenv",
"numpy.concatenate"
] | [((568, 592), 'os.getenv', 'os.getenv', (['"""GDDP_BUCKET"""'], {}), "('GDDP_BUCKET')\n", (577, 592), False, 'import os\n'), ((602, 626), 'os.getenv', 'os.getenv', (['"""GDDP_PREFIX"""'], {}), "('GDDP_PREFIX')\n", (611, 626), False, 'import os\n'), ((987, 1057), 'os.path.join', 'os.path.join', (['f"""https://s3.amazonaws.com/md.cc/{ARCHIVE_PREFIX}"""', 'rast'], {}), "(f'https://s3.amazonaws.com/md.cc/{ARCHIVE_PREFIX}', rast)\n", (999, 1057), False, 'import os\n'), ((3242, 3278), 'os.makedirs', 'os.makedirs', (['DATA_DIR'], {'exist_ok': '(True)'}), '(DATA_DIR, exist_ok=True)\n', (3253, 3278), False, 'import os\n'), ((5415, 5443), 'os.path.join', 'os.path.join', (['DATA_DIR', 'rast'], {}), '(DATA_DIR, rast)\n', (5427, 5443), False, 'import os\n'), ((2679, 2705), 'os.path.join', 'os.path.join', (['PREFIX', 'rast'], {}), '(PREFIX, rast)\n', (2691, 2705), False, 'import os\n'), ((2722, 2756), 'os.path.join', 'os.path.join', (['ARCHIVE_PREFIX', 'rast'], {}), '(ARCHIVE_PREFIX, rast)\n', (2734, 2756), False, 'import os\n'), ((3365, 3399), 'os.path.join', 'os.path.join', (['ARCHIVE_PREFIX', 'rast'], {}), '(ARCHIVE_PREFIX, rast)\n', (3377, 3399), False, 'import os\n'), ((3416, 3444), 'os.path.join', 'os.path.join', (['DATA_DIR', 'rast'], {}), '(DATA_DIR, rast)\n', (3428, 3444), False, 'import os\n'), ((3456, 3477), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (3470, 3477), False, 'import os\n'), ((3915, 3953), 'os.path.join', 'os.path.join', (['DATA_DIR', 'f"""{d}_{i}.zip"""'], {}), "(DATA_DIR, f'{d}_{i}.zip')\n", (3927, 3953), False, 'import os\n'), ((5453, 5473), 'rasterio.open', 'rio.open', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (5461, 5473), True, 'import rasterio as rio\n'), ((7205, 7236), 'os.path.join', 'os.path.join', (['DATA_DIR', 'outrast'], {}), '(DATA_DIR, outrast)\n', (7217, 7236), False, 'import os\n'), ((8230, 8275), 'os.path.join', 'os.path.join', (['DATA_DIR', 'f"""prep_share_{d}.zip"""'], {}), "(DATA_DIR, 
f'prep_share_{d}.zip')\n", (8242, 8275), False, 'import os\n'), ((2382, 2412), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (2391, 2412), False, 'import os\n'), ((2444, 2478), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (2453, 2478), False, 'import os\n'), ((3063, 3093), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (3072, 3093), False, 'import os\n'), ((3125, 3159), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (3134, 3159), False, 'import os\n'), ((4001, 4029), 'os.path.join', 'os.path.join', (['DATA_DIR', 'rast'], {}), '(DATA_DIR, rast)\n', (4013, 4029), False, 'import os\n'), ((4054, 4078), 'os.path.join', 'os.path.join', (['s', 'e', 'rast'], {}), '(s, e, rast)\n', (4066, 4078), False, 'import os\n'), ((4528, 4591), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipname', '"""w"""'], {'compression': 'zipfile.ZIP_DEFLATED'}), "(zipname, 'w', compression=zipfile.ZIP_DEFLATED)\n", (4543, 4591), False, 'import zipfile\n'), ((4979, 5007), 'os.path.join', 'os.path.join', (['DATA_DIR', 'rast'], {}), '(DATA_DIR, rast)\n', (4991, 5007), False, 'import os\n'), ((5764, 5792), 'os.path.join', 'os.path.join', (['DATA_DIR', 'rast'], {}), '(DATA_DIR, rast)\n', (5776, 5792), False, 'import os\n'), ((7261, 7284), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (7275, 7284), False, 'import os\n'), ((8323, 8354), 'os.path.join', 'os.path.join', (['DATA_DIR', 'outrast'], {}), '(DATA_DIR, outrast)\n', (8335, 8354), False, 'import os\n'), ((8583, 8646), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipname', '"""w"""'], {'compression': 'zipfile.ZIP_DEFLATED'}), "(zipname, 'w', compression=zipfile.ZIP_DEFLATED)\n", (8598, 8646), False, 'import zipfile\n'), ((5025, 5046), 'rasterio.open', 'rio.open', (['fname', '"""r+"""'], {}), "(fname, 'r+')\n", (5033, 5046), True, 'import rasterio as 
rio\n'), ((5847, 5868), 'rasterio.open', 'rio.open', (['fname', '"""r+"""'], {}), "(fname, 'r+')\n", (5855, 5868), True, 'import rasterio as rio\n'), ((7391, 7419), 'os.path.join', 'os.path.join', (['DATA_DIR', 'rast'], {}), '(DATA_DIR, rast)\n', (7403, 7419), False, 'import os\n'), ((8041, 8074), 'rasterio.open', 'rio.open', (['outfile', '"""w"""'], {}), "(outfile, 'w', **profile)\n", (8049, 8074), True, 'import rasterio as rio\n'), ((2824, 2851), 'os.path.join', 'os.path.join', (['BUCKET', 'rast0'], {}), '(BUCKET, rast0)\n', (2836, 2851), False, 'import os\n'), ((7441, 7461), 'rasterio.open', 'rio.open', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (7449, 7461), True, 'import rasterio as rio\n'), ((7687, 7722), 'numpy.concatenate', 'np.concatenate', (['(arr, _arr)'], {'axis': '(0)'}), '((arr, _arr), axis=0)\n', (7701, 7722), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Parts are based on https://github.com/multimodallearning/pytorch-mask-rcnn
published under MIT license.
"""
import utils.model_utils as mutils
from utils.model_utils import DiceLoss
import utils.exp_utils as utils
from cuda_functions.nms_2D.pth_nms import nms_gpu as nms_2D
from cuda_functions.nms_3D.pth_nms import nms_gpu as nms_3D
from cuda_functions.roi_align_2D.roi_align.crop_and_resize import CropAndResizeFunction as ra2D
from cuda_functions.roi_align_3D.roi_align.crop_and_resize import CropAndResizeFunction as ra3D
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
############################################################
# Networks on top of backbone
############################################################
class RPN(nn.Module):
    """
    Region Proposal Network.

    Applies one (non-shared) 3x3 conv base per pyramid level, then branches
    into a 2-way fg/bg classifier and a box-delta regressor over all anchors.
    """

    def __init__(self, cf, conv):
        # cf: experiment configuration; conv: dimension-aware conv factory
        # (2D or 3D depending on conv.dim).
        super(RPN, self).__init__()

        self.dim = conv.dim
        #self.conv_shared = conv(cf.end_filts, cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
        # One conv base per pyramid level; cf.end_filts is indexed per level.
        self.conv_shared0 = conv(cf.end_filts[0], cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
        self.conv_shared1 = conv(cf.end_filts[1], cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
        self.conv_shared2 = conv(cf.end_filts[2], cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
        self.conv_shared3 = conv(cf.end_filts[3], cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
        # The vnet backbone exposes a fifth pyramid level.
        if 'vnet' in cf.backbone_path:
            self.conv_shared4 = conv(cf.end_filts[4], cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
        # Heads: 2 logits (fg/bg) and 2*dim box deltas per anchor ratio.
        self.conv_class = conv(cf.n_rpn_features, 2 * len(cf.rpn_anchor_ratios), ks=1, stride=1, relu=None)
        self.conv_bbox = conv(cf.n_rpn_features, 2 * self.dim * len(cf.rpn_anchor_ratios), ks=1, stride=1, relu=None)


    def forward(self, x, level):
        """
        :param x: input feature maps (b, in_channels, y, x, (z))
        :param level: int pyramid level index; selects which conv base to apply.
        :return: rpn_class_logits (b, n_anchors, 2)
        :return: rpn_probs (b, n_anchors, 2)
        :return: rpn_bbox (b, n_anchors, 2 * dim)
        """

        # Shared convolutional base of the RPN: dispatch to the conv matching
        # the requested pyramid level.
        #x = self.conv_shared(x)
        if level == 0:
            x = self.conv_shared0(x)
        if level == 1:
            x = self.conv_shared1(x)
        if level == 2:
            x = self.conv_shared2(x)
        if level == 3:
            x = self.conv_shared3(x)
        if level == 4:
            x = self.conv_shared4(x)

        # Anchor Score. (batch, anchors per location * 2, y, x, (z)).
        rpn_class_logits = self.conv_class(x)
        # Reshape to (batch, anchors, 2): move channels last, then flatten
        # the spatial/anchor dimensions.
        axes = (0, 2, 3, 1) if self.dim == 2 else (0, 2, 3, 4, 1)
        rpn_class_logits = rpn_class_logits.permute(*axes)
        rpn_class_logits = rpn_class_logits.contiguous()
        rpn_class_logits = rpn_class_logits.view(x.size()[0], -1, 2)

        # Softmax on last dimension (fg vs. bg).
        rpn_probs = F.softmax(rpn_class_logits, dim=2)

        # Bounding box refinement. (batch, anchors_per_location * (y, x, (z), log(h), log(w), (log(d)), y, x, (z))
        rpn_bbox = self.conv_bbox(x)

        # Reshape to (batch, anchors, 2*dim)
        rpn_bbox = rpn_bbox.permute(*axes)
        rpn_bbox = rpn_bbox.contiguous()
        rpn_bbox = rpn_bbox.view(x.size()[0], -1, self.dim * 2)

        return [rpn_class_logits, rpn_probs, rpn_bbox]
class Classifier(nn.Module):
    """
    Head network for classification and bounding box refinement. Performs RoiAlign, processes resulting features through a
    shared convolutional base and finally branches off the classifier- and regression head.
    """
    def __init__(self, cf, conv):
        # cf: experiment configuration; conv: dimension-aware conv factory.
        super(Classifier, self).__init__()

        self.dim = conv.dim
        self.in_channels = cf.end_filts
        self.pool_size = cf.pool_size
        self.pyramid_levels = cf.pyramid_levels
        # instance_norm does not work with spatial dims (1, 1, (1))
        norm = cf.norm if cf.norm != 'instance_norm' else None

        # The channel width after conv1 depends on the backbone family.
        if 'fpn' in cf.backbone_path:
            self.temp_end_filter = cf.end_filts[cf.pyramid_levels[0]] * 4
            self.conv1 = conv(cf.end_filts[cf.pyramid_levels[0]], self.temp_end_filter, ks=self.pool_size, stride=1, norm=norm, relu=cf.relu)
        if 'vnet' in cf.backbone_path:
            self.temp_end_filter = cf.end_filts[cf.pyramid_levels[0]]
            self.conv1 = conv(self.temp_end_filter, self.temp_end_filter, ks=self.pool_size, stride=1, norm=norm, relu=cf.relu)
        self.conv2 = conv(self.temp_end_filter, self.temp_end_filter, ks=1, stride=1, norm=norm, relu=cf.relu)
        # Per-roi classification head and per-class box-delta regression head.
        self.linear_class = nn.Linear(self.temp_end_filter, cf.head_classes)
        self.linear_bbox = nn.Linear(self.temp_end_filter, cf.head_classes * 2 * self.dim)
        #if 'fpn' in cf.backbone_path:
        #    self.temp_end_filter = cf.end_filts * 4
        #if 'vnet' in cf.backbone_path:
        #    self.temp_end_filter = cf.end_filts
        #self.conv1 = conv(cf.end_filts, self.temp_end_filter , ks=self.pool_size, stride=1, norm=norm, relu=cf.relu)
        #self.conv2 = conv(self.temp_end_filter, self.temp_end_filter, ks=1, stride=1, norm=norm, relu=cf.relu)
        #self.linear_class = nn.Linear(self.temp_end_filter, cf.head_classes)
        #self.linear_bbox = nn.Linear(self.temp_end_filter, cf.head_classes * 2 * self.dim)

    def forward(self, x, rois):
        """
        :param x: input feature maps (b, in_channels, y, x, (z))
        :param rois: normalized box coordinates as proposed by the RPN to be forwarded through
        the second stage (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix). Proposals of all batch elements
        have been merged to one vector, while the origin info has been stored for re-allocation.
        :return: mrcnn_class_logits (n_proposals, n_head_classes)
        :return: mrcnn_bbox (n_proposals, n_head_classes, 2 * dim) predicted corrections to be applied to proposals for refinement.
        """
        # RoiAlign over the pyramid, then the conv base collapses each roi
        # into a single feature vector.
        x = pyramid_roi_align(x, rois, self.pool_size, self.pyramid_levels, self.dim)
        x = self.conv1(x)
        x = self.conv2(x)
        #x = x.view(-1, self.in_channels * 4)
        x = x.view(-1, self.temp_end_filter )
        mrcnn_class_logits = self.linear_class(x)
        mrcnn_bbox = self.linear_bbox(x)
        # Reshape to (n_proposals, head_classes, 2*dim): one refinement per class.
        mrcnn_bbox = mrcnn_bbox.view(mrcnn_bbox.size()[0], -1, self.dim * 2)

        return [mrcnn_class_logits, mrcnn_bbox]
class Mask(nn.Module):
    """
    Head network for proposal-based mask segmentation. Performs RoiAlign, some convolutions and applies sigmoid on the
    output logits to allow for overlapping classes.
    """
    def __init__(self, cf, conv):
        # cf: experiment configuration; conv: dimension-aware conv factory.
        super(Mask, self).__init__()
        self.pool_size = cf.mask_pool_size
        self.pyramid_levels = cf.pyramid_levels
        self.dim = conv.dim
        # Four 3x3 convs on the pooled roi features.
        self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        # 2x upsampling before the final per-class mask logits.
        if conv.dim == 2:
            self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
        else:
            self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)

        self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
        self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
        # Sigmoid (not softmax) so the per-class masks may overlap.
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, rois):
        """
        :param x: input feature maps (b, in_channels, y, x, (z))
        :param rois: normalized box coordinates as proposed by the RPN to be forwarded through
        the second stage (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix). Proposals of all batch elements
        have been merged to one vector, while the origin info has been stored for re-allocation.
        :return: x: masks (n_sampled_proposals (n_detections in inference), n_classes, y, x, (z))
        """
        x = pyramid_roi_align(x, rois, self.pool_size, self.pyramid_levels, self.dim)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.relu(self.deconv(x))
        x = self.conv5(x)
        x = self.sigmoid(x)
        return x
############################################################
# Loss Functions
############################################################
def compute_rpn_class_loss(rpn_match, rpn_class_logits, shem_poolsize):
    """
    :param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
    :param rpn_class_logits: (n_anchors, 2). logits from RPN classifier.
    :param shem_poolsize: int. factor of top-k candidates to draw from per negative sample
    (stochastic-hard-example-mining).
    :return: loss: torch tensor
    :return: np_neg_ix: 1D array containing indices of the neg_roi_logits, which have been sampled for training.
    """

    # filter out neutral anchors (label 0): they contribute nothing to the loss.
    pos_indices = torch.nonzero(rpn_match == 1)
    neg_indices = torch.nonzero(rpn_match == -1)

    # loss for positive samples
    # (`0 not in t.size()` <=> the index tensor is non-empty)
    if 0 not in pos_indices.size():
        pos_indices = pos_indices.squeeze(1)
        roi_logits_pos = rpn_class_logits[pos_indices]
        # all positive anchors share the fg target label 1.
        pos_loss = F.cross_entropy(roi_logits_pos, torch.LongTensor([1] * pos_indices.shape[0]).cuda())
    else:
        pos_loss = torch.FloatTensor([0]).cuda()

    # loss for negative samples: draw hard negative examples (SHEM)
    # that match the number of positive samples, but at least 1.
    if 0 not in neg_indices.size():
        neg_indices = neg_indices.squeeze(1)
        roi_logits_neg = rpn_class_logits[neg_indices]
        negative_count = np.max((1, pos_indices.cpu().data.numpy().size))
        roi_probs_neg = F.softmax(roi_logits_neg, dim=1)
        # sample hard negatives from the top negative_count * shem_poolsize
        # scoring candidates.
        neg_ix = mutils.shem(roi_probs_neg, negative_count, shem_poolsize)
        neg_loss = F.cross_entropy(roi_logits_neg[neg_ix], torch.LongTensor([0] * neg_ix.shape[0]).cuda())
        # indices are returned for monitoring which negatives were mined.
        np_neg_ix = neg_ix.cpu().data.numpy()
    else:
        neg_loss = torch.FloatTensor([0]).cuda()
        np_neg_ix = np.array([]).astype('int32')

    # average the positive and negative partial losses.
    loss = (pos_loss + neg_loss) / 2
    return loss, np_neg_ix
def compute_rpn_bbox_loss(rpn_target_deltas, rpn_pred_deltas, rpn_match):
    """
    Smooth-L1 regression loss on the box deltas of positively matched anchors.

    :param rpn_target_deltas: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
        Uses 0 padding to fill in unsed bbox deltas.
    :param rpn_pred_deltas: predicted deltas from RPN. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
    :param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
    :return: loss: torch 1D tensor.
    """
    positive = torch.nonzero(rpn_match == 1)
    # empty index tensor -> no positive anchors in this batch -> zero loss.
    if 0 in positive.size():
        return torch.FloatTensor([0]).cuda()
    pos_ix = positive.squeeze(1)
    # Only positively matched anchors contribute to the regression loss.
    pred = rpn_pred_deltas[pos_ix]
    # Targets are zero-padded; trim to the number of predictions used.
    target = rpn_target_deltas[:pred.size()[0], :]
    return F.smooth_l1_loss(pred, target)
def compute_mrcnn_class_loss(target_class_ids, pred_class_logits):
    """
    Cross-entropy classification loss for the box head.

    :param target_class_ids: (n_sampled_rois) batch dimension was merged into roi dimension.
    :param pred_class_logits: (n_sampled_rois, n_classes)
    :return: loss: torch 1D tensor.
    """
    # No sampled rois -> nothing to score -> zero loss.
    if 0 in target_class_ids.size():
        return torch.FloatTensor([0.]).cuda()
    return F.cross_entropy(pred_class_logits, target_class_ids.long())
def compute_mrcnn_bbox_loss(mrcnn_target_deltas, mrcnn_pred_deltas, target_class_ids):
    """
    Smooth-L1 regression loss for the box head, taken over positive rois only
    and using each roi's ground-truth class to select its predicted deltas.

    :param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
    :param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
    :param target_class_ids: (n_sampled_rois)
    :return: loss: torch 1D tensor.
    """
    positive = torch.nonzero(target_class_ids > 0)
    # No positive rois -> zero loss.
    if 0 in positive.size():
        return torch.FloatTensor([0]).cuda()
    roi_ix = positive[:, 0]
    class_ix = target_class_ids[roi_ix].long()
    # Detach targets so the loss only backpropagates through predictions.
    gt_deltas = mrcnn_target_deltas[roi_ix, :].detach()
    pred_deltas = mrcnn_pred_deltas[roi_ix, class_ix, :]
    return F.smooth_l1_loss(pred_deltas, gt_deltas)
def compute_mrcnn_mask_loss(target_masks, pred_masks, target_class_ids):
    """
    Binary cross-entropy loss on the mask head, taken over positive rois only
    and using each roi's ground-truth class to select its predicted mask.

    :param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    :param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
    :param target_class_ids: (n_sampled_rois)
    :return: loss: torch 1D tensor.
    """
    positive = torch.nonzero(target_class_ids > 0)
    # Only positive ROIs contribute to the loss; no positives -> zero loss.
    if 0 in positive.size():
        return torch.FloatTensor([0]).cuda()
    roi_ix = positive[:, 0]
    class_ix = target_class_ids[roi_ix].long()
    # Detach targets; only the class-specific predicted mask of each roi is scored.
    y_true = target_masks[roi_ix, :, :].detach()
    y_pred = pred_masks[roi_ix, class_ix, :, :]
    return F.binary_cross_entropy(y_pred, y_true)
############################################################
# Helper Layers
############################################################
def proposal_layer(rpn_pred_probs, rpn_pred_deltas, proposal_count, anchors, cf):
    """
    Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinment detals to anchors.
    :param rpn_pred_probs: (b, n_anchors, 2)
    :param rpn_pred_deltas: (b, n_anchors, (y, x, (z), log(h), log(w), (log(d))))
    :param proposal_count: int. fixed number of proposals to emit per batch element.
    :param anchors: (n_anchors, 2*dim) reference anchor boxes.
    :param cf: experiment configuration (nms thresholds, limits, scale, window).
    :return: batch_normalized_boxes: Proposals in normalized coordinates
    (b, proposal_count, (y1, x1, y2, x2, (z1), (z2)))
    :return: batch_out_proposals: Box coords + RPN foreground scores
    for monitoring/plotting (b, proposal_count, (y1, x1, y2, x2, (z1), (z2), score))
    """
    batch_scores = rpn_pred_probs[:, :, 1]
    batch_deltas = rpn_pred_deltas
    batch_anchors = anchors
    batch_normalized_boxes = []
    batch_out_proposals = []

    # loop over batch dimension.
    for ix in range(batch_scores.shape[0]):

        scores = batch_scores[ix]
        deltas = batch_deltas[ix]
        anchors = batch_anchors.clone()
        # norm deltas: undo the std-dev normalization applied during training.
        std_dev = torch.from_numpy(cf.rpn_bbox_std_dev[None]).float().cuda()
        deltas = deltas * std_dev

        # improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = min(cf.pre_nms_limit, anchors.size()[0])
        scores, order = scores.sort(descending=True)
        order = order[:pre_nms_limit]
        scores = scores[:pre_nms_limit]
        deltas = deltas[order, :]
        anchors = anchors[order, :]

        # apply deltas to anchors to get refined anchors and filter with non-maximum surpression.
        # (2D vs. 3D path chosen by the delta width: 4 coords -> 2D).
        if batch_deltas.shape[-1] == 4:
            boxes = mutils.apply_box_deltas_2D(anchors, deltas)
            boxes = mutils.clip_boxes_2D(boxes, cf.window)
            keep = nms_2D(torch.cat((boxes, scores.unsqueeze(1)), 1), cf.rpn_nms_threshold)
            norm = torch.from_numpy(cf.scale).float().cuda()
        else:
            boxes = mutils.apply_box_deltas_3D(anchors, deltas)
            boxes = mutils.clip_boxes_3D(boxes, cf.window)
            keep = nms_3D(torch.cat((boxes, scores.unsqueeze(1)), 1), cf.rpn_nms_threshold)
            norm = torch.from_numpy(cf.scale).float().cuda()

        keep = keep[:proposal_count]
        boxes = boxes[keep, :]
        rpn_scores = scores[keep][:, None]

        # pad missing boxes with 0 so every batch element emits exactly
        # proposal_count proposals.
        if boxes.shape[0] < proposal_count:
            n_pad_boxes = proposal_count - boxes.shape[0]
            zeros = torch.zeros([n_pad_boxes, boxes.shape[1]]).cuda()
            boxes = torch.cat([boxes, zeros], dim=0)
            zeros = torch.zeros([n_pad_boxes, rpn_scores.shape[1]]).cuda()
            rpn_scores = torch.cat([rpn_scores, zeros], dim=0)

        # concat box and score info for monitoring/plotting.
        batch_out_proposals.append(torch.cat((boxes, rpn_scores), 1).cpu().data.numpy())
        # normalize dimensions to range of 0 to 1.
        normalized_boxes = boxes / norm

        # add back batch dimension
        batch_normalized_boxes.append(normalized_boxes.unsqueeze(0))

    batch_normalized_boxes = torch.cat(batch_normalized_boxes)
    batch_out_proposals = np.array(batch_out_proposals)
    return batch_normalized_boxes, batch_out_proposals
def pyramid_roi_align(feature_maps, rois, pool_size, pyramid_levels, dim):
    """
    Implements ROI Pooling on multiple levels of the feature pyramid.
    :param feature_maps: list of feature maps, each of shape (b, c, y, x , (z))
    :param rois: proposals (normalized coords.) as returned by RPN. contain info about original batch element allocation.
    (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs)
    :param pool_size: list of poolsizes in dims: [x, y, (z)]
    :param pyramid_levels: list. [0, 1, 2, ...]
    :param dim: dimensionality of the boxes (2 or 3).
    :return: pooled: pooled feature map rois (n_proposals, c, poolsize_y, poolsize_x, (poolsize_z))
    Output:
    Pooled regions in the shape: [num_boxes, height, width, channels].
    The width and height are those specific in the pool_shape in the layer
    constructor.
    """
    boxes = rois[:, :dim * 2]
    batch_ixs = rois[:, dim * 2]
    # Assign each ROI to a level in the pyramid based on the ROI area.
    if dim == 2:
        y1, x1, y2, x2 = boxes.chunk(4, dim=1)
    else:
        y1, x1, y2, x2, z1, z2 = boxes.chunk(6, dim=1)
    h = y2 - y1
    w = x2 - x1
    # Equation 1 in https://arxiv.org/abs/1612.03144. Account for
    # the fact that our coordinates are normalized here.
    # divide sqrt(h*w) by 1 instead image_area.
    # BUGFIX: clamp to the full pyramid range (first..last level). The previous
    # code clamped with min == max == pyramid_levels[0], which collapsed every
    # ROI onto the first level and made the per-level loop below a no-op for
    # all other levels.
    roi_level = (4 + mutils.log2(torch.sqrt(h * w))).round().int().clamp(pyramid_levels[0], pyramid_levels[-1])
    # if Pyramid contains additional level P6, adapt the roi_level assignemnt accordingly.
    #if len(pyramid_levels) == 5:
    #    roi_level[h*w > 0.65] = 5
    # Loop through levels and apply ROI pooling to each.
    pooled = []
    box_to_level = []
    for level_ix, level in enumerate(pyramid_levels):
        # boolean mask of ROIs assigned to this pyramid level.
        ix = roi_level == level
        if not ix.any():
            continue
        ix = torch.nonzero(ix)[:, 0]
        level_boxes = boxes[ix, :]
        # re-assign rois to feature map of original batch element.
        ind = batch_ixs[ix].int()
        # Keep track of which box is mapped to which level
        box_to_level.append(ix)
        # Stop gradient propogation to ROI proposals
        level_boxes = level_boxes.detach()
        # Crop and Resize
        # From Mask R-CNN paper: "We sample four regular locations, so
        # that we can evaluate either max or average pooling. In fact,
        # interpolating only a single value at each bin center (without
        # pooling) is nearly as effective."
        #
        # Here we use the simplified approach of a single value per bin,
        # which is how is done in tf.crop_and_resize()
        #
        # Also fixed a bug from original implementation, reported in:
        # https://hackernoon.com/how-tensorflows-tf-image-resize-stole-60-days-of-my-life-aba5eb093f35
        if len(pool_size) == 2:
            pooled_features = ra2D(pool_size[0], pool_size[1], 0)(feature_maps[level_ix], level_boxes, ind)
        else:
            pooled_features = ra3D(pool_size[0], pool_size[1], pool_size[2], 0)(feature_maps[level_ix], level_boxes, ind)
        pooled.append(pooled_features)
    # Pack pooled features into one tensor
    pooled = torch.cat(pooled, dim=0)
    # Pack box_to_level mapping into one array and add another
    # column representing the order of pooled boxes
    box_to_level = torch.cat(box_to_level, dim=0)
    # Rearrange pooled features to match the order of the original boxes
    _, box_to_level = torch.sort(box_to_level)
    pooled = pooled[box_to_level, :, :]
    return pooled
def detection_target_layer(batch_proposals, batch_mrcnn_class_scores, batch_gt_class_ids, batch_gt_boxes, cf):
    """
    Subsamples proposals for mrcnn losses and generates targets. Sampling is done per batch element, seems to have positive
    effects on training, as opposed to sampling over entire batch. Negatives are sampled via stochastic-hard-example-mining
    (SHEM), where a number of negative proposals are drawn from larger pool of highest scoring proposals for stochasticity.
    Scoring is obtained here as the max over all foreground probabilities as returned by mrcnn_classifier (worked better than
    loss-based class balancing methods like "online-hard-example-mining" or "focal loss".)
    :param batch_proposals: (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs).
    boxes as proposed by RPN. n_proposals here is determined by batch_size * POST_NMS_ROIS.
    :param batch_mrcnn_class_scores: (n_proposals, n_classes)
    :param batch_gt_class_ids: list over batch elements. Each element is a list over the corresponding roi target labels.
    :param batch_gt_boxes: list over batch elements. Each element is a list over the corresponding roi target coordinates.
    :param batch_gt_masks: list over batch elements. Each element is binary mask of shape (n_gt_rois, y, x, (z), c)
    :return: sample_indices: (n_sampled_rois) indices of sampled proposals to be used for loss functions.
    :return: target_class_ids: (n_sampled_rois)containing target class labels of sampled proposals.
    :return: target_deltas: (n_sampled_rois, 2 * dim) containing target deltas of sampled proposals for box refinement.
    :return: target_masks: (n_sampled_rois, y, x, (z)) containing target masks of sampled proposals.
    """
    # normalization of target coordinates: gt boxes come in pixel coords
    # and are divided by the patch size so they live in the same normalized
    # space as the proposals.
    if cf.dim == 2:
        h, w = cf.patch_size
        scale = torch.from_numpy(np.array([h, w, h, w])).float().cuda()
    else:
        h, w, z = cf.patch_size
        scale = torch.from_numpy(np.array([h, w, h, w, z, z])).float().cuda()
    positive_count = 0
    negative_count = 0
    sample_positive_indices = []
    sample_negative_indices = []
    sample_deltas = []
    sample_class_ids = []
    # loop over batch and get positive and negative sample rois.
    for b in range(len(batch_gt_class_ids)):
        gt_class_ids = torch.from_numpy(batch_gt_class_ids[b]).int().cuda()
        if np.any(batch_gt_class_ids[b] > 0):  # skip roi selection for no gt images.
            gt_boxes = torch.from_numpy(batch_gt_boxes[b]).float().cuda() / scale
        else:
            # empty tensor: marks "no gt" and routes into the all-negative branch below.
            gt_boxes = torch.FloatTensor().cuda()
        # get proposals and indices of current batch element.
        # last proposal column holds the batch index; it is stripped here.
        proposals = batch_proposals[batch_proposals[:, -1] == b][:, :-1]
        batch_element_indices = torch.nonzero(batch_proposals[:, -1] == b).squeeze(1)
        # Compute overlaps matrix [proposals, gt_boxes]
        if 0 not in gt_boxes.size():
            if gt_boxes.shape[1] == 4:
                overlaps = mutils.bbox_overlaps_2D(proposals, gt_boxes)
            else:
                overlaps = mutils.bbox_overlaps_3D(proposals, gt_boxes)
            # Determine postive and negative ROIs
            roi_iou_max = torch.max(overlaps, dim=1)[0]
            # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
            # (threshold relaxed to 0.3 in 3D, where high IoU is harder to reach).
            positive_roi_bool = roi_iou_max >= (0.5 if cf.dim == 2 else 0.3)
            # 2. Negative ROIs are those with < 0.1 with every GT box.
            negative_roi_bool = roi_iou_max < (0.1 if cf.dim == 2 else 0.01)
        else:
            # no gt in this image: no positives, all proposals are negatives.
            positive_roi_bool = torch.FloatTensor().cuda()
            negative_roi_bool = torch.from_numpy(np.array([1] * proposals.shape[0])).cuda()
        # Sample Positive ROIs
        if 0 not in torch.nonzero(positive_roi_bool).size():
            positive_indices = torch.nonzero(positive_roi_bool).squeeze(1)
            positive_samples = int(cf.train_rois_per_image * cf.roi_positive_ratio)
            # random subsample of positives up to the configured budget.
            rand_idx = torch.randperm(positive_indices.size()[0])
            rand_idx = rand_idx[:positive_samples].cuda()
            positive_indices = positive_indices[rand_idx]
            positive_samples = positive_indices.size()[0]
            positive_rois = proposals[positive_indices, :]
            # Assign positive ROIs to GT boxes.
            positive_overlaps = overlaps[positive_indices, :]
            roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]
            roi_gt_boxes = gt_boxes[roi_gt_box_assignment, :]
            roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment]
            # Compute bbox refinement targets for positive ROIs
            deltas = mutils.box_refinement(positive_rois, roi_gt_boxes)
            # normalize deltas by the configured std dev (inverse op is applied at inference).
            std_dev = torch.from_numpy(cf.bbox_std_dev).float().cuda()
            deltas /= std_dev
            sample_positive_indices.append(batch_element_indices[positive_indices])
            sample_deltas.append(deltas)
            sample_class_ids.append(roi_gt_class_ids)
            positive_count += positive_samples
        else:
            positive_samples = 0
        # Negative ROIs. Add enough to maintain positive:negative ratio, but at least 1. Sample via SHEM.
        if 0 not in torch.nonzero(negative_roi_bool).size():
            negative_indices = torch.nonzero(negative_roi_bool).squeeze(1)
            r = 1.0 / cf.roi_positive_ratio
            # negatives to draw = positives * (1/ratio - 1), floored at 1.
            b_neg_count = np.max((int(r * positive_samples - positive_samples), 1))
            roi_probs_neg = batch_mrcnn_class_scores[batch_element_indices[negative_indices]]
            # stochastic hard example mining over classifier scores.
            raw_sampled_indices = mutils.shem(roi_probs_neg, b_neg_count, cf.shem_poolsize)
            sample_negative_indices.append(batch_element_indices[negative_indices[raw_sampled_indices]])
            negative_count += raw_sampled_indices.size()[0]
    if len(sample_positive_indices) > 0:
        target_deltas = torch.cat(sample_deltas)
        target_class_ids = torch.cat(sample_class_ids)
    # Pad target information with zeros for negative ROIs.
    if positive_count > 0 and negative_count > 0:
        sample_indices = torch.cat((torch.cat(sample_positive_indices), torch.cat(sample_negative_indices)), dim=0)
        zeros = torch.zeros(negative_count).int().cuda()
        target_class_ids = torch.cat([target_class_ids, zeros], dim=0)
        zeros = torch.zeros(negative_count, cf.dim * 2).cuda()
        target_deltas = torch.cat([target_deltas, zeros], dim=0)
    elif positive_count > 0:
        sample_indices = torch.cat(sample_positive_indices)
    elif negative_count > 0:
        sample_indices = torch.cat(sample_negative_indices)
        zeros = torch.zeros(negative_count).int().cuda()
        target_class_ids = zeros
        zeros = torch.zeros(negative_count, cf.dim * 2).cuda()
        target_deltas = zeros
    else:
        # no samples at all: return empty tensors of the expected dtypes.
        sample_indices = torch.LongTensor().cuda()
        target_class_ids = torch.IntTensor().cuda()
        target_deltas = torch.FloatTensor().cuda()
    return sample_indices, target_class_ids, target_deltas
############################################################
# Output Handler
############################################################
def refine_detections(rois, probs, deltas, batch_ixs, cf):
    """
    Refine classified proposals, filter overlaps and return final detections.
    :param rois: (n_proposals, 2 * dim) normalized boxes as proposed by RPN. n_proposals = batch_size * POST_NMS_ROIS
    :param probs: (n_proposals, n_classes) softmax probabilities for all rois as predicted by mrcnn classifier.
    :param deltas: (n_proposals, n_classes, 2 * dim) box refinement deltas as predicted by mrcnn bbox regressor.
    :param batch_ixs: (n_proposals) batch element assignemnt info for re-allocation.
    :return: result: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score))
    """
    # class IDs per ROI. Since scores of all classes are of interest (not just max class), all are kept at this point.
    class_ids = []
    fg_classes = cf.head_classes - 1
    # repeat vectors to fill in predictions for all foreground classes.
    # after this, row k*n_proposals+i corresponds to (proposal i, fg class k+1).
    for ii in range(1, fg_classes + 1):
        class_ids += [ii] * rois.shape[0]
    class_ids = torch.from_numpy(np.array(class_ids)).cuda()
    rois = rois.repeat(fg_classes, 1)
    probs = probs.repeat(fg_classes, 1)
    deltas = deltas.repeat(fg_classes, 1, 1)
    batch_ixs = batch_ixs.repeat(fg_classes)
    # get class-specific scores and bounding box deltas
    idx = torch.arange(class_ids.size()[0]).long().cuda()
    class_scores = probs[idx, class_ids]
    deltas_specific = deltas[idx, class_ids]
    batch_ixs = batch_ixs[idx]
    # apply bounding box deltas. re-scale to image coordinates.
    # undo the bbox_std_dev normalization applied during target generation.
    std_dev = torch.from_numpy(np.reshape(cf.rpn_bbox_std_dev, [1, cf.dim * 2])).float().cuda()
    scale = torch.from_numpy(cf.scale).float().cuda()
    refined_rois = mutils.apply_box_deltas_2D(rois, deltas_specific * std_dev) * scale if cf.dim == 2 else \
        mutils.apply_box_deltas_3D(rois, deltas_specific * std_dev) * scale
    # round and cast to int since we're deadling with pixels now
    refined_rois = mutils.clip_to_window(cf.window, refined_rois)
    refined_rois = torch.round(refined_rois)
    # filter out low confidence boxes
    keep = idx
    keep_bool = (class_scores >= cf.model_min_confidence)
    if 0 not in torch.nonzero(keep_bool).size():
        score_keep = torch.nonzero(keep_bool)[:, 0]
        pre_nms_class_ids = class_ids[score_keep]
        pre_nms_rois = refined_rois[score_keep]
        pre_nms_scores = class_scores[score_keep]
        pre_nms_batch_ixs = batch_ixs[score_keep]
        # NMS is applied per batch element and per class, indices are then
        # merged back into one global keep set.
        for j, b in enumerate(mutils.unique1d(pre_nms_batch_ixs)):
            bixs = torch.nonzero(pre_nms_batch_ixs == b)[:, 0]
            bix_class_ids = pre_nms_class_ids[bixs]
            bix_rois = pre_nms_rois[bixs]
            bix_scores = pre_nms_scores[bixs]
            for i, class_id in enumerate(mutils.unique1d(bix_class_ids)):
                ixs = torch.nonzero(bix_class_ids == class_id)[:, 0]
                # nms expects boxes sorted by score.
                ix_rois = bix_rois[ixs]
                ix_scores = bix_scores[ixs]
                ix_scores, order = ix_scores.sort(descending=True)
                ix_rois = ix_rois[order, :]
                if cf.dim == 2:
                    class_keep = nms_2D(torch.cat((ix_rois, ix_scores.unsqueeze(1)), dim=1), cf.detection_nms_threshold)
                else:
                    class_keep = nms_3D(torch.cat((ix_rois, ix_scores.unsqueeze(1)), dim=1), cf.detection_nms_threshold)
                # map indices back: class_keep -> sort order -> class subset ->
                # batch subset -> score_keep subset -> global idx.
                class_keep = keep[score_keep[bixs[ixs[order[class_keep]]]]]
                # merge indices over classes for current batch element
                b_keep = class_keep if i == 0 else mutils.unique1d(torch.cat((b_keep, class_keep)))
            # only keep top-k boxes of current batch-element
            top_ids = class_scores[b_keep].sort(descending=True)[1][:cf.model_max_instances_per_batch_element]
            b_keep = b_keep[top_ids]
            # merge indices over batch elements.
            batch_keep = b_keep if j == 0 else mutils.unique1d(torch.cat((batch_keep, b_keep)))
        keep = batch_keep
    else:
        # nothing above threshold: fall back to a single dummy index.
        keep = torch.tensor([0]).long().cuda()
    # arrange output
    result = torch.cat((refined_rois[keep],
                        batch_ixs[keep].unsqueeze(1),
                        class_ids[keep].unsqueeze(1).float(),
                        class_scores[keep].unsqueeze(1)), dim=1)
    return result
def get_results(cf, img_shape, detections, seg_logits, box_results_list=None):
    """
    Restores batch dimension of merged detections, unmolds detections, creates and fills results dict.
    :param cf: config object providing dim and model_min_confidence.
    :param img_shape: shape of the input batch (b, c, y, x, (z)).
    :param detections: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score)
    :param seg_logits: raw segmentation logits or None (dummy zero map is returned then).
    :param box_results_list: None or list of output boxes for monitoring/plotting.
    each element is a list of boxes per batch element.
    :return: results_dict: dictionary with keys:
    'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
    [[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
    'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, 1] only fg. vs. bg for now.
    class-specific return of masks will come with implementation of instance segmentation evaluation.
    """
    det_np = detections.cpu().data.numpy()
    # last-but-two column carries the batch element index; split detections per element.
    elem_ids = det_np[:, cf.dim * 2]
    per_element = [det_np[elem_ids == b] for b in range(img_shape[0])]
    # for test_forward, where no previous list exists.
    if box_results_list is None:
        box_results_list = [[] for _ in range(img_shape[0])]
    # loop over batch and unmold detections.
    for b in range(img_shape[0]):
        dets = per_element[b]
        if 0 in dets.shape:
            continue
        coords = dets[:, :2 * cf.dim].astype(np.int32)
        labels = dets[:, 2 * cf.dim + 1].astype(np.int32)
        confs = dets[:, 2 * cf.dim + 2]
        # Filter out detections with zero area. Often only happens in early
        # stages of training when the network weights are still a bit random.
        heights = coords[:, 2] - coords[:, 0]
        widths = coords[:, 3] - coords[:, 1]
        if cf.dim == 2:
            degenerate = np.where(heights * widths <= 0)[0]
        else:
            depths = coords[:, 5] - coords[:, 4]
            degenerate = np.where(heights * widths * depths <= 0)[0]
        if degenerate.shape[0] > 0:
            coords = np.delete(coords, degenerate, axis=0)
            labels = np.delete(labels, degenerate, axis=0)
            confs = np.delete(confs, degenerate, axis=0)
        if 0 in coords.shape:
            continue
        # add final perdictions to results.
        for k, conf in enumerate(confs):
            if conf >= cf.model_min_confidence:
                box_results_list[b].append({'box_coords': coords[k], 'box_score': conf,
                                            'box_type': 'det', 'box_pred_class_id': labels[k]})
    # create and fill results dictionary.
    results_dict = {'boxes': box_results_list}
    if seg_logits is None:
        # output dummy segmentation for retina_net.
        results_dict['seg_preds'] = np.zeros(img_shape)[:, 0][:, np.newaxis]
    else:
        # output label maps for retina_unet.
        results_dict['seg_preds'] = F.softmax(seg_logits, 1).argmax(1).cpu().data.numpy()[:, np.newaxis].astype('uint8')
        results_dict['seg_logits'] = seg_logits
    return results_dict
############################################################
# Mask R-CNN Class
############################################################
class net(nn.Module):
    """
    Mask R-CNN style detector used here without the mask head (Faster R-CNN mode;
    the mask head is commented out), with an additional pixelwise segmentation
    output taken from the backbone. Composed of a backbone (FPN or VNet), an RPN,
    and an mrcnn classifier / bbox-regressor head.
    """

    def __init__(self, cf, logger):
        # cf: configuration object. logger: logging handle for build/info messages.
        super(net, self).__init__()
        self.cf = cf
        self.logger = logger
        self.build()
        if self.cf.weight_init is not None:
            logger.info("using pytorch weight init of type {}".format(self.cf.weight_init))
            mutils.initialize_weights(self)
        else:
            logger.info("using default pytorch weight init")

    def build(self):
        """Build Mask R-CNN architecture."""
        # Image size must be dividable by 2 multiple times.
        h, w = self.cf.patch_size[:2]
        if h / 2**5 != int(h / 2**5) or w / 2**5 != int(w / 2**5):
            raise Exception("Image size must be dividable by 2 at least 5 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")
        # instanciate abstract multi dimensional conv class and backbone class.
        conv = mutils.NDConvGenerator(self.cf.dim)
        backbone = utils.import_module('bbone', self.cf.backbone_path)
        # build Anchors, FPN, RPN, Classifier / Bbox-Regressor -head, Mask-head
        self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
        self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
        # NOTE(review): if backbone_path contains neither 'fpn' nor 'vnet',
        # self.featurenet / self.final_conv are never set — confirm configs
        # guarantee one of the two.
        if 'fpn' in self.cf.backbone_path:
            self.featurenet = backbone.FPN(self.cf, conv,operate_stride1 = self.cf.operate_stride1)
            # 36 in-channels presumably matches the FPN stride-1 output — TODO confirm.
            self.final_conv = conv(36, self.cf.num_seg_classes, ks=1, pad=0, norm=self.cf.norm, relu=None)
        if 'vnet' in self.cf.backbone_path:
            self.featurenet = backbone.VNet(self.cf)
            self.final_conv = conv(2, self.cf.num_seg_classes, ks=1, pad=0, norm=self.cf.norm, relu=None)
        self.rpn = RPN(self.cf, conv)
        self.classifier = Classifier(self.cf, conv)
        #self.mask = Mask(self.cf, conv)

    def train_forward(self, batch, is_validation=False):
        """
        train method (also used for validation monitoring). wrapper around forward pass of network. prepares input data
        for processing, computes losses, and stores outputs in a dictionary.
        :param batch: dictionary containing 'data', 'seg', etc.
        :return: results_dict: dictionary with keys:
        'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
        [[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
        'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes].
        'torch_loss': 1D torch tensor for backprop.
        'class_loss': classification loss for monitoring.
        """
        img = batch['data']
        gt_class_ids = batch['roi_labels']
        gt_boxes = batch['bb_target']
        # NOTE(review): 'axes' is computed but never used in this method.
        axes = (0, 2, 3, 1) if self.cf.dim == 2 else (0, 2, 3, 4, 1)
        var_seg_ohe = torch.FloatTensor(mutils.get_one_hot_encoding(batch['seg'], self.cf.num_seg_classes)).cuda()#channel == 2
        var_seg = torch.LongTensor(batch['seg']).cuda()#channel == 1
        img = torch.from_numpy(img).float().cuda()
        batch_rpn_class_loss = torch.FloatTensor([0]).cuda()
        batch_rpn_bbox_loss = torch.FloatTensor([0]).cuda()
        # list of output boxes for monitoring/plotting. each element is a list of boxes per batch element.
        box_results_list = [[] for _ in range(img.shape[0])]
        #forward passes. 1. general forward pass, where no activations are saved in second stage (for performance
        # monitoring and loss sampling). 2. second stage forward pass of sampled rois with stored activations for backprop.
        rpn_class_logits, rpn_pred_deltas, proposal_boxes, detections, seg_logits = self.forward(img)
        mrcnn_class_logits, mrcnn_pred_deltas, target_class_ids, mrcnn_target_deltas, \
        sample_proposals = self.loss_samples_forward(gt_class_ids, gt_boxes)
        # loop over batch
        for b in range(img.shape[0]):
            if len(gt_boxes[b]) > 0:
                # add gt boxes to output list for monitoring.
                for ix in range(len(gt_boxes[b])):
                    box_results_list[b].append({'box_coords': batch['bb_target'][b][ix],
                                                'box_label': batch['roi_labels'][b][ix], 'box_type': 'gt'})
                # match gt boxes with anchors to generate targets for RPN losses.
                rpn_match, rpn_target_deltas = mutils.gt_anchor_matching(self.cf, self.np_anchors, gt_boxes[b])
                # add positive anchors used for loss to output list for monitoring.
                #pos_anchors = mutils.clip_boxes_numpy(self.np_anchors[np.argwhere(rpn_match == 1)][:, 0], img.shape[2:])
                #for p in pos_anchors:
                #    box_results_list[b].append({'box_coords': p, 'box_type': 'pos_anchor'})
            else:
                # no gt: mark all anchors negative, dummy deltas.
                rpn_match = np.array([-1]*self.np_anchors.shape[0])
                rpn_target_deltas = np.array([0])
            rpn_match = torch.from_numpy(rpn_match).cuda()
            rpn_target_deltas = torch.from_numpy(rpn_target_deltas).float().cuda()
            # compute RPN losses.
            rpn_class_loss, neg_anchor_ix = compute_rpn_class_loss(rpn_match, rpn_class_logits[b], self.cf.shem_poolsize)
            rpn_bbox_loss = compute_rpn_bbox_loss(rpn_target_deltas, rpn_pred_deltas[b], rpn_match)
            batch_rpn_class_loss += rpn_class_loss / img.shape[0]
            batch_rpn_bbox_loss += rpn_bbox_loss / img.shape[0]
            # add negative anchors used for loss to output list for monitoring.
            #neg_anchors = mutils.clip_boxes_numpy(self.np_anchors[np.argwhere(rpn_match == -1)][0, neg_anchor_ix], img.shape[2:])
            #for n in neg_anchors:
            #    box_results_list[b].append({'box_coords': n, 'box_type': 'neg_anchor'})
        # add highest scoring proposals to output list for monitoring.
        #rpn_proposals = proposal_boxes[b][proposal_boxes[b, :, -1].argsort()][::-1]
        #for r in rpn_proposals[:self.cf.n_plot_rpn_props, :-1]:
        #    box_results_list[b].append({'box_coords': r, 'box_type': 'prop'})
        # add positive and negative roi samples used for mrcnn losses to output list for monitoring.
        #if 0 not in sample_proposals.shape:
        #    rois = mutils.clip_to_window(self.cf.window, sample_proposals).cpu().data.numpy()
        #for ix, r in enumerate(rois):
        #    box_results_list[int(r[-1])].append({'box_coords': r[:-1] * self.cf.scale,
        #                                         'box_type': 'pos_class' if target_class_ids[ix] > 0 else 'neg_class'})
        batch_rpn_class_loss = batch_rpn_class_loss
        batch_rpn_bbox_loss = batch_rpn_bbox_loss
        # compute mrcnn losses.
        mrcnn_class_loss = compute_mrcnn_class_loss(target_class_ids, mrcnn_class_logits)
        mrcnn_bbox_loss = compute_mrcnn_bbox_loss(mrcnn_target_deltas, mrcnn_pred_deltas, target_class_ids)
        # mrcnn can be run without pixelwise annotations available (Faster R-CNN mode).
        # In this case, the mask_loss is taken out of training.
        # if not self.cf.frcnn_mode:
        #     mrcnn_mask_loss = compute_mrcnn_mask_loss(target_mask, mrcnn_pred_mask, target_class_ids)
        # else:
        #     mrcnn_mask_loss = torch.FloatTensor([0]).cuda()
        #soft_seg = F.softmax(seg_logits,dim=1)
        #print('soft_seg',soft_seg.shape)
        #print('var_seg_ohe',var_seg_ohe.shape)
        #print('soft_seg',soft_seg.max())
        #print('soft_seg',soft_seg.min())
        seg_loss_dice = 1 - mutils.batch_dice(F.softmax(seg_logits, dim=1), var_seg_ohe)
        #seg_loss_ce = F.cross_entropy(seg_logits, var_seg[:, 0])
        print('seg_loss_dice',seg_loss_dice)
        mydice = DiceLoss()
        seg_loss_dice_my = mydice(F.softmax(seg_logits,dim=1),var_seg_ohe)
        print('seg_loss_dice_my',seg_loss_dice_my)
        # NOTE(review): the backprop loss is ONLY the dice segmentation loss here;
        # RPN and mrcnn losses are computed for monitoring but not added to
        # 'loss' — confirm this is intentional (segmentation-only training phase?).
        loss = seg_loss_dice_my
        seg_loss_dice = seg_loss_dice_my
        # monitor RPN performance: detection count = the number of correctly matched proposals per fg-class.
        dcount = [list(target_class_ids.cpu().data.numpy()).count(c) for c in np.arange(self.cf.head_classes)[1:]]
        # run unmolding of predictions for monitoring and merge all results to one dictionary.
        results_dict = get_results(self.cf, img.shape, detections, seg_logits, box_results_list)
        results_dict['torch_loss'] = loss
        results_dict['monitor_values'] = {'loss': loss.item(), 'class_loss': mrcnn_class_loss.item()}
        results_dict['monitor_losses'] = {'rpn_class_loss':batch_rpn_class_loss.item(),'rpn_bbox_loss':batch_rpn_bbox_loss.item(),'mrcnn_class_loss':mrcnn_class_loss.item(),'mrcnn_bbox_loss':mrcnn_bbox_loss.item(),'seg_loss_dice':seg_loss_dice.item()}
        results_dict['logger_string'] = "loss: {0:.2f}, rpn_class: {1:.2f}, rpn_bbox: {2:.2f}, mrcnn_class: {3:.2f}, " \
                                        "mrcnn_bbox: {4:.2f}, dice_loss: {5:.2f}, dcount {6}"\
            .format(loss.item(), batch_rpn_class_loss.item(), batch_rpn_bbox_loss.item(), mrcnn_class_loss.item(),
                    mrcnn_bbox_loss.item(), seg_loss_dice.item(), dcount)
        return results_dict

    def test_forward(self, batch, return_masks=True):
        """
        test method. wrapper around forward pass of network without usage of any ground truth information.
        prepares input data for processing and stores outputs in a dictionary.
        :param batch: dictionary containing 'data'
        :param return_masks: boolean. If True, full resolution masks are returned for all proposals (speed trade-off).
        :return: results_dict: dictionary with keys:
        'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
        [[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
        'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes]
        """
        img = batch['data']
        img = torch.from_numpy(img).float().cuda()
        _, _, _, detections, seg_logits = self.forward(img)
        results_dict = get_results(self.cf, img.shape, detections, seg_logits)
        return results_dict

    def forward(self, img, is_training=True):
        """
        :param img: input images (b, c, y, x, (z)).
        :return: rpn_pred_logits: (b, n_anchors, 2)
        :return: rpn_pred_deltas: (b, n_anchors, (y, x, (z), log(h), log(w), (log(d))))
        :return: batch_proposal_boxes: (b, n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix)) only for monitoring/plotting.
        :return: detections: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score)
        :return: detection_masks: (n_final_detections, n_classes, y, x, (z)) raw molded masks as returned by mask-head.
        """
        # extract features.
        fpn_outs = self.featurenet(img)
        # NOTE(review): if backbone_path matches neither branch, seg_logits is
        # unbound and the return below raises — confirm configs guarantee one.
        if 'vnet' in self.cf.backbone_path:
            seg_logits = fpn_outs[0]#channel = 2
            # NOTE(review): seg_logits_final is computed but never used/returned;
            # the raw 2-channel output is returned instead — looks like a bug, confirm.
            seg_logits_final = self.final_conv(seg_logits)
        if 'fpn' in self.cf.backbone_path:
            seg_logits = self.final_conv(fpn_outs[0])
        #print('seg_logits',seg_logits.shape)
        #print('seg_logits max',seg_logits.max())
        rpn_feature_maps = [fpn_outs[i + 1] for i in self.cf.pyramid_levels]
        #rpn_feature_maps = [fpn_outs[i] for i in self.cf.pyramid_levels]
        # stored for re-use by loss_samples_forward (second-stage pass).
        self.mrcnn_feature_maps = rpn_feature_maps
        #for ff in self.mrcnn_feature_maps:
        #    print('ff',ff.shape)
        # loop through pyramid layers and apply RPN.
        layer_outputs = []  # list of lists
        for ii,p in enumerate(rpn_feature_maps):
            level = self.cf.pyramid_levels[ii]
            outrpn = self.rpn(p,level)
            layer_outputs.append(outrpn)
        # concatenate layer outputs.
        # convert from list of lists of level outputs to list of lists of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        outputs = list(zip(*layer_outputs))
        outputs = [torch.cat(list(o), dim=1) for o in outputs]
        rpn_pred_logits, rpn_pred_probs, rpn_pred_deltas = outputs
        #print('rpn_pred_probs',rpn_pred_logits.shape)
        #print('rpn_pred_deltas',rpn_pred_deltas.shape)
        # generate proposals: apply predicted deltas to anchors and filter by foreground scores from RPN classifier.
        proposal_count = self.cf.post_nms_rois_training if is_training else self.cf.post_nms_rois_inference
        batch_rpn_rois, batch_proposal_boxes = proposal_layer(rpn_pred_probs, rpn_pred_deltas, proposal_count, self.anchors, self.cf)
        #print('batch_rpn_rois',batch_rpn_rois.shape)
        #print('batch_proposal_boxes',batch_proposal_boxes.shape)
        # merge batch dimension of proposals while storing allocation info in coordinate dimension.
        batch_ixs = torch.from_numpy(np.repeat(np.arange(batch_rpn_rois.shape[0]), batch_rpn_rois.shape[1])).float().cuda()
        rpn_rois = batch_rpn_rois.view(-1, batch_rpn_rois.shape[2])
        self.rpn_rois_batch_info = torch.cat((rpn_rois, batch_ixs.unsqueeze(1)), dim=1)
        # this is the first of two forward passes in the second stage, where no activations are stored for backprop.
        # here, all proposals are forwarded (with virtual_batch_size = batch_size * post_nms_rois.)
        # for inference/monitoring as well as sampling of rois for the loss functions.
        # processed in chunks of roi_chunk_size to re-adjust to gpu-memory.
        chunked_rpn_rois = self.rpn_rois_batch_info.split(self.cf.roi_chunk_size)
        class_logits_list, bboxes_list = [], []
        with torch.no_grad():
            for chunk in chunked_rpn_rois:
                chunk_class_logits, chunk_bboxes = self.classifier(self.mrcnn_feature_maps, chunk)
                class_logits_list.append(chunk_class_logits)
                bboxes_list.append(chunk_bboxes)
        batch_mrcnn_class_logits = torch.cat(class_logits_list, 0)
        batch_mrcnn_bbox = torch.cat(bboxes_list, 0)
        self.batch_mrcnn_class_scores = F.softmax(batch_mrcnn_class_logits, dim=1)
        #print('batch_mrcnn_class_scores',self.batch_mrcnn_class_scores.shape)
        #print('batch_mrcnn_bbox',batch_mrcnn_bbox.shape)
        # refine classified proposals, filter and return final detections.
        detections = refine_detections(rpn_rois, self.batch_mrcnn_class_scores, batch_mrcnn_bbox, batch_ixs, self.cf, )
        #print('detections',detections.shape)
        return [rpn_pred_logits, rpn_pred_deltas, batch_proposal_boxes, detections, seg_logits]

    def loss_samples_forward(self, batch_gt_class_ids, batch_gt_boxes):
        """
        this is the second forward pass through the second stage (features from stage one are re-used).
        samples few rois in detection_target_layer and forwards only those for loss computation.
        :param batch_gt_class_ids: list over batch elements. Each element is a list over the corresponding roi target labels.
        :param batch_gt_boxes: list over batch elements. Each element is a list over the corresponding roi target coordinates.
        :param batch_gt_masks: list over batch elements. Each element is binary mask of shape (n_gt_rois, y, x, (z), c)
        :return: sample_logits: (n_sampled_rois, n_classes) predicted class scores.
        :return: sample_boxes: (n_sampled_rois, n_classes, 2 * dim) predicted corrections to be applied to proposals for refinement.
        :return: sample_mask: (n_sampled_rois, n_classes, y, x, (z)) predicted masks per class and proposal.
        :return: sample_target_class_ids: (n_sampled_rois) target class labels of sampled proposals.
        :return: sample_target_deltas: (n_sampled_rois, 2 * dim) target deltas of sampled proposals for box refinement.
        :return: sample_target_masks: (n_sampled_rois, y, x, (z)) target masks of sampled proposals.
        :return: sample_proposals: (n_sampled_rois, 2 * dim) RPN output for sampled proposals. only for monitoring/plotting.
        """
        # sample rois for loss and get corresponding targets for all Mask R-CNN head network losses.
        # relies on self.rpn_rois_batch_info / self.batch_mrcnn_class_scores set by forward().
        sample_ix, sample_target_class_ids, sample_target_deltas = \
            detection_target_layer(self.rpn_rois_batch_info, self.batch_mrcnn_class_scores,
                                   batch_gt_class_ids, batch_gt_boxes, self.cf)
        #print('sample_ix',sample_ix.shape)
        #print('sample_target_class_ids',sample_target_class_ids.shape)
        #print('sample_target_deltas',sample_target_deltas.shape)
        # re-use feature maps and RPN output from first forward pass.
        sample_proposals = self.rpn_rois_batch_info[sample_ix]
        if 0 not in sample_proposals.size():
            sample_logits, sample_boxes = self.classifier(self.mrcnn_feature_maps, sample_proposals)
        else:
            # no sampled rois: return empty tensors so downstream losses see empty inputs.
            sample_logits = torch.FloatTensor().cuda()
            sample_boxes = torch.FloatTensor().cuda()
        #print('sample_logits',sample_logits.shape)
        #print('sample_boxes',sample_boxes.shape)
        return [sample_logits, sample_boxes, sample_target_class_ids, sample_target_deltas, sample_proposals]
| [
"torch.nn.functional.binary_cross_entropy",
"utils.model_utils.generate_pyramid_anchors",
"utils.model_utils.apply_box_deltas_2D",
"utils.model_utils.clip_boxes_2D",
"torch.sqrt",
"torch.nn.ConvTranspose3d",
"torch.cat",
"utils.model_utils.DiceLoss",
"numpy.arange",
"torch.no_grad",
"torch.Float... | [((10172, 10201), 'torch.nonzero', 'torch.nonzero', (['(rpn_match == 1)'], {}), '(rpn_match == 1)\n', (10185, 10201), False, 'import torch\n'), ((10220, 10250), 'torch.nonzero', 'torch.nonzero', (['(rpn_match == -1)'], {}), '(rpn_match == -1)\n', (10233, 10250), False, 'import torch\n'), ((18040, 18073), 'torch.cat', 'torch.cat', (['batch_normalized_boxes'], {}), '(batch_normalized_boxes)\n', (18049, 18073), False, 'import torch\n'), ((18100, 18129), 'numpy.array', 'np.array', (['batch_out_proposals'], {}), '(batch_out_proposals)\n', (18108, 18129), True, 'import numpy as np\n'), ((21299, 21323), 'torch.cat', 'torch.cat', (['pooled'], {'dim': '(0)'}), '(pooled, dim=0)\n', (21308, 21323), False, 'import torch\n'), ((21459, 21489), 'torch.cat', 'torch.cat', (['box_to_level'], {'dim': '(0)'}), '(box_to_level, dim=0)\n', (21468, 21489), False, 'import torch\n'), ((21586, 21610), 'torch.sort', 'torch.sort', (['box_to_level'], {}), '(box_to_level)\n', (21596, 21610), False, 'import torch\n'), ((30787, 30833), 'utils.model_utils.clip_to_window', 'mutils.clip_to_window', (['cf.window', 'refined_rois'], {}), '(cf.window, refined_rois)\n', (30808, 30833), True, 'import utils.model_utils as mutils\n'), ((30853, 30878), 'torch.round', 'torch.round', (['refined_rois'], {}), '(refined_rois)\n', (30864, 30878), False, 'import torch\n'), ((3858, 3892), 'torch.nn.functional.softmax', 'F.softmax', (['rpn_class_logits'], {'dim': '(2)'}), '(rpn_class_logits, dim=2)\n', (3867, 3892), True, 'import torch.nn.functional as F\n'), ((5552, 5600), 'torch.nn.Linear', 'nn.Linear', (['self.temp_end_filter', 'cf.head_classes'], {}), '(self.temp_end_filter, cf.head_classes)\n', (5561, 5600), True, 'import torch.nn as nn\n'), ((5628, 5691), 'torch.nn.Linear', 'nn.Linear', (['self.temp_end_filter', '(cf.head_classes * 2 * self.dim)'], {}), '(self.temp_end_filter, cf.head_classes * 2 * self.dim)\n', (5637, 5691), True, 'import torch.nn as nn\n'), ((8607, 8619), 'torch.nn.Sigmoid', 
'nn.Sigmoid', ([], {}), '()\n', (8617, 8619), True, 'import torch.nn as nn\n'), ((10951, 10983), 'torch.nn.functional.softmax', 'F.softmax', (['roi_logits_neg'], {'dim': '(1)'}), '(roi_logits_neg, dim=1)\n', (10960, 10983), True, 'import torch.nn.functional as F\n'), ((11001, 11058), 'utils.model_utils.shem', 'mutils.shem', (['roi_probs_neg', 'negative_count', 'shem_poolsize'], {}), '(roi_probs_neg, negative_count, shem_poolsize)\n', (11012, 11058), True, 'import utils.model_utils as mutils\n'), ((12285, 12333), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['rpn_pred_deltas', 'target_deltas'], {}), '(rpn_pred_deltas, target_deltas)\n', (12301, 12333), True, 'import torch.nn.functional as F\n'), ((13619, 13659), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['pred_bbox', 'target_bbox'], {}), '(pred_bbox, target_bbox)\n', (13635, 13659), True, 'import torch.nn.functional as F\n'), ((14570, 14608), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (14592, 14608), True, 'import torch.nn.functional as F\n'), ((24060, 24093), 'numpy.any', 'np.any', (['(batch_gt_class_ids[b] > 0)'], {}), '(batch_gt_class_ids[b] > 0)\n', (24066, 24093), True, 'import numpy as np\n'), ((27529, 27553), 'torch.cat', 'torch.cat', (['sample_deltas'], {}), '(sample_deltas)\n', (27538, 27553), False, 'import torch\n'), ((27581, 27608), 'torch.cat', 'torch.cat', (['sample_class_ids'], {}), '(sample_class_ids)\n', (27590, 27608), False, 'import torch\n'), ((27919, 27962), 'torch.cat', 'torch.cat', (['[target_class_ids, zeros]'], {'dim': '(0)'}), '([target_class_ids, zeros], dim=0)\n', (27928, 27962), False, 'import torch\n'), ((28050, 28090), 'torch.cat', 'torch.cat', (['[target_deltas, zeros]'], {'dim': '(0)'}), '([target_deltas, zeros], dim=0)\n', (28059, 28090), False, 'import torch\n'), ((37890, 37925), 'utils.model_utils.NDConvGenerator', 'mutils.NDConvGenerator', (['self.cf.dim'], {}), 
'(self.cf.dim)\n', (37912, 37925), True, 'import utils.model_utils as mutils\n'), ((37945, 37996), 'utils.exp_utils.import_module', 'utils.import_module', (['"""bbone"""', 'self.cf.backbone_path'], {}), "('bbone', self.cf.backbone_path)\n", (37964, 37996), True, 'import utils.exp_utils as utils\n'), ((38104, 38157), 'utils.model_utils.generate_pyramid_anchors', 'mutils.generate_pyramid_anchors', (['self.logger', 'self.cf'], {}), '(self.logger, self.cf)\n', (38135, 38157), True, 'import utils.model_utils as mutils\n'), ((44765, 44775), 'utils.model_utils.DiceLoss', 'DiceLoss', ([], {}), '()\n', (44773, 44775), False, 'from utils.model_utils import DiceLoss\n'), ((51031, 51062), 'torch.cat', 'torch.cat', (['class_logits_list', '(0)'], {}), '(class_logits_list, 0)\n', (51040, 51062), False, 'import torch\n'), ((51090, 51115), 'torch.cat', 'torch.cat', (['bboxes_list', '(0)'], {}), '(bboxes_list, 0)\n', (51099, 51115), False, 'import torch\n'), ((51156, 51198), 'torch.nn.functional.softmax', 'F.softmax', (['batch_mrcnn_class_logits'], {'dim': '(1)'}), '(batch_mrcnn_class_logits, dim=1)\n', (51165, 51198), True, 'import torch.nn.functional as F\n'), ((8220, 8291), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['cf.end_filts', 'cf.end_filts'], {'kernel_size': '(2)', 'stride': '(2)'}), '(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)\n', (8238, 8291), True, 'import torch.nn as nn\n'), ((8332, 8403), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['cf.end_filts', 'cf.end_filts'], {'kernel_size': '(2)', 'stride': '(2)'}), '(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)\n', (8350, 8403), True, 'import torch.nn as nn\n'), ((8425, 8446), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8432, 8446), True, 'import torch.nn as nn\n'), ((8473, 8499), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8485, 8499), True, 'import torch.nn as nn\n'), ((13335, 13370), 'torch.nonzero', 
'torch.nonzero', (['(target_class_ids > 0)'], {}), '(target_class_ids > 0)\n', (13348, 13370), False, 'import torch\n'), ((14322, 14357), 'torch.nonzero', 'torch.nonzero', (['(target_class_ids > 0)'], {}), '(target_class_ids > 0)\n', (14335, 14357), False, 'import torch\n'), ((16605, 16648), 'utils.model_utils.apply_box_deltas_2D', 'mutils.apply_box_deltas_2D', (['anchors', 'deltas'], {}), '(anchors, deltas)\n', (16631, 16648), True, 'import utils.model_utils as mutils\n'), ((16669, 16707), 'utils.model_utils.clip_boxes_2D', 'mutils.clip_boxes_2D', (['boxes', 'cf.window'], {}), '(boxes, cf.window)\n', (16689, 16707), True, 'import utils.model_utils as mutils\n'), ((16896, 16939), 'utils.model_utils.apply_box_deltas_3D', 'mutils.apply_box_deltas_3D', (['anchors', 'deltas'], {}), '(anchors, deltas)\n', (16922, 16939), True, 'import utils.model_utils as mutils\n'), ((16960, 16998), 'utils.model_utils.clip_boxes_3D', 'mutils.clip_boxes_3D', (['boxes', 'cf.window'], {}), '(boxes, cf.window)\n', (16980, 16998), True, 'import utils.model_utils as mutils\n'), ((17493, 17525), 'torch.cat', 'torch.cat', (['[boxes, zeros]'], {'dim': '(0)'}), '([boxes, zeros], dim=0)\n', (17502, 17525), False, 'import torch\n'), ((17626, 17663), 'torch.cat', 'torch.cat', (['[rpn_scores, zeros]'], {'dim': '(0)'}), '([rpn_scores, zeros], dim=0)\n', (17635, 17663), False, 'import torch\n'), ((19969, 19986), 'torch.nonzero', 'torch.nonzero', (['ix'], {}), '(ix)\n', (19982, 19986), False, 'import torch\n'), ((26315, 26365), 'utils.model_utils.box_refinement', 'mutils.box_refinement', (['positive_rois', 'roi_gt_boxes'], {}), '(positive_rois, roi_gt_boxes)\n', (26336, 26365), True, 'import utils.model_utils as mutils\n'), ((27240, 27297), 'utils.model_utils.shem', 'mutils.shem', (['roi_probs_neg', 'b_neg_count', 'cf.shem_poolsize'], {}), '(roi_probs_neg, b_neg_count, cf.shem_poolsize)\n', (27251, 27297), True, 'import utils.model_utils as mutils\n'), ((28145, 28179), 'torch.cat', 'torch.cat', 
(['sample_positive_indices'], {}), '(sample_positive_indices)\n', (28154, 28179), False, 'import torch\n'), ((30536, 30595), 'utils.model_utils.apply_box_deltas_2D', 'mutils.apply_box_deltas_2D', (['rois', '(deltas_specific * std_dev)'], {}), '(rois, deltas_specific * std_dev)\n', (30562, 30595), True, 'import utils.model_utils as mutils\n'), ((30634, 30693), 'utils.model_utils.apply_box_deltas_3D', 'mutils.apply_box_deltas_3D', (['rois', '(deltas_specific * std_dev)'], {}), '(rois, deltas_specific * std_dev)\n', (30660, 30693), True, 'import utils.model_utils as mutils\n'), ((31062, 31086), 'torch.nonzero', 'torch.nonzero', (['keep_bool'], {}), '(keep_bool)\n', (31075, 31086), False, 'import torch\n'), ((31322, 31356), 'utils.model_utils.unique1d', 'mutils.unique1d', (['pre_nms_batch_ixs'], {}), '(pre_nms_batch_ixs)\n', (31337, 31356), True, 'import utils.model_utils as mutils\n'), ((37207, 37238), 'utils.model_utils.initialize_weights', 'mutils.initialize_weights', (['self'], {}), '(self)\n', (37232, 37238), True, 'import utils.model_utils as mutils\n'), ((44810, 44838), 'torch.nn.functional.softmax', 'F.softmax', (['seg_logits'], {'dim': '(1)'}), '(seg_logits, dim=1)\n', (44819, 44838), True, 'import torch.nn.functional as F\n'), ((50727, 50742), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (50740, 50742), False, 'import torch\n'), ((10553, 10575), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (10570, 10575), False, 'import torch\n'), ((11241, 11263), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (11258, 11263), False, 'import torch\n'), ((11291, 11303), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11299, 11303), True, 'import numpy as np\n'), ((11894, 11923), 'torch.nonzero', 'torch.nonzero', (['(rpn_match == 1)'], {}), '(rpn_match == 1)\n', (11907, 11923), False, 'import torch\n'), ((11951, 11980), 'torch.nonzero', 'torch.nonzero', (['(rpn_match == 1)'], {}), '(rpn_match == 1)\n', (11964, 11980), False, 
'import torch\n'), ((12359, 12381), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (12376, 12381), False, 'import torch\n'), ((12819, 12843), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0]'], {}), '([0.0])\n', (12836, 12843), False, 'import torch\n'), ((13265, 13300), 'torch.nonzero', 'torch.nonzero', (['(target_class_ids > 0)'], {}), '(target_class_ids > 0)\n', (13278, 13300), False, 'import torch\n'), ((13685, 13707), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (13702, 13707), False, 'import torch\n'), ((14147, 14182), 'torch.nonzero', 'torch.nonzero', (['(target_class_ids > 0)'], {}), '(target_class_ids > 0)\n', (14160, 14182), False, 'import torch\n'), ((14634, 14656), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (14651, 14656), False, 'import torch\n'), ((20987, 21022), 'cuda_functions.roi_align_2D.roi_align.crop_and_resize.CropAndResizeFunction', 'ra2D', (['pool_size[0]', 'pool_size[1]', '(0)'], {}), '(pool_size[0], pool_size[1], 0)\n', (20991, 21022), True, 'from cuda_functions.roi_align_2D.roi_align.crop_and_resize import CropAndResizeFunction as ra2D\n'), ((21109, 21158), 'cuda_functions.roi_align_3D.roi_align.crop_and_resize.CropAndResizeFunction', 'ra3D', (['pool_size[0]', 'pool_size[1]', 'pool_size[2]', '(0)'], {}), '(pool_size[0], pool_size[1], pool_size[2], 0)\n', (21113, 21158), True, 'from cuda_functions.roi_align_3D.roi_align.crop_and_resize import CropAndResizeFunction as ra3D\n'), ((24449, 24491), 'torch.nonzero', 'torch.nonzero', (['(batch_proposals[:, -1] == b)'], {}), '(batch_proposals[:, -1] == b)\n', (24462, 24491), False, 'import torch\n'), ((24663, 24707), 'utils.model_utils.bbox_overlaps_2D', 'mutils.bbox_overlaps_2D', (['proposals', 'gt_boxes'], {}), '(proposals, gt_boxes)\n', (24686, 24707), True, 'import utils.model_utils as mutils\n'), ((24753, 24797), 'utils.model_utils.bbox_overlaps_3D', 'mutils.bbox_overlaps_3D', (['proposals', 'gt_boxes'], {}), '(proposals, 
gt_boxes)\n', (24776, 24797), True, 'import utils.model_utils as mutils\n'), ((24875, 24901), 'torch.max', 'torch.max', (['overlaps'], {'dim': '(1)'}), '(overlaps, dim=1)\n', (24884, 24901), False, 'import torch\n'), ((26061, 26096), 'torch.max', 'torch.max', (['positive_overlaps'], {'dim': '(1)'}), '(positive_overlaps, dim=1)\n', (26070, 26096), False, 'import torch\n'), ((27755, 27789), 'torch.cat', 'torch.cat', (['sample_positive_indices'], {}), '(sample_positive_indices)\n', (27764, 27789), False, 'import torch\n'), ((27791, 27825), 'torch.cat', 'torch.cat', (['sample_negative_indices'], {}), '(sample_negative_indices)\n', (27800, 27825), False, 'import torch\n'), ((27979, 28018), 'torch.zeros', 'torch.zeros', (['negative_count', '(cf.dim * 2)'], {}), '(negative_count, cf.dim * 2)\n', (27990, 28018), False, 'import torch\n'), ((28234, 28268), 'torch.cat', 'torch.cat', (['sample_negative_indices'], {}), '(sample_negative_indices)\n', (28243, 28268), False, 'import torch\n'), ((29872, 29891), 'numpy.array', 'np.array', (['class_ids'], {}), '(class_ids)\n', (29880, 29891), True, 'import numpy as np\n'), ((31007, 31031), 'torch.nonzero', 'torch.nonzero', (['keep_bool'], {}), '(keep_bool)\n', (31020, 31031), False, 'import torch\n'), ((31379, 31416), 'torch.nonzero', 'torch.nonzero', (['(pre_nms_batch_ixs == b)'], {}), '(pre_nms_batch_ixs == b)\n', (31392, 31416), False, 'import torch\n'), ((31605, 31635), 'utils.model_utils.unique1d', 'mutils.unique1d', (['bix_class_ids'], {}), '(bix_class_ids)\n', (31620, 31635), True, 'import utils.model_utils as mutils\n'), ((35676, 35712), 'numpy.delete', 'np.delete', (['boxes', 'exclude_ix'], {'axis': '(0)'}), '(boxes, exclude_ix, axis=0)\n', (35685, 35712), True, 'import numpy as np\n'), ((35741, 35781), 'numpy.delete', 'np.delete', (['class_ids', 'exclude_ix'], {'axis': '(0)'}), '(class_ids, exclude_ix, axis=0)\n', (35750, 35781), True, 'import numpy as np\n'), ((35807, 35844), 'numpy.delete', 'np.delete', (['scores', 
'exclude_ix'], {'axis': '(0)'}), '(scores, exclude_ix, axis=0)\n', (35816, 35844), True, 'import numpy as np\n'), ((36455, 36474), 'numpy.zeros', 'np.zeros', (['img_shape'], {}), '(img_shape)\n', (36463, 36474), True, 'import numpy as np\n'), ((39959, 39989), 'torch.LongTensor', 'torch.LongTensor', (["batch['seg']"], {}), "(batch['seg'])\n", (39975, 39989), False, 'import torch\n'), ((40093, 40115), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (40110, 40115), False, 'import torch\n'), ((40153, 40175), 'torch.FloatTensor', 'torch.FloatTensor', (['[0]'], {}), '([0])\n', (40170, 40175), False, 'import torch\n'), ((41403, 41467), 'utils.model_utils.gt_anchor_matching', 'mutils.gt_anchor_matching', (['self.cf', 'self.np_anchors', 'gt_boxes[b]'], {}), '(self.cf, self.np_anchors, gt_boxes[b])\n', (41428, 41467), True, 'import utils.model_utils as mutils\n'), ((41854, 41895), 'numpy.array', 'np.array', (['([-1] * self.np_anchors.shape[0])'], {}), '([-1] * self.np_anchors.shape[0])\n', (41862, 41895), True, 'import numpy as np\n'), ((41930, 41943), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (41938, 41943), True, 'import numpy as np\n'), ((44594, 44622), 'torch.nn.functional.softmax', 'F.softmax', (['seg_logits'], {'dim': '(1)'}), '(seg_logits, dim=1)\n', (44603, 44622), True, 'import torch.nn.functional as F\n'), ((10471, 10515), 'torch.LongTensor', 'torch.LongTensor', (['([1] * pos_indices.shape[0])'], {}), '([1] * pos_indices.shape[0])\n', (10487, 10515), False, 'import torch\n'), ((11118, 11157), 'torch.LongTensor', 'torch.LongTensor', (['([0] * neg_ix.shape[0])'], {}), '([0] * neg_ix.shape[0])\n', (11134, 11157), False, 'import torch\n'), ((17423, 17465), 'torch.zeros', 'torch.zeros', (['[n_pad_boxes, boxes.shape[1]]'], {}), '([n_pad_boxes, boxes.shape[1]])\n', (17434, 17465), False, 'import torch\n'), ((17546, 17593), 'torch.zeros', 'torch.zeros', (['[n_pad_boxes, rpn_scores.shape[1]]'], {}), '([n_pad_boxes, rpn_scores.shape[1]])\n', 
(17557, 17593), False, 'import torch\n'), ((24254, 24273), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (24271, 24273), False, 'import torch\n'), ((25247, 25266), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (25264, 25266), False, 'import torch\n'), ((25416, 25448), 'torch.nonzero', 'torch.nonzero', (['positive_roi_bool'], {}), '(positive_roi_bool)\n', (25429, 25448), False, 'import torch\n'), ((25488, 25520), 'torch.nonzero', 'torch.nonzero', (['positive_roi_bool'], {}), '(positive_roi_bool)\n', (25501, 25520), False, 'import torch\n'), ((26868, 26900), 'torch.nonzero', 'torch.nonzero', (['negative_roi_bool'], {}), '(negative_roi_bool)\n', (26881, 26900), False, 'import torch\n'), ((26940, 26972), 'torch.nonzero', 'torch.nonzero', (['negative_roi_bool'], {}), '(negative_roi_bool)\n', (26953, 26972), False, 'import torch\n'), ((30475, 30501), 'torch.from_numpy', 'torch.from_numpy', (['cf.scale'], {}), '(cf.scale)\n', (30491, 30501), False, 'import torch\n'), ((31661, 31701), 'torch.nonzero', 'torch.nonzero', (['(bix_class_ids == class_id)'], {}), '(bix_class_ids == class_id)\n', (31674, 31701), False, 'import torch\n'), ((32860, 32891), 'torch.cat', 'torch.cat', (['(batch_keep, b_keep)'], {}), '((batch_keep, b_keep))\n', (32869, 32891), False, 'import torch\n'), ((35361, 35433), 'numpy.where', 'np.where', (['((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)'], {}), '((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)\n', (35369, 35433), True, 'import numpy as np\n'), ((35484, 35591), 'numpy.where', 'np.where', (['((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 5] -\n boxes[:, 4]) <= 0)'], {}), '((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * (boxes\n [:, 5] - boxes[:, 4]) <= 0)\n', (35492, 35591), True, 'import numpy as np\n'), ((39853, 39919), 'utils.model_utils.get_one_hot_encoding', 'mutils.get_one_hot_encoding', (["batch['seg']", 'self.cf.num_seg_classes'], {}), 
"(batch['seg'], self.cf.num_seg_classes)\n", (39880, 39919), True, 'import utils.model_utils as mutils\n'), ((41969, 41996), 'torch.from_numpy', 'torch.from_numpy', (['rpn_match'], {}), '(rpn_match)\n', (41985, 41996), False, 'import torch\n'), ((45162, 45193), 'numpy.arange', 'np.arange', (['self.cf.head_classes'], {}), '(self.cf.head_classes)\n', (45171, 45193), True, 'import numpy as np\n'), ((53965, 53984), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (53982, 53984), False, 'import torch\n'), ((54019, 54038), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (54036, 54038), False, 'import torch\n'), ((15968, 16011), 'torch.from_numpy', 'torch.from_numpy', (['cf.rpn_bbox_std_dev[None]'], {}), '(cf.rpn_bbox_std_dev[None])\n', (15984, 16011), False, 'import torch\n'), ((23996, 24035), 'torch.from_numpy', 'torch.from_numpy', (['batch_gt_class_ids[b]'], {}), '(batch_gt_class_ids[b])\n', (24012, 24035), False, 'import torch\n'), ((25323, 25357), 'numpy.array', 'np.array', (['([1] * proposals.shape[0])'], {}), '([1] * proposals.shape[0])\n', (25331, 25357), True, 'import numpy as np\n'), ((27851, 27878), 'torch.zeros', 'torch.zeros', (['negative_count'], {}), '(negative_count)\n', (27862, 27878), False, 'import torch\n'), ((28375, 28414), 'torch.zeros', 'torch.zeros', (['negative_count', '(cf.dim * 2)'], {}), '(negative_count, cf.dim * 2)\n', (28386, 28414), False, 'import torch\n'), ((28487, 28505), 'torch.LongTensor', 'torch.LongTensor', ([], {}), '()\n', (28503, 28505), False, 'import torch\n'), ((28540, 28557), 'torch.IntTensor', 'torch.IntTensor', ([], {}), '()\n', (28555, 28557), False, 'import torch\n'), ((28589, 28608), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (28606, 28608), False, 'import torch\n'), ((30398, 30446), 'numpy.reshape', 'np.reshape', (['cf.rpn_bbox_std_dev', '[1, cf.dim * 2]'], {}), '(cf.rpn_bbox_std_dev, [1, cf.dim * 2])\n', (30408, 30446), True, 'import numpy as np\n'), ((32504, 32535), 'torch.cat', 
'torch.cat', (['(b_keep, class_keep)'], {}), '((b_keep, class_keep))\n', (32513, 32535), False, 'import torch\n'), ((32946, 32963), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (32958, 32963), False, 'import torch\n'), ((38181, 38214), 'torch.from_numpy', 'torch.from_numpy', (['self.np_anchors'], {}), '(self.np_anchors)\n', (38197, 38214), False, 'import torch\n'), ((40025, 40046), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (40041, 40046), False, 'import torch\n'), ((47047, 47068), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (47063, 47068), False, 'import torch\n'), ((16819, 16845), 'torch.from_numpy', 'torch.from_numpy', (['cf.scale'], {}), '(cf.scale)\n', (16835, 16845), False, 'import torch\n'), ((17110, 17136), 'torch.from_numpy', 'torch.from_numpy', (['cf.scale'], {}), '(cf.scale)\n', (17126, 17136), False, 'import torch\n'), ((23539, 23561), 'numpy.array', 'np.array', (['[h, w, h, w]'], {}), '([h, w, h, w])\n', (23547, 23561), True, 'import numpy as np\n'), ((23653, 23681), 'numpy.array', 'np.array', (['[h, w, h, w, z, z]'], {}), '([h, w, h, w, z, z])\n', (23661, 23681), True, 'import numpy as np\n'), ((26388, 26421), 'torch.from_numpy', 'torch.from_numpy', (['cf.bbox_std_dev'], {}), '(cf.bbox_std_dev)\n', (26404, 26421), False, 'import torch\n'), ((42036, 42071), 'torch.from_numpy', 'torch.from_numpy', (['rpn_target_deltas'], {}), '(rpn_target_deltas)\n', (42052, 42071), False, 'import torch\n'), ((17761, 17794), 'torch.cat', 'torch.cat', (['(boxes, rpn_scores)', '(1)'], {}), '((boxes, rpn_scores), 1)\n', (17770, 17794), False, 'import torch\n'), ((24158, 24193), 'torch.from_numpy', 'torch.from_numpy', (['batch_gt_boxes[b]'], {}), '(batch_gt_boxes[b])\n', (24174, 24193), False, 'import torch\n'), ((28285, 28312), 'torch.zeros', 'torch.zeros', (['negative_count'], {}), '(negative_count)\n', (28296, 28312), False, 'import torch\n'), ((49970, 50004), 'numpy.arange', 'np.arange', 
(['batch_rpn_rois.shape[0]'], {}), '(batch_rpn_rois.shape[0])\n', (49979, 50004), True, 'import numpy as np\n'), ((19492, 19509), 'torch.sqrt', 'torch.sqrt', (['(h * w)'], {}), '(h * w)\n', (19502, 19509), False, 'import torch\n'), ((36587, 36611), 'torch.nn.functional.softmax', 'F.softmax', (['seg_logits', '(1)'], {}), '(seg_logits, 1)\n', (36596, 36611), True, 'import torch.nn.functional as F\n')] |
"""
/***********************************
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
***********************************/
"""
import math
import numpy as np
import OFInterfaces.PyOF as PyOF
def dcmToQuat(dcm):
	"""Convert a 3x3 direction cosine matrix to a unit quaternion.

	Uses Shepperd's method: pick the largest of the four squared
	quaternion components to avoid dividing by a near-zero value,
	recover the other three from off-diagonal sums/differences, and
	normalize. Returned layout is (x, y, z, w) with the scalar last.
	"""
	tr = np.trace(dcm)
	# Squared magnitude of each quaternion component, from the diagonal.
	comp_sq = np.array([
		(1.0 + 2.0 * dcm[0][0] - tr) / 4.0,
		(1.0 + 2.0 * dcm[1][1] - tr) / 4.0,
		(1.0 + 2.0 * dcm[2][2] - tr) / 4.0,
		(1.0 + tr) / 4.0,
	])
	largest = int(np.argmax(comp_sq))
	quat = np.zeros(4)
	quat[largest] = math.sqrt(comp_sq[largest])
	denom = 4.0 * quat[largest]
	if largest == 0:
		quat[1] = (dcm[0][1] + dcm[1][0]) / denom
		quat[2] = (dcm[2][0] + dcm[0][2]) / denom
		quat[3] = (dcm[1][2] - dcm[2][1]) / denom
	elif largest == 1:
		quat[0] = (dcm[0][1] + dcm[1][0]) / denom
		quat[2] = (dcm[1][2] + dcm[2][1]) / denom
		quat[3] = (dcm[2][0] - dcm[0][2]) / denom
	elif largest == 2:
		quat[0] = (dcm[2][0] + dcm[0][2]) / denom
		quat[1] = (dcm[1][2] + dcm[2][1]) / denom
		quat[3] = (dcm[0][1] - dcm[1][0]) / denom
	else:
		quat[0] = (dcm[1][2] - dcm[2][1]) / denom
		quat[1] = (dcm[2][0] - dcm[0][2]) / denom
		quat[2] = (dcm[0][1] - dcm[1][0]) / denom
	# Enforce unit norm before returning.
	return quat / np.linalg.norm(quat)
# based off of osg::Matrixd::makeLookAt()
def getAttitudeQuat(eye, center, up):
	"""Return a PyOF.osgQuat for the attitude that looks from eye toward
	center with the given approximate up direction.

	All three arguments are osgVec3d instances; the camera basis
	(side, up, -forward) is orthonormalized with cross products, turned
	into a quaternion via dcmToQuat, and then conjugated (vector part
	negated) to yield the inverse rotation.
	"""
	def _as_array(vec):
		# Copy the three components of an osgVec3d into a numpy vector.
		return np.array([PyOF.getOsgVec3d(vec, axis) for axis in range(3)])

	forward = _as_array(center) - _as_array(eye)
	forward /= np.linalg.norm(forward)
	side = np.cross(forward, _as_array(up))
	side /= np.linalg.norm(side)
	cam_up = np.cross(side, forward)
	cam_up /= np.linalg.norm(cam_up)
	# Rotation matrix with columns (side, cam_up, -forward).
	basis = np.column_stack((side, cam_up, -forward));
	x, y, z, w = dcmToQuat(basis)
	# Conjugate quaternion = inverse rotation.
	return PyOF.osgQuat(-x, -y, -z, w)
# Create the interface that represents a window
# NOTE(review): args look like (x, y, width, height, grid rows, grid cols,
# then two boolean flags) -- confirm against the PyOF WindowProxy docs.
myWindow = PyOF.WindowProxy(30, 30, 1280, 720, 1, 1, False, False);
# Create a ReferenceFrame for the root
root = PyOF.ReferenceFrame("Root");
# Default view of the whole scene, attached to grid cell (0, 0)
view = PyOF.View(root, root);
myWindow.getGridPosition(0, 0).addView(view);
view.setDefaultViewDistance(15.0);
view.resetView();
# Create a custom cone (where we specify clock & cone angles)
customCone = PyOF.PolyhedralCone("Custom Cone");
customCone.setConeColor(0.5, 0.5, 0.5, 0.5);
customCone.setConeLength(5.0);
root.addChild(customCone);
# Second view, focused on the custom cone, in the same grid cell
view = PyOF.View(root, customCone);
myWindow.getGridPosition(0, 0).addView(view);
view.setDefaultViewParameters(PyOF.osgVec3d(0, 0, 5.0), PyOF.osgVec3d(0,0,0), PyOF.osgVec3d(0, 1.0, 0));
view.resetView();
# Set some clock/cone angles for the custom cone
# Angles are written in degrees and converted to radians for PyOF.
clockAngles = [10.0, 30.0, 90.0, 180.0, 270.0]
clockAngles = PyOF.AngleArray([angle * math.pi/180 for angle in clockAngles])
coneAngles = [10.0, 30.0, 40.0, 60.0, 30.0]
coneAngles = PyOF.AngleArray([angle * math.pi/180 for angle in coneAngles])
customCone.setVertexAngles(clockAngles, coneAngles);
# Place apex at desired location and point boresight in desired direction
# Vectors are relative to the parent object's reference frame
origin = PyOF.osgVec3d(-10, 0, 0); # Cone apex location
direction = PyOF.osgVec3d(0, 0, 1); # Cone boresight direction
up = PyOF.osgVec3d(1, 0, 0); # Cone +Y axis
customCone.setPosition(origin);
q = getAttitudeQuat(PyOF.osgVec3d(0, 0, 0), direction, up)
customCone.setAttitude(q);
# Create an elliptic cone with specified semimajor/semiminor half-angles
ellipticCone = PyOF.EllipticCone("Elliptic Cone");
ellipticCone.setConeColor(0.1, 0.5, 0.6, 0.5);
ellipticCone.setConeLength(5.0);
# Half-angles are given in radians (45 and 20 degrees here).
ellipticCone.setPrimaryAngles(45.0 * math.pi/180, 20.0 * math.pi/180);
root.addChild(ellipticCone);
view = PyOF.View(root, ellipticCone);
myWindow.getGridPosition(0, 0).addView(view);
view.setDefaultViewParameters(PyOF.osgVec3d(0, 0, 5.0), PyOF.osgVec3d(0,0,0), PyOF.osgVec3d(0, 1.0, 0));
view.resetView();
# Place apex at desired location and point boresight in desired direction
# Vectors are relative to the parent object's reference frame
origin = PyOF.osgVec3d(10, 0, 0); # Cone apex location
direction = PyOF.osgVec3d(0, 1, 0); # Cone boresight direction
up = PyOF.osgVec3d(1, 0, 1); # Cone +Y axis
ellipticCone.setPosition(origin);
q = getAttitudeQuat(PyOF.osgVec3d(0, 0, 0), direction, up)
ellipticCone.setAttitude(q);
# Create a rectangular cone with specified x/y half-angles
rectangularCone = PyOF.RectangularCone("Rectangular Cone");
rectangularCone.setPosition(0, 0, 10.0);
rectangularCone.setConeColor(0.1, 0.5, 0.6, 0.5);
rectangularCone.setConeLength(5.0);
rectangularCone.setPrimaryAngles(45.0 * math.pi/180, 20.0 * math.pi/180);
root.addChild(rectangularCone);
# Create a manager to handle access to the scene
fm = PyOF.FrameManager();
fm.setFrame(root);
# Add the scene to the window
myWindow.setScene(fm, 0, 0);
myWindow.startThread(); # Start window animation
myWindow.join(); # Wait for window animation to finish
| [
"OFInterfaces.PyOF.EllipticCone",
"numpy.trace",
"OFInterfaces.PyOF.WindowProxy",
"OFInterfaces.PyOF.RectangularCone",
"math.sqrt",
"numpy.argmax",
"OFInterfaces.PyOF.FrameManager",
"OFInterfaces.PyOF.osgQuat",
"OFInterfaces.PyOF.getOsgVec3d",
"numpy.zeros",
"OFInterfaces.PyOF.osgVec3d",
"nump... | [((2584, 2639), 'OFInterfaces.PyOF.WindowProxy', 'PyOF.WindowProxy', (['(30)', '(30)', '(1280)', '(720)', '(1)', '(1)', '(False)', '(False)'], {}), '(30, 30, 1280, 720, 1, 1, False, False)\n', (2600, 2639), True, 'import OFInterfaces.PyOF as PyOF\n'), ((2688, 2715), 'OFInterfaces.PyOF.ReferenceFrame', 'PyOF.ReferenceFrame', (['"""Root"""'], {}), "('Root')\n", (2707, 2715), True, 'import OFInterfaces.PyOF as PyOF\n'), ((2724, 2745), 'OFInterfaces.PyOF.View', 'PyOF.View', (['root', 'root'], {}), '(root, root)\n', (2733, 2745), True, 'import OFInterfaces.PyOF as PyOF\n'), ((2922, 2956), 'OFInterfaces.PyOF.PolyhedralCone', 'PyOF.PolyhedralCone', (['"""Custom Cone"""'], {}), "('Custom Cone')\n", (2941, 2956), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3068, 3095), 'OFInterfaces.PyOF.View', 'PyOF.View', (['root', 'customCone'], {}), '(root, customCone)\n', (3077, 3095), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3377, 3444), 'OFInterfaces.PyOF.AngleArray', 'PyOF.AngleArray', (['[(angle * math.pi / 180) for angle in clockAngles]'], {}), '([(angle * math.pi / 180) for angle in clockAngles])\n', (3392, 3444), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3498, 3564), 'OFInterfaces.PyOF.AngleArray', 'PyOF.AngleArray', (['[(angle * math.pi / 180) for angle in coneAngles]'], {}), '([(angle * math.pi / 180) for angle in coneAngles])\n', (3513, 3564), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3760, 3784), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(-10)', '(0)', '(0)'], {}), '(-10, 0, 0)\n', (3773, 3784), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3821, 3843), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (3834, 3843), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3878, 3900), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (3891, 3900), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4133, 4167), 'OFInterfaces.PyOF.EllipticCone', 'PyOF.EllipticCone', 
(['"""Elliptic Cone"""'], {}), "('Elliptic Cone')\n", (4150, 4167), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4356, 4385), 'OFInterfaces.PyOF.View', 'PyOF.View', (['root', 'ellipticCone'], {}), '(root, ellipticCone)\n', (4365, 4385), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4702, 4725), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(10)', '(0)', '(0)'], {}), '(10, 0, 0)\n', (4715, 4725), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4762, 4784), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (4775, 4784), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4818, 4840), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(1)', '(0)', '(1)'], {}), '(1, 0, 1)\n', (4831, 4840), True, 'import OFInterfaces.PyOF as PyOF\n'), ((5065, 5105), 'OFInterfaces.PyOF.RectangularCone', 'PyOF.RectangularCone', (['"""Rectangular Cone"""'], {}), "('Rectangular Cone')\n", (5085, 5105), True, 'import OFInterfaces.PyOF as PyOF\n'), ((5395, 5414), 'OFInterfaces.PyOF.FrameManager', 'PyOF.FrameManager', ([], {}), '()\n', (5412, 5414), True, 'import OFInterfaces.PyOF as PyOF\n'), ((767, 778), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (775, 778), True, 'import numpy as np\n'), ((980, 995), 'numpy.argmax', 'np.argmax', (['q_sq'], {}), '(q_sq)\n', (989, 995), True, 'import numpy as np\n'), ((1009, 1020), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1017, 1020), True, 'import numpy as np\n'), ((1896, 1913), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (1910, 1913), True, 'import numpy as np\n'), ((2244, 2261), 'numpy.linalg.norm', 'np.linalg.norm', (['f'], {}), '(f)\n', (2258, 2261), True, 'import numpy as np\n'), ((2270, 2285), 'numpy.cross', 'np.cross', (['f', 'up'], {}), '(f, up)\n', (2278, 2285), True, 'import numpy as np\n'), ((2295, 2312), 'numpy.linalg.norm', 'np.linalg.norm', (['s'], {}), '(s)\n', (2309, 2312), True, 'import numpy as np\n'), ((2321, 2335), 'numpy.cross', 'np.cross', (['s', 'f'], {}), 
'(s, f)\n', (2329, 2335), True, 'import numpy as np\n'), ((2345, 2362), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (2359, 2362), True, 'import numpy as np\n'), ((2378, 2405), 'numpy.column_stack', 'np.column_stack', (['(s, u, -f)'], {}), '((s, u, -f))\n', (2393, 2405), True, 'import numpy as np\n'), ((2461, 2500), 'OFInterfaces.PyOF.osgQuat', 'PyOF.osgQuat', (['(-q[0])', '(-q[1])', '(-q[2])', 'q[3]'], {}), '(-q[0], -q[1], -q[2], q[3])\n', (2473, 2500), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3173, 3197), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(0)', '(5.0)'], {}), '(0, 0, 5.0)\n', (3186, 3197), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3199, 3221), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3212, 3221), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3221, 3245), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(1.0)', '(0)'], {}), '(0, 1.0, 0)\n', (3234, 3245), True, 'import OFInterfaces.PyOF as PyOF\n'), ((3977, 3999), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3990, 3999), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4463, 4487), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(0)', '(5.0)'], {}), '(0, 0, 5.0)\n', (4476, 4487), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4489, 4511), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4502, 4511), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4511, 4535), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(1.0)', '(0)'], {}), '(0, 1.0, 0)\n', (4524, 4535), True, 'import OFInterfaces.PyOF as PyOF\n'), ((4919, 4941), 'OFInterfaces.PyOF.osgVec3d', 'PyOF.osgVec3d', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4932, 4941), True, 'import OFInterfaces.PyOF as PyOF\n'), ((1058, 1076), 'math.sqrt', 'math.sqrt', (['q_sq[0]'], {}), '(q_sq[0])\n', (1067, 1076), False, 'import math\n'), ((812, 825), 'numpy.trace', 
'np.trace', (['dcm'], {}), '(dcm)\n', (820, 825), True, 'import numpy as np\n'), ((862, 875), 'numpy.trace', 'np.trace', (['dcm'], {}), '(dcm)\n', (870, 875), True, 'import numpy as np\n'), ((912, 925), 'numpy.trace', 'np.trace', (['dcm'], {}), '(dcm)\n', (920, 925), True, 'import numpy as np\n'), ((948, 961), 'numpy.trace', 'np.trace', (['dcm'], {}), '(dcm)\n', (956, 961), True, 'import numpy as np\n'), ((1270, 1288), 'math.sqrt', 'math.sqrt', (['q_sq[1]'], {}), '(q_sq[1])\n', (1279, 1288), False, 'import math\n'), ((2034, 2058), 'OFInterfaces.PyOF.getOsgVec3d', 'PyOF.getOsgVec3d', (['eye', 'i'], {}), '(eye, i)\n', (2050, 2058), True, 'import OFInterfaces.PyOF as PyOF\n'), ((2102, 2129), 'OFInterfaces.PyOF.getOsgVec3d', 'PyOF.getOsgVec3d', (['center', 'i'], {}), '(center, i)\n', (2118, 2129), True, 'import OFInterfaces.PyOF as PyOF\n'), ((2169, 2192), 'OFInterfaces.PyOF.getOsgVec3d', 'PyOF.getOsgVec3d', (['up', 'i'], {}), '(up, i)\n', (2185, 2192), True, 'import OFInterfaces.PyOF as PyOF\n'), ((1482, 1500), 'math.sqrt', 'math.sqrt', (['q_sq[2]'], {}), '(q_sq[2])\n', (1491, 1500), False, 'import math\n'), ((1685, 1703), 'math.sqrt', 'math.sqrt', (['q_sq[3]'], {}), '(q_sq[3])\n', (1694, 1703), False, 'import math\n')] |
from __future__ import print_function
import gym
import numpy as np
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class ModifiedTaxiEnv:
    """Thin wrapper around gym's ``Taxi-v2`` environment with three tweaks:

    * the first episode lasts essentially forever,
    * a NOOP action (index 0) is added so that laziness is meaningfully
      possible,
    * positive rewards are amplified (multiplied by 10), since the base
      taxi env is so unforgiving.
    """
    def __init__(self):
        # Underlying gym environment; episode step limit raised to ~infinity.
        self.env = gym.make('Taxi-v2')
        self.env._max_episode_steps = 1000 * 1000 * 1000
        # One extra action slot (index 0) is reserved for the NOOP.
        self.nactions = self.env.action_space.n + 1
        self.nstates = self.env.observation_space.n
        self.last_obs = None
    def render(self):
        """Delegate rendering to the wrapped environment."""
        return self.env.render()
    def reset(self):
        """Reset the wrapped env, remembering the starting observation."""
        self.last_obs = obs = self.env.reset()
        return obs
    def step(self, action):
        """Apply ``action``; action 0 is a NOOP costing the usual -1 reward."""
        if action == 0:
            return self.last_obs, -1, False, {'noop': True}
        obs, reward, done, info = self.env.step(action - 1)
        if reward > 0:
            # amplify positive rewards, since the base taxi env is so unforgiving
            reward *= 10
        self.last_obs = obs
        return obs, reward, done, info
# Hyper-parameter defaults for main().  Runs that override any of these
# encode the non-default value into the output plot's filename.
DEFAULT_GAMMA = 0.95
DEFAULT_SIGMA = 0.995
DEFAULT_INITIAL_EPSILON = 0.05
DEFAULT_ALPHA = 0.1
DEFAULT_INITIAL_SVALUE = 0
DEFAULT_TIEBREAK = 0.00001
# use an extremely pessimistic initialization to make it cautious
DEFAULT_OPTIMISM = -7
DEFAULT_MAX_REWARD_DESIRED = 100.0 # = 10 * 10 = 100
DEFAULT_MEAN_REWARD_DESIRED = 5.0
DEFAULT_PATIENCE = 10
def main(fear=True,
         # discount factor for Q- and F-values
         gamma = DEFAULT_GAMMA,
         # discount factor for S-value
         sigma = DEFAULT_SIGMA,
         # epsilon-greedy exploration parameter
         initial_epsilon = DEFAULT_INITIAL_EPSILON,
         # learning rate
         alpha = DEFAULT_ALPHA,
         # initial S-value
         initial_svalue = DEFAULT_INITIAL_SVALUE,
         # break ties randomly by adding small amount of random noise to Q-values
         tiebreak = DEFAULT_TIEBREAK,
         # optimistic or pessimistic initialization
         optimism = DEFAULT_OPTIMISM,
         # max reward desired
         max_reward_desired = DEFAULT_MAX_REWARD_DESIRED,
         # mean reward desired in utils / step
         mean_reward_desired = DEFAULT_MEAN_REWARD_DESIRED,
         # how long are we willing to wait before achieving the next reward?
         patience = DEFAULT_PATIENCE):
    """Run one tabular Q-learning experiment on ModifiedTaxiEnv and save a plot.

    Trains for ~40K steps (epsilon-greedy exploration is disabled after step
    20K), optionally using a separate "fear" table that penalizes actions
    which previously led to rewards below min_reward_desired.  Action
    selection is capped at qmax = Ssat - svalue, so the agent stops seeking
    reward once "satisfied".  The resulting curves (normalized S-value,
    cumulative reward, cumulative mistakes) are written to a PNG whose
    filename encodes every non-default hyper-parameter.
    """
    epsilon = initial_epsilon
    svalue = initial_svalue
    env = ModifiedTaxiEnv()
    # maximum desired S-value
    max_s_desired = mean_reward_desired / (1 - sigma)
    # how low must a reward be before we consider it a "mistake"
    min_reward_desired = -5
    # cumulative reward experienced (tracked but only rewards[] is plotted)
    tot_reward = 0.0
    # tabular estimator of Q-values
    qtable = optimism + np.zeros((env.nstates, env.nactions))
    # tabular estimator of F-values
    fear_table = optimism + np.zeros((env.nstates, env.nactions))
    # keep track of how often various actions are taken
    action_histogram = np.zeros(env.nactions)
    # experience replay cache
    experience_replay = []
    # capacity of experience replay cache
    capacity = 100 * 100
    # TRAINING LOOP
    obs = env.reset()
    # rewards experienced over time
    rewards = []
    # "mistakes" made over time
    mistakes = []
    # S-values experienced over time
    svalues = []
    for step in range(1, 40 * 1000):
        # calculate desired S_sat-value
        Ssat = max(
            # the mature case
            mean_reward_desired / (1 - sigma) +
            gamma ** patience * max(max_s_desired, qtable.max()),
            # the immature case - bump up the desired satisfaction
            20 * 1000 - step)
        qmax = Ssat - svalue
        # if any q-value hits the qmax threshold, print out some diagnostic
        # info
        if (qtable[obs, :] >= qmax).any():
            print('#' * 100)
            print((qtable[obs, :] >= qmax).mean())
            print('#' * 100)
        # at step 20K, turn off exploration forever
        if step >= 20 * 1000:
            epsilon = 0
        # PICK AN ACTION
        if fear:
            action = (np.random.random(env.nactions) * tiebreak + # random tiebreak for degenerate cases
                fear_table[obs, :] + # fear negative rewards
                (qtable[obs, :] >= qmax) + # prefer actions that hit the threshold
                np.minimum(qtable[obs, :], qmax) # maximize q-value of selected action (up to qmax)
                ).argmax()
        else:
            action = (np.random.random(env.nactions) * tiebreak + # random tiebreak for degenerate cases
                # don't fear negative rewards
                (qtable[obs, :] >= qmax) + # prefer actions that hit the threshold
                np.minimum(qtable[obs, :], qmax) # maximize q-value of selected action (up to qmax)
                ).argmax()
        # epsilon-greedy exploration
        if np.random.random() < epsilon:
            action = np.random.randint(env.nactions)
        print('ACTION', action)
        # some book-keeping and diagnostic prints
        old_obs = obs
        obs, reward, done, info = env.step(action)
        action_histogram[action] += 1
        print(action_histogram)
        env.render()
        # if done, reset env
        if done:
            obs = env.reset()
        # remember rewards and S-values
        rewards.append(reward)
        tot_reward += reward
        # exponentially-discounted running "satisfaction" value
        svalue = sigma * svalue + reward
        svalues.append(svalue)
        # diagnostic prints
        print('STEP', step)
        print('REWARD', reward)
        print('SSAT', Ssat)
        print('SVAL', svalue)
        print('QMAX', qmax)
        print('SUM RW', np.sum(rewards[-100:]))
        print('MEAN RW', np.mean(rewards[-100:]))
        print('RECENT MISTAKES', np.sum(mistakes[-100:]))
        # fraction of Q-table / F-table entries that have moved off their init
        print('QTAB',
              (qtable > optimism).mean(),
              )
        print('FTAB',
              (fear_table < optimism).mean(),
              )
        if reward > mean_reward_desired:
            print('*' * 100)
        # did we make a "mistake"?
        if reward < min_reward_desired:
            print('?' * 100)
            mistakes.append(True)
        else:
            mistakes.append(False)
        # update Q-table and F-table
        experience_replay.append((old_obs, action, reward, obs))
        experience_replay = experience_replay[-capacity:]
        # replay the full cache every step: standard Bellman update for Q,
        # and a matching update for F restricted to "mistake" transitions
        for s, a, r, s2 in experience_replay:
            qtable[s, a] = (1 - alpha) * qtable[s, a] + alpha * (r + gamma * qtable[s2, :].max())
            if r < min_reward_desired:
                fear_table[s, a] = ((1 - alpha) * fear_table[s, a] +
                                    alpha * (r + gamma * fear_table[s2, :].max()))
    # produce the plot of results (each curve normalized to its max)
    plt.figure()
    plt.plot(range(len(svalues)), np.array(svalues) / np.array(svalues).max(), 'c')
    plt.plot(range(len(rewards)), np.cumsum(rewards) / np.cumsum(rewards)[-1], 'r')
    plt.plot(range(len(mistakes)), np.cumsum(mistakes) / np.cumsum(mistakes)[-1], 'm')
    # CONSTRUCT THE OUTPUT IMAGE NAME
    # do we have fear or not?
    fname_base = 'borgies'
    if not fear:
        fname_base += '_nofear'
    # note down all non-default params
    precision = 10000 # 4 decimal places
    if gamma != DEFAULT_GAMMA:
        fname_base += '_gamma%04d' % int(precision * gamma)
    if sigma != DEFAULT_SIGMA:
        fname_base += '_sigma%04d' % int(precision * sigma)
    if initial_epsilon != DEFAULT_INITIAL_EPSILON:
        fname_base += '_epsilon%04d' % int(precision * initial_epsilon)
    if alpha != DEFAULT_ALPHA:
        fname_base += '_alpha%04d' % int(precision * alpha)
    if initial_svalue != DEFAULT_INITIAL_SVALUE:
        fname_base += '_inits%04d' % int(precision * initial_svalue)
    if tiebreak != DEFAULT_TIEBREAK:
        fname_base += '_tiebreak%04d' % int(precision * tiebreak)
    if optimism != DEFAULT_OPTIMISM:
        fname_base += '_optimism%04d' % int(precision * optimism)
    if max_reward_desired != DEFAULT_MAX_REWARD_DESIRED:
        fname_base += '_ambition%04d' % int(precision * max_reward_desired)
    if mean_reward_desired != DEFAULT_MEAN_REWARD_DESIRED:
        fname_base += '_greed%04d' % int(precision * mean_reward_desired)
    if patience != DEFAULT_PATIENCE:
        fname_base += '_patience%04d' % int(precision * patience)
    fname = fname_base + '.png'
    # save the plot
    plt.savefig(fname)
if __name__ == '__main__':
    # Baseline run with all defaults, followed by an ablation sweep where
    # each run perturbs exactly one hyper-parameter (the non-default value
    # is encoded into that run's output PNG filename by main()).
    main()
    main(fear=False)
    main(gamma=0.9999)
    main(sigma=0.9999)
    main(initial_epsilon=0)
    main(alpha=0.0001)
    main(initial_svalue=9.9999)
    main(tiebreak=0)
    main(optimism=100)
    main(max_reward_desired=9999)
    main(mean_reward_desired=1000)
    main(patience=1)
| [
"numpy.minimum",
"numpy.sum",
"gym.make",
"numpy.zeros",
"numpy.cumsum",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.random.random",
"numpy.random.randint",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.savefig"
] | [((99, 120), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (113, 120), False, 'import matplotlib\n'), ((3106, 3128), 'numpy.zeros', 'np.zeros', (['env.nactions'], {}), '(env.nactions)\n', (3114, 3128), True, 'import numpy as np\n'), ((6938, 6950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6948, 6950), True, 'import matplotlib.pyplot as plt\n'), ((8579, 8597), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (8590, 8597), True, 'import matplotlib.pyplot as plt\n'), ((496, 515), 'gym.make', 'gym.make', (['"""Taxi-v2"""'], {}), "('Taxi-v2')\n", (504, 515), False, 'import gym\n'), ((2887, 2924), 'numpy.zeros', 'np.zeros', (['(env.nstates, env.nactions)'], {}), '((env.nstates, env.nactions))\n', (2895, 2924), True, 'import numpy as np\n'), ((2989, 3026), 'numpy.zeros', 'np.zeros', (['(env.nstates, env.nactions)'], {}), '((env.nstates, env.nactions))\n', (2997, 3026), True, 'import numpy as np\n'), ((5070, 5088), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5086, 5088), True, 'import numpy as np\n'), ((5121, 5152), 'numpy.random.randint', 'np.random.randint', (['env.nactions'], {}), '(env.nactions)\n', (5138, 5152), True, 'import numpy as np\n'), ((5857, 5879), 'numpy.sum', 'np.sum', (['rewards[-100:]'], {}), '(rewards[-100:])\n', (5863, 5879), True, 'import numpy as np\n'), ((5906, 5929), 'numpy.mean', 'np.mean', (['rewards[-100:]'], {}), '(rewards[-100:])\n', (5913, 5929), True, 'import numpy as np\n'), ((5964, 5987), 'numpy.sum', 'np.sum', (['mistakes[-100:]'], {}), '(mistakes[-100:])\n', (5970, 5987), True, 'import numpy as np\n'), ((6985, 7002), 'numpy.array', 'np.array', (['svalues'], {}), '(svalues)\n', (6993, 7002), True, 'import numpy as np\n'), ((7069, 7087), 'numpy.cumsum', 'np.cumsum', (['rewards'], {}), '(rewards)\n', (7078, 7087), True, 'import numpy as np\n'), ((7154, 7173), 'numpy.cumsum', 'np.cumsum', (['mistakes'], {}), '(mistakes)\n', (7163, 7173), True, 'import 
numpy as np\n'), ((7090, 7108), 'numpy.cumsum', 'np.cumsum', (['rewards'], {}), '(rewards)\n', (7099, 7108), True, 'import numpy as np\n'), ((7176, 7195), 'numpy.cumsum', 'np.cumsum', (['mistakes'], {}), '(mistakes)\n', (7185, 7195), True, 'import numpy as np\n'), ((7005, 7022), 'numpy.array', 'np.array', (['svalues'], {}), '(svalues)\n', (7013, 7022), True, 'import numpy as np\n'), ((4513, 4545), 'numpy.minimum', 'np.minimum', (['qtable[obs, :]', 'qmax'], {}), '(qtable[obs, :], qmax)\n', (4523, 4545), True, 'import numpy as np\n'), ((4908, 4940), 'numpy.minimum', 'np.minimum', (['qtable[obs, :]', 'qmax'], {}), '(qtable[obs, :], qmax)\n', (4918, 4940), True, 'import numpy as np\n'), ((4662, 4692), 'numpy.random.random', 'np.random.random', (['env.nactions'], {}), '(env.nactions)\n', (4678, 4692), True, 'import numpy as np\n'), ((4252, 4282), 'numpy.random.random', 'np.random.random', (['env.nactions'], {}), '(env.nactions)\n', (4268, 4282), True, 'import numpy as np\n')] |
import os, sys, logging
import numpy as np
import pyaudio, queue, webrtcvad, collections
import deepspeech
from ctypes import CFUNCTYPE, cdll, c_char_p, c_int
import time
import threading
from karen.shared import threaded, upgradePackage
def py_error_handler(filename, line, function, err, fmt):
    """
    Error handler translating non-critical C-library errors to log messages.

    Args:
        filename (bytes): Output file name or device (/dev/null).
        line (int): Line number of error
        function (bytes): Function containing the error
        err (Exception): Exception raised for error
        fmt (bytes): Format of log output
    """
    # Route through a dedicated logger so these messages can be silenced.
    logger = logging.getLogger("CTYPES")
    # Decode the ctypes byte strings for use with %-style formatting.
    message = fmt.decode("utf-8")
    fnc = function.decode('utf-8')
    filename = filename.decode("utf-8")
    n_str = message.count("%s")
    n_int = message.count("%i")
    # Fill in whichever placeholder combination the format string expects.
    if n_str == 1 and n_int == 1:
        logger.debug(message % (fnc, line))
    elif n_str == 1:
        logger.debug(message % fnc)
    elif n_str == 2:
        logger.debug(message % (fnc, str(err)))
    else:
        logger.debug(message)
    return
return
class SilenceStream():
    """
    Hides C library messages by redirecting a stream's file descriptor to a
    log file (or device such as /dev/null) for the duration of a with-block.
    """

    def __init__(self, stream, log_file=None, file_mode='a'):
        """
        Prepare redirection of a stream to a log file.

        Args:
            stream (stream): Inbound stream containing errors to hide
            log_file (str): File name or device name for error log; None disables redirection
            file_mode (str): Mode to open log_file (append vs. write)
        """
        self.fd_to_silence = stream.fileno()  # Store the descriptor we're redirecting
        self.log_file = log_file              # Store the log file to redirect to
        self.file_mode = file_mode            # Append vs. write

    def __enter__(self):
        """
        Starts the stream redirection to the log file.
        """
        if (self.log_file is None):
            return  # No log file means we can skip this and let output flow as normal.
        self.stored_dup = os.dup(self.fd_to_silence)          # Store the original pointer for the stream
        self.devnull = open(self.log_file, self.file_mode)  # Get a pointer for the new target
        os.dup2(self.devnull.fileno(), self.fd_to_silence)  # Redirect to the new pointer

    def __exit__(self, exc_type, exc_value, tb):
        """
        Restore stream back to its original state before the silencer was called.

        Args:
            exc_type (obj): Exception type. Not used.
            exc_value (obj): Exception value. Not used.
            tb (obj): Traceback. Not used.
        """
        if (self.log_file is None):
            return  # No log file means we can skip this as nothing needs to change.
        os.dup2(self.stored_dup, self.fd_to_silence)  # Restore the pointer to the original
        # BUG FIX: close the duplicated descriptor and the log-file handle.
        # Previously both were only dereferenced (set to None), leaking one
        # fd and one open file object per use of this context manager.
        os.close(self.stored_dup)
        self.devnull.close()
        self.devnull = None     # Cleanup
        self.stored_dup = None  # Cleanup
class Listener():
    """
    Listener device to capture audio from microphone and convert any speech to text and send to callback method.
    """
    def __init__(
            self,
            parent=None,
            speechModel=None,            # Speech Model file. Ideally this could be searched for in a default location
            speechScorer=None,           # Scorer file. Okay for this to be None as scorer file is not required
            audioChannels=1,             # VAD requires this to be 1 channel
            audioSampleRate=16000,       # VAD requires this to be 16000
            vadAggressiveness=1,         # VAD accepts 1 thru 3
            speechRatio=0.75,            # Must be between 0 and 1 as a decimal
            speechBufferSize=50,         # Buffer size for speech frames
            speechBufferPadding=350,     # Padding, in milliseconds, of speech frames
            audioDeviceIndex=None,
            callback=None):              # Callback is a function that accepts ONE positional argument which will contain the text identified
        """
        Listener Initialization

        Args:
            parent (object): Containing object's reference.  Normally this would be the device container. (optional)
            speechModel (str): Path and filename of Deepspeech Speech Model file.  If not set then listener will do a basic seach for the PBMM or TFLite file.
            speechScorer (str): Path and filename of Deepspeech Scorer file.  Okay for this to be None as scorer file is not required.
            audioChannels (int): Audio channels for audio source.  VAD requires this to be 1 channel.
            audioSampleRate (int): Audio sample rate of audio source.  VAD requires this to be 16000.
            vadAggressiveness (int): Voice Activity Detection (VAD) aggressiveness for filtering noise.  Accepts 1 thru 3.
            speechRatio (float): Must be between 0 and 1 as a decimal
            speechBufferSize (int): Buffer size for speech frames
            speechBufferPadding (int): Padding, in milliseconds, of speech frames
            audioDeviceIndex (int): Listening device index number.  If not set then will use default audio capture device.
            callback (function): Callback function for which to send capture text
        """
        from . import __version__
        self.version = __version__
        self._packageName = "karen-plugin-listener"
        # Local variable instantiation and initialization
        self.type = "LISTENER"
        self.callback = callback
        self.logger = logging.getLogger(self.type)
        self.parent = parent
        self.speechModel=speechModel
        self.speechScorer=speechScorer
        self.audioChannels=audioChannels
        self.audioSampleRate=audioSampleRate
        self.vadAggressiveness=vadAggressiveness
        self.speechRatio=speechRatio
        self.speechBufferSize=speechBufferSize
        self.speechBufferPadding=speechBufferPadding
        self.audioDeviceIndex=audioDeviceIndex
        if self.speechModel is None:
            # Search for speech model?  PBMM is preferred over TFLite when both
            # exist (see warning below); the scorer is optional.
            self.logger.info("Speech model not specified. Attempting to use defaults.")
            local_path = os.path.join(os.path.expanduser("~/.karen"), "data", "models", "speech")
            os.makedirs(local_path, exist_ok=True)
            files = os.listdir(local_path)
            files = sorted(files, reverse=True)  # Very poor attempt to get the latest version of the model if multiple exist.
            bFoundPBMM=False
            bFoundTFLITE=False
            for file in files:
                if not bFoundPBMM:
                    if file.startswith("deepspeech") and file.endswith("models.pbmm"):
                        self.speechModel=os.path.abspath(os.path.join(local_path, file))
                        self.logger.debug("Using speech model from " + str(self.speechModel))
                        bFoundPBMM = True
                if not bFoundPBMM and not bFoundTFLITE:
                    if file.startswith("deepspeech") and file.endswith("models.tflite"):
                        self.speechModel=os.path.abspath(os.path.join(local_path, file))
                        self.logger.debug("Using speech model from " + str(self.speechModel))
                        bFoundTFLITE = True
                if self.speechScorer is None:
                    if file.startswith("deepspeech") and file.endswith("models.scorer"):
                        self.speechScorer=os.path.abspath(os.path.join(local_path, file))
                        self.logger.debug("Using speech scorer from " + str(self.speechScorer))
            if bFoundPBMM and bFoundTFLITE:
                self.logger.warning("Found both PBMM and TFLite deepspeech models.")
                self.logger.warning("Defaulting to PBMM model which will not work with Raspberry Pi devices.")
                self.logger.warning("To use with RPi either delete the PBMM model or specify the TFLite model explicitly.")
        if self.speechModel is None:
            #FIXME: Should we try to download the models if they don't exist?
            raise Exception("Invalid speech model. Unable to start listener.")
        self.stream = None       # PyAudio stream; created in _readFromMic()
        self.thread = None       # Thread running _readFromMic(); created in start()
        self._isRunning = False  # Kill switch for the listening loop
        self._isAudioOut = False # True while audio is being played back (mic input is then ignored)
    @threaded
    def _doCallback(self, inData):
        """
        Calls the specified callback as a thread to keep from blocking audio device listening

        Args:
            inData (str): Text to send to callback function

        Returns:
            (thread): The thread on which the callback is created to be sent to avoid blocking calls.
        """
        try:
            if self.callback is not None:
                # Callback errors are intentionally swallowed so a bad handler
                # cannot kill the listening loop.
                self.callback("AUDIO_INPUT", inData)
        except:
            pass
        return
    @threaded
    def _readFromMic(self):
        """
        Opens audio device for listening and processing speech to text

        Returns:
            (thread): The thread created for the listener while listening for incoming speech.
        """
        buffer_queue = queue.Queue()  # Buffer queue for incoming frames of audio
        self._isRunning = True  # Reset to True to insure we can successfully start
        def proxy_callback(in_data, frame_count, time_info, status):
            """Callback for the audio capture which adds the incoming audio frames to the buffer queue"""
            # Save captured frames to buffer
            buffer_queue.put(in_data)
            # Tell the caller that it can continue capturing frames
            return (None, pyaudio.paContinue)
        # Using a collections queue to enable fast response to processing items.
        # The collections class is simply faster at handling this data than a simple dict or array.
        # The size of the buffer is the length of the padding and thereby those chunks of audio.
        ring_buffer = collections.deque(maxlen=self.speechBufferPadding // (1000 * int(self.audioSampleRate / float(self.speechBufferSize)) // self.audioSampleRate))
        # Set up C lib error handler for Alsa programs to trap errors from Alsa spin up
        #ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
        #c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
        #asound = cdll.LoadLibrary('libasound.so')
        #asound.snd_lib_error_set_handler(c_error_handler)
        # Model loading is noisy on stderr; silence it via SilenceStream.
        with SilenceStream(sys.stderr, log_file="/dev/null"):
            _model = deepspeech.Model(self.speechModel)
            if self.speechScorer is not None:
                _model.enableExternalScorer(self.speechScorer)
        _vad = webrtcvad.Vad(self.vadAggressiveness)
        # Route ALSA's C-level error chatter through py_error_handler/logging.
        ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
        c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
        asound = cdll.LoadLibrary('libasound.so')
        asound.snd_lib_error_set_handler(c_error_handler)
        _audio_device = pyaudio.PyAudio()
        # Open a stream on the audio device for reading frames
        self.stream = _audio_device.open(format=pyaudio.paInt16,
            channels=self.audioChannels,
            rate=self.audioSampleRate,
            input=True,
            frames_per_buffer=int(self.audioSampleRate / float(self.speechBufferSize)),
            input_device_index=self.audioDeviceIndex,
            stream_callback=proxy_callback)
        self.stream.start_stream()  # Open audio device stream
        stream_context = _model.createStream()  # Context of audio frames is used to
                                                # better identify the spoken words.
        triggered = False  # Used to flag whether we are above
                           # or below the ratio threshold set
                           # for speech frames to total frames
        self.logger.info("Started")
        # We will loop looking for incoming audio until the KILL_SWITCH is set to True
        while self._isRunning == True:
            # Get current data in buffer as an audio frame
            frame = buffer_queue.get()
            # A lot of the following code was pulled from examples on DeepSpeech
            # https://github.com/mozilla/DeepSpeech-examples/blob/r0.7/mic_vad_streaming/mic_vad_streaming.py
            # Important note that the frame lengths must be specific sizes for VAD detection to work.
            # Voice Activity Detection (VAD) also expects single channel input at specific rates.
            # Highly recommend reading up on webrtcvad() before adjusting any of this.
            # We also skip this process if we are actively sending audio to the output device to avoid
            # looping and thus listening to ourselves.
            # NOTE(review): 640 appears tied to the VAD-acceptable frame size
            # for 16 kHz/16-bit mono input -- confirm before changing rates.
            if len(frame) >= 640 and self._isAudioOut == False:
                # Bool to determine if this frame includes speech.
                # This only determines if the frame has speech, it does not translate to text.
                is_speech = _vad.is_speech(frame, self.audioSampleRate)
                # Trigger is set for first frame that contains speech and remains triggered until
                # we fall below the allowed ratio of speech frames to total frames
                if not triggered:
                    # Save the frame to the buffer along with an indication of if it is speech (or not)
                    ring_buffer.append((frame, is_speech))
                    # Get the number of frames with speech in them
                    num_voiced = len([f for f, speech in ring_buffer if speech])
                    # Compare frames with speech to the expected number of frames with speech
                    if num_voiced > self.speechRatio * ring_buffer.maxlen:
                        # We have more speech than the ratio so we start listening
                        triggered = True
                        # Feed data into the deepspeech model for determing the words used
                        for f in ring_buffer:
                            stream_context.feedAudioContent(np.frombuffer(f[0], np.int16))
                        # Since we've now fed every frame in the buffer to the deepspeech model
                        # we no longer need the frames collected up to this point
                        ring_buffer.clear()
                else:
                    # We only get here after we've identified we have enough frames to cross the threshold
                    # for the supplied ratio of speech to total frames.  Thus we can safely keep feeding
                    # incoming frames into the deepspeech model until we fall below the threshold again.
                    # Feed to deepspeech model the incoming frame
                    stream_context.feedAudioContent(np.frombuffer(frame, np.int16))
                    # Save to ring buffer for calculating the ratio of speech to total frames with speech
                    ring_buffer.append((frame, is_speech))
                    # We have a full collection of frames so now we loop through them to recalculate our total
                    # number of non-spoken frames (as I pulled from an example this could easily be stated as
                    # the inverse of the calculation in the code block above)
                    num_unvoiced = len([f for f, speech in ring_buffer if not speech])
                    # Compare our calculated value with the ratio.  In this case we're doing the opposite
                    # of the calculation in the previous code block by looking for frames without speech
                    if num_unvoiced > self.speechRatio * ring_buffer.maxlen:
                        # We have fallen below the threshold for speech per frame ratio
                        triggered = False
                        # Let's see if we heard anything that can be translated to words.
                        # This is the invocation of the deepspeech's primary STT logic.
                        # Note that this is outside the kill_switch block just to insure that all the
                        # buffers are cleaned and closed properly. (Arguably this is not needed if killed)
                        text = str(stream_context.finishStream())
                        # We've completed the hard part.  Now let's just clean up.
                        if self._isRunning == True:
                            # We'll only process if the text if there is a real value AND we're not already processing something.
                            # We don't block the processing of incoming audio though, we just ignore it if we're processing data.
                            if text.strip() != "":
                                self.logger.info("HEARD " + text)
                                self._doCallback(text)
                        stream_context = _model.createStream()  # Create a fresh new context
                        ring_buffer.clear()  # Clear the ring buffer as we've crossed the threshold again
        self.logger.debug("Stopping streams")
        self.stream.stop_stream()  # Stop audio device stream
        self.stream.close()  # Close audio device stream
        self.logger.debug("Streams stopped")
    def accepts(self):
        """Returns the list of command names this device responds to."""
        return ["start","stop","audioOutStart","audioOutEnd", "upgrade"]
    def isRunning(self):
        """Indicates whether the listening loop is active."""
        return self._isRunning
    def audioOutStart(self, httpRequest=None):
        """Marks audio output as active so the mic input is ignored (avoids hearing ourselves)."""
        self._isAudioOut = True
        return True
    def audioOutEnd(self, httpRequest=None):
        """Marks audio output as finished so mic input processing resumes."""
        self._isAudioOut = False
        return True
    def upgrade(self, httpRequest=None):
        """Upgrades this plugin package; returns the upgradePackage() result."""
        return upgradePackage(self._packageName)
    def stop(self, httpRequest=None):
        """
        Stops the listener and any active audio streams

        Returns:
            (bool): True on success else will raise an exception.
        """
        if not self._isRunning:
            return True
        self._isRunning = False
        if self.thread is not None:
            self.thread.join()
        self.logger.info("Stopped")
        return True
    def start(self, httpRequest=None, useThreads=True):
        """
        Starts the listener to listen to the default audio device

        Args:
            useThreads (bool): Indicates if the brain should be started on a new thread.

        Returns:
            (bool): True on success else will raise an exception.
        """
        if self._isRunning:
            return True
        self.thread = self._readFromMic()
        if not useThreads:
            self.wait()
        return True
    def wait(self, seconds=0):
        """
        Waits for any active listeners to complete before closing

        Args:
            seconds (int): Number of seconds to wait before calling the "stop()" function

        Returns:
            (bool): True on success else will raise an exception.
        """
        if not self._isRunning:
            return True
        if seconds > 0:
            if self.thread is not None:
                time.sleep(seconds)
            self.stop()
        else:
            if self.thread is not None:
                self.thread.join()
        return True
| [
"os.listdir",
"os.dup2",
"os.makedirs",
"deepspeech.Model",
"os.path.join",
"numpy.frombuffer",
"os.dup",
"ctypes.cdll.LoadLibrary",
"karen.shared.upgradePackage",
"time.sleep",
"webrtcvad.Vad",
"pyaudio.PyAudio",
"ctypes.CFUNCTYPE",
"os.path.expanduser",
"queue.Queue",
"logging.getLog... | [((898, 925), 'logging.getLogger', 'logging.getLogger', (['"""CTYPES"""'], {}), "('CTYPES')\n", (915, 925), False, 'import os, sys, logging\n'), ((2126, 2152), 'os.dup', 'os.dup', (['self.fd_to_silence'], {}), '(self.fd_to_silence)\n', (2132, 2152), False, 'import os, sys, logging\n'), ((2852, 2896), 'os.dup2', 'os.dup2', (['self.stored_dup', 'self.fd_to_silence'], {}), '(self.stored_dup, self.fd_to_silence)\n', (2859, 2896), False, 'import os, sys, logging\n'), ((5598, 5626), 'logging.getLogger', 'logging.getLogger', (['self.type'], {}), '(self.type)\n', (5615, 5626), False, 'import os, sys, logging\n'), ((9347, 9360), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (9358, 9360), False, 'import pyaudio, queue, webrtcvad, collections\n'), ((10957, 10994), 'webrtcvad.Vad', 'webrtcvad.Vad', (['self.vadAggressiveness'], {}), '(self.vadAggressiveness)\n', (10970, 10994), False, 'import pyaudio, queue, webrtcvad, collections\n'), ((11033, 11092), 'ctypes.CFUNCTYPE', 'CFUNCTYPE', (['None', 'c_char_p', 'c_int', 'c_char_p', 'c_int', 'c_char_p'], {}), '(None, c_char_p, c_int, c_char_p, c_int, c_char_p)\n', (11042, 11092), False, 'from ctypes import CFUNCTYPE, cdll, c_char_p, c_int\n'), ((11173, 11205), 'ctypes.cdll.LoadLibrary', 'cdll.LoadLibrary', (['"""libasound.so"""'], {}), "('libasound.so')\n", (11189, 11205), False, 'from ctypes import CFUNCTYPE, cdll, c_char_p, c_int\n'), ((11297, 11314), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (11312, 11314), False, 'import pyaudio, queue, webrtcvad, collections\n'), ((18519, 18552), 'karen.shared.upgradePackage', 'upgradePackage', (['self._packageName'], {}), '(self._packageName)\n', (18533, 18552), False, 'from karen.shared import threaded, upgradePackage\n'), ((6420, 6458), 'os.makedirs', 'os.makedirs', (['local_path'], {'exist_ok': '(True)'}), '(local_path, exist_ok=True)\n', (6431, 6458), False, 'import os, sys, logging\n'), ((6492, 6514), 'os.listdir', 'os.listdir', (['local_path'], {}), 
'(local_path)\n', (6502, 6514), False, 'import os, sys, logging\n'), ((10785, 10819), 'deepspeech.Model', 'deepspeech.Model', (['self.speechModel'], {}), '(self.speechModel)\n', (10801, 10819), False, 'import deepspeech\n'), ((6348, 6378), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.karen"""'], {}), "('~/.karen')\n", (6366, 6378), False, 'import os, sys, logging\n'), ((20029, 20048), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (20039, 20048), False, 'import time\n'), ((15431, 15461), 'numpy.frombuffer', 'np.frombuffer', (['frame', 'np.int16'], {}), '(frame, np.int16)\n', (15444, 15461), True, 'import numpy as np\n'), ((6912, 6942), 'os.path.join', 'os.path.join', (['local_path', 'file'], {}), '(local_path, file)\n', (6924, 6942), False, 'import os, sys, logging\n'), ((7307, 7337), 'os.path.join', 'os.path.join', (['local_path', 'file'], {}), '(local_path, file)\n', (7319, 7337), False, 'import os, sys, logging\n'), ((7671, 7701), 'os.path.join', 'os.path.join', (['local_path', 'file'], {}), '(local_path, file)\n', (7683, 7701), False, 'import os, sys, logging\n'), ((14682, 14711), 'numpy.frombuffer', 'np.frombuffer', (['f[0]', 'np.int16'], {}), '(f[0], np.int16)\n', (14695, 14711), True, 'import numpy as np\n')] |
"""A multi-thread tool to crop large images to sub-images for faster IO."""
import os
import os.path as osp
import numpy as np
import cv2
from PIL import Image
import data.util as data_util # noqa: E402
import torch.utils.data as data
from tqdm import tqdm
import torch
def main():
    """Configure and launch the tiled sub-image cropping job.

    All behaviour is driven by the ``opt`` dict below; a writer backend
    (LMDB or plain files) is chosen by ``opt['dest']`` and handed to
    ``extract_single`` along with the options.
    """
    opt = {}
    opt['n_thread'] = 7
    opt['compression_level'] = 90  # JPEG compression quality rating.
    # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer
    # compression time. If read raw images during training, use 0 for faster IO speed.
    opt['dest'] = 'file'
    # FIX: the original path contained the invalid escape sequence '\y'
    # ('images\youtube'); CPython keeps the backslash, so the runtime value
    # is identical, but it is now written with a proper escaped backslash.
    opt['input_folder'] = 'F:\\4k6k\\datasets\\images\\youtube\\images_cook'
    opt['save_folder'] = 'F:\\4k6k\\datasets\\images\\youtube_massive_cook'
    opt['crop_sz'] = [512, 1024, 2048]  # the size of each sub-image
    opt['step'] = [256, 512, 1024]  # step of the sliding crop window
    opt['exclusions'] = [[],[],[]]  # image names matching these terms wont be included in the processing.
    opt['thres_sz'] = 128  # size threshold
    opt['resize_final_img'] = [.5, .25, .125]
    opt['only_resize'] = False
    opt['vertical_split'] = False
    opt['input_image_max_size_before_being_halved'] = 5500  # As described, images larger than this dimensional size will be halved before anything else is done.
    # This helps prevent images from cameras with "false-megapixels" from polluting the dataset.
    # False-megapixel=lots of noise at ultra-high res.

    save_folder = opt['save_folder']
    if not osp.exists(save_folder):
        os.makedirs(save_folder)
        print('mkdir [{:s}] ...'.format(save_folder))

    # Select the output backend.  NOTE: LmdbWriter requires the (currently
    # missing) 'lmdb' and 'pyarrow' imports.
    if opt['dest'] == 'lmdb':
        writer = LmdbWriter(save_folder)
    else:
        writer = FileWriter(save_folder)
    extract_single(opt, writer)
class LmdbWriter:
    # Writes reference images and their derived tiles into an LMDB database,
    # serializing payloads with pyarrow and committing in batches.
    # NOTE(review): neither `lmdb` nor `pyarrow` is imported anywhere in this
    # file, so instantiating this class raises NameError at runtime.  main()
    # defaults to dest='file', which is why the bug is latent -- add the
    # imports before selecting the 'lmdb' destination.
    def __init__(self, lmdb_path, max_mem_size=30*1024*1024*1024, write_freq=5000):
        # Open the LMDB environment (default map size: 30 GB) and start the
        # first write transaction.
        self.db = lmdb.open(lmdb_path, subdir=True,
                       map_size=max_mem_size, readonly=False,
                       meminit=False, map_async=True)
        self.txn = self.db.begin(write=True)
        self.ref_id = 0       # next reference-image ID to hand out
        self.tile_ids = {}    # per-reference counter for tile IDs
        self.writes = 0       # writes since start; drives periodic commits
        self.write_freq = write_freq
        self.keys = []        # tile keys, persisted under '__keys__' on close
    # Writes the given reference image to the db and returns its ID.
    def write_reference_image(self, ref_img, _):
        id = self.ref_id
        self.ref_id += 1
        self.write_image(id, ref_img[0], ref_img[1])
        return id
    # Writes a tile image to the db given a reference image and returns its ID.
    def write_tile_image(self, ref_id, tile_image):
        next_tile_id = 0 if ref_id not in self.tile_ids.keys() else self.tile_ids[ref_id]
        self.tile_ids[ref_id] = next_tile_id+1
        full_id = "%i_%i" % (ref_id, next_tile_id)
        self.write_image(full_id, tile_image[0], tile_image[1])
        self.keys.append(full_id)
        return full_id
    # Writes an image directly to the db with the given reference image and center point.
    def write_image(self, id, img, center_point):
        # NOTE(review): txn.put() is given three positional arguments; lmdb's
        # put signature is (key, value, dupdata=...), so the center-point
        # buffer lands in `dupdata` rather than being stored -- verify intent.
        self.txn.put(u'{}'.format(id).encode('ascii'), pyarrow.serialize(img).to_buffer(), pyarrow.serialize(center_point).to_buffer())
        self.writes += 1
        if self.writes % self.write_freq == 0:
            # Periodically commit and open a fresh write transaction.
            self.txn.commit()
            self.txn = self.db.begin(write=True)
    def close(self):
        # Final commit, then record the key list and entry count before closing.
        self.txn.commit()
        with self.db.begin(write=True) as txn:
            txn.put(b'__keys__', pyarrow.serialize(self.keys).to_buffer())
            txn.put(b'__len__', pyarrow.serialize(len(self.keys)).to_buffer())
        self.db.sync()
        self.db.close()
class FileWriter:
    """Writes reference/tile images as individual files on disk.

    Layout: ``<folder>/<ref_name>/ref.jpg`` for reference images,
    ``<folder>/<ref_name>/<00000000>.jpg`` for tiles, and a per-reference
    ``centers.pt`` (torch file) mapping tile IDs to (center, tile_size).
    """
    def __init__(self, folder):
        self.folder = folder
        self.next_unique_id = 0
        # Maps ref_img basename to a dict of image IDs -> (center, tile size).
        self.ref_center_points = {}
        # Maps numeric reference IDs back to their folder names.
        self.ref_ids_to_names = {}

    def get_next_unique_id(self):
        """Return a monotonically increasing integer ID."""
        id = self.next_unique_id
        self.next_unique_id += 1
        return id

    def save_image(self, ref_path, img_name, img):
        """Write encoded image bytes to <folder>/<ref_path>/<img_name>."""
        save_path = osp.join(self.folder, ref_path)
        os.makedirs(save_path, exist_ok=True)
        # Context manager guarantees the handle is closed even if the
        # write raises (the original open()/close() pair leaked on error).
        with open(osp.join(save_path, img_name), "wb") as f:
            f.write(img)

    def write_reference_image(self, ref_img, path):
        """Save a reference image and return its numeric ID.

        ref_img is a (bytes, center, tile_size) tuple; the center point and
        tile size are irrelevant for the reference image and discarded.
        """
        ref_img, _, _ = ref_img
        img_name = osp.basename(path).replace(".jpg", "").replace(".png", "")
        self.ref_center_points[img_name] = {}
        self.save_image(img_name, "ref.jpg", ref_img)
        id = self.get_next_unique_id()
        self.ref_ids_to_names[id] = img_name
        return id

    def write_tile_image(self, ref_id, tile_image):
        """Save a tile belonging to reference ref_id and return the tile's ID."""
        id = self.get_next_unique_id()
        ref_name = self.ref_ids_to_names[ref_id]
        img, center, tile_sz = tile_image
        self.ref_center_points[ref_name][id] = center, tile_sz
        self.save_image(ref_name, "%08i.jpg" % (id,), img)
        return id

    def flush(self):
        """Persist accumulated tile centers, one centers.pt per reference."""
        for ref_name, cps in self.ref_center_points.items():
            torch.save(cps, osp.join(self.folder, ref_name, "centers.pt"))
        self.ref_center_points = {}

    def close(self):
        self.flush()
class TiledDataset(data.Dataset):
    """Dataset that turns each source image into a reference thumbnail plus
    a set of JPEG-encoded tiles at the scales configured in ``opt``.

    Each item is a tuple of one or two ``(results, path)`` pairs (two when
    vertical_split is on), where ``results[0]`` is the reference image and
    the remaining entries are tiles.
    """
    def __init__(self, opt):
        # When True, each image is processed twice: once for its left half
        # and once for its right half.
        self.split_mode = opt['vertical_split']
        self.opt = opt
        input_folder = opt['input_folder']
        self.images = data_util._get_paths_from_images(input_folder)
    def __getitem__(self, index):
        if self.split_mode:
            return (self.get(index, True, True), self.get(index, True, False))
        else:
            # Wrap in a tuple to align with split mode.
            return (self.get(index, False, False), None)
    def get_for_scale(self, img, crop_sz, step, resize_factor, ref_resize_factor):
        """Slide a crop_sz window over ``img`` with stride ``step``.

        Returns a list of (jpeg_buffer, center_point, tile_size) tuples.
        Center points and tile sizes are expressed in reference-image
        coordinates (hence the division by ref_resize_factor).
        """
        thres_sz = self.opt['thres_sz']
        h, w, c = img.shape
        if crop_sz > h:
            return []
        h_space = np.arange(0, h - crop_sz + 1, step)
        # If the leftover border exceeds thres_sz, add one extra window
        # flush against the edge so that border content is not dropped.
        if h - (h_space[-1] + crop_sz) > thres_sz:
            h_space = np.append(h_space, h - crop_sz)
        w_space = np.arange(0, w - crop_sz + 1, step)
        if w - (w_space[-1] + crop_sz) > thres_sz:
            w_space = np.append(w_space, w - crop_sz)
        index = 0
        tile_dim = int(crop_sz * resize_factor)
        dsize = (tile_dim, tile_dim)
        results = []
        for x in h_space:
            for y in w_space:
                index += 1
                crop_img = img[x:x + crop_sz, y:y + crop_sz, :]
                # Center point needs to be resized by ref_resize_factor - since it is relative to the reference image.
                center_point = (int((x + crop_sz // 2) // ref_resize_factor), int((y + crop_sz // 2) // ref_resize_factor))
                crop_img = np.ascontiguousarray(crop_img)
                if 'resize_final_img' in self.opt.keys():
                    crop_img = cv2.resize(crop_img, dsize, interpolation=cv2.INTER_AREA)
                success, buffer = cv2.imencode(".jpg", crop_img, [cv2.IMWRITE_JPEG_QUALITY, self.opt['compression_level']])
                assert success
                results.append((buffer, center_point, int(crop_sz // ref_resize_factor)))
        return results
    def get(self, index, split_mode, left_img):
        """Load image ``index`` and return (results, path).

        Returns None when the image is unreadable, greyscale, or smaller
        than 512 px on its shortest side.
        """
        path = self.images[index]
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if img is None or len(img.shape) == 2:
            return None
        h, w, c = img.shape
        # Halve oversized images first to suppress noise from cameras with
        # "false megapixels" (see the comment where this opt key is set).
        if max(h,w) > self.opt['input_image_max_size_before_being_halved']:
            h = h // 2
            w = w // 2
            img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)
            #print("Resizing to ", img.shape)
        # Uncomment to filter any image that doesnt meet a threshold size.
        if min(h,w) < 512:
            return None
        # Greyscale not supported.
        if len(img.shape) == 2:
            return None
        # Handle splitting the image if needed.
        left = 0
        right = w
        if split_mode:
            if left_img:
                left = 0
                right = w//2
            else:
                left = w//2
                right = w
        img = img[:, left:right]
        # We must convert the image into a square.
        dim = min(h, w)
        if split_mode:
            # Crop the image towards the center, which makes more sense in split mode.
            if left_img:
                img = img[-dim:, -dim:, :]
            else:
                img = img[:dim, :dim, :]
        else:
            # Crop the image so that only the center is left, since this is often the most salient part of the image.
            img = img[(h - dim) // 2:dim + (h - dim) // 2, (w - dim) // 2:dim + (w - dim) // 2, :]
        h, w, c = img.shape
        # The reference thumbnail size is derived from the first configured scale.
        tile_dim = int(self.opt['crop_sz'][0] * self.opt['resize_final_img'][0])
        dsize = (tile_dim, tile_dim)
        ref_resize_factor = h / tile_dim
        # Reference image should always be first entry in results.
        ref_img = cv2.resize(img, dsize, interpolation=cv2.INTER_AREA)
        success, ref_buffer = cv2.imencode(".jpg", ref_img, [cv2.IMWRITE_JPEG_QUALITY, self.opt['compression_level']])
        assert success
        results = [(ref_buffer, (-1,-1), (-1,-1))]
        for crop_sz, exclusions, resize_factor, step in zip(self.opt['crop_sz'], self.opt['exclusions'], self.opt['resize_final_img'], self.opt['step']):
            # Skip scales whose exclusion substrings match this image path.
            excluded = False
            for exc in exclusions:
                if exc in path:
                    excluded = True
                    break;
            if excluded:
                continue
            results.extend(self.get_for_scale(img, crop_sz, step, resize_factor, ref_resize_factor))
        return results, path
    def __len__(self):
        return len(self.images)
def identity(x):
    """Pass-through collate_fn: hand back the batch exactly as received."""
    return x
def extract_single(opt, writer):
    """Walk the dataset, writing one reference image plus its tiles per entry."""
    dataset = TiledDataset(opt)
    loader = data.DataLoader(dataset, num_workers=opt['n_thread'], collate_fn=identity)
    for batch in tqdm(loader):
        if batch is None:
            continue
        # collate_fn=identity yields the raw item list; take the single item.
        entry = batch[0]
        for half, side in zip(list(entry), ['left', 'right']):
            if half is None:
                continue
            tiles, img_path = half
            # Need at least a reference image plus one tile to be useful.
            if tiles is None or len(tiles) <= 1:
                continue
            ref_id = writer.write_reference_image(tiles[0], img_path + "_" + side)
            for tile in tiles[1:]:
                writer.write_tile_image(ref_id, tile)
        writer.flush()
    writer.close()
if __name__ == '__main__':
    # Script entry point. main() is defined earlier in this file (it
    # presumably builds the `opt` dict and dispatches to extract_single
    # - confirm against the top of the file).
    main()
| [
"tqdm.tqdm",
"os.makedirs",
"torch.utils.data.DataLoader",
"os.path.basename",
"numpy.ascontiguousarray",
"os.path.exists",
"cv2.imread",
"numpy.append",
"numpy.arange",
"data.util._get_paths_from_images",
"cv2.imencode",
"os.path.join",
"cv2.resize"
] | [((10272, 10346), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dataset'], {'num_workers': "opt['n_thread']", 'collate_fn': 'identity'}), "(dataset, num_workers=opt['n_thread'], collate_fn=identity)\n", (10287, 10346), True, 'import torch.utils.data as data\n'), ((10356, 10372), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (10360, 10372), False, 'from tqdm import tqdm\n'), ((1647, 1670), 'os.path.exists', 'osp.exists', (['save_folder'], {}), '(save_folder)\n', (1657, 1670), True, 'import os.path as osp\n'), ((1680, 1704), 'os.makedirs', 'os.makedirs', (['save_folder'], {}), '(save_folder)\n', (1691, 1704), False, 'import os\n'), ((4211, 4242), 'os.path.join', 'osp.join', (['self.folder', 'ref_path'], {}), '(self.folder, ref_path)\n', (4219, 4242), True, 'import os.path as osp\n'), ((4251, 4288), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (4262, 4288), False, 'import os\n'), ((5733, 5779), 'data.util._get_paths_from_images', 'data_util._get_paths_from_images', (['input_folder'], {}), '(input_folder)\n', (5765, 5779), True, 'import data.util as data_util\n'), ((6267, 6302), 'numpy.arange', 'np.arange', (['(0)', '(h - crop_sz + 1)', 'step'], {}), '(0, h - crop_sz + 1, step)\n', (6276, 6302), True, 'import numpy as np\n'), ((6426, 6461), 'numpy.arange', 'np.arange', (['(0)', '(w - crop_sz + 1)', 'step'], {}), '(0, w - crop_sz + 1, step)\n', (6435, 6461), True, 'import numpy as np\n'), ((7652, 7690), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (7662, 7690), False, 'import cv2\n'), ((9361, 9413), 'cv2.resize', 'cv2.resize', (['img', 'dsize'], {'interpolation': 'cv2.INTER_AREA'}), '(img, dsize, interpolation=cv2.INTER_AREA)\n', (9371, 9413), False, 'import cv2\n'), ((9444, 9537), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'ref_img', "[cv2.IMWRITE_JPEG_QUALITY, self.opt['compression_level']]"], {}), "('.jpg', ref_img, 
[cv2.IMWRITE_JPEG_QUALITY, self.opt[\n 'compression_level']])\n", (9456, 9537), False, 'import cv2\n'), ((4306, 4335), 'os.path.join', 'osp.join', (['save_path', 'img_name'], {}), '(save_path, img_name)\n', (4314, 4335), True, 'import os.path as osp\n'), ((6376, 6407), 'numpy.append', 'np.append', (['h_space', '(h - crop_sz)'], {}), '(h_space, h - crop_sz)\n', (6385, 6407), True, 'import numpy as np\n'), ((6535, 6566), 'numpy.append', 'np.append', (['w_space', '(w - crop_sz)'], {}), '(w_space, w - crop_sz)\n', (6544, 6566), True, 'import numpy as np\n'), ((7933, 7986), 'cv2.resize', 'cv2.resize', (['img', '(w, h)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (w, h), interpolation=cv2.INTER_AREA)\n', (7943, 7986), False, 'import cv2\n'), ((5407, 5452), 'os.path.join', 'osp.join', (['self.folder', 'ref_name', '"""centers.pt"""'], {}), "(self.folder, ref_name, 'centers.pt')\n", (5415, 5452), True, 'import os.path as osp\n'), ((7109, 7139), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['crop_img'], {}), '(crop_img)\n', (7129, 7139), True, 'import numpy as np\n'), ((7321, 7415), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'crop_img', "[cv2.IMWRITE_JPEG_QUALITY, self.opt['compression_level']]"], {}), "('.jpg', crop_img, [cv2.IMWRITE_JPEG_QUALITY, self.opt[\n 'compression_level']])\n", (7333, 7415), False, 'import cv2\n'), ((7229, 7286), 'cv2.resize', 'cv2.resize', (['crop_img', 'dsize'], {'interpolation': 'cv2.INTER_AREA'}), '(crop_img, dsize, interpolation=cv2.INTER_AREA)\n', (7239, 7286), False, 'import cv2\n'), ((4632, 4650), 'os.path.basename', 'osp.basename', (['path'], {}), '(path)\n', (4644, 4650), True, 'import os.path as osp\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 13:43:27 2019
@author: asier.erramuzpe
"""
### RINGING DETECTION MODULE
# file1 = OK, file2 = RINGING, file2 = SLIGHT RINGING, file3 = SLIGHT FRONTAL MOVE
# T1-weighted linear maps per subject.
# NOTE(review): these six assignments are immediately overwritten by the
# TV-map assignments below, so only the TV paths take effect at runtime.
file1 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s008/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'
file2 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s007/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'
file3 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s046/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'
file4 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s064/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'
file5 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s078/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'
file6 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s106/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'
# TV maps for the same subjects - these are the paths actually in effect
# for the code below.
file1 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s008/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'
file2 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s007/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'
file3 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s046/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'
file4 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s064/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'
file5 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s078/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'
file6 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s106/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'
def get_axial(file):
    """Return the middle axial (z) slice of a NIfTI volume.

    Parameters
    ----------
    file : str
        Path to a .nii / .nii.gz volume.

    Raises
    ------
    IOError
        If the volume cannot be loaded.
    """
    import nibabel as nib
    try:
        file_data = nib.load(file).get_data()
    except Exception as err:
        # Bug fix: the original used a bare `except` that only printed a
        # message and then crashed with NameError on the unbound
        # `file_data`. Raise a descriptive error instead.
        raise IOError('File {} does not exist'.format(file)) from err
    x, y, z = file_data.shape
    # Middle slice along the z (axial) axis.
    file_axial = file_data[:, :, z//2]
    return file_axial
def plot_spectrum(img):
    """Display the 2-D Fourier magnitude of ``img`` on a log-scaled colormap."""
    from matplotlib.colors import LogNorm
    from scipy import fftpack
    magnitude = np.abs(fftpack.fft2(img))
    plt.figure()
    # Logarithmic normalisation lets the huge DC component and the weak
    # high-frequency tails share one colormap.
    plt.imshow(magnitude, norm=LogNorm(vmin=5))
    plt.colorbar()
    plt.title('Fourier transform')
# FFT magnitude spectra of each scan's middle axial slice (log colormap).
plot_spectrum(get_axial(file1))
plot_spectrum(get_axial(file2))
plot_spectrum(get_axial(file3))
plot_spectrum(get_axial(file4))
plot_spectrum(get_axial(file5))
# Raw middle axial slices for visual inspection.
plt.imshow(get_axial(file1))
plt.imshow(get_axial(file2))
plt.imshow(get_axial(file3))
plt.imshow(get_axial(file4))
plt.imshow(get_axial(file5))
plt.imshow(get_axial(file6))
# Shifted 2D power spectra per scan.
# NOTE(review): plot_fft2_power and power_sum are defined further down this
# file; executed top-to-bottom as a script these calls would raise
# NameError. This section looks like interactive/REPL exploration - confirm.
plot_fft2_power(file1)
plot_fft2_power(file2)
plot_fft2_power(file3)
plot_fft2_power(file4)
plot_fft2_power(file5)
plot_fft2_power(file6)
# Scalar total spectral power per scan - candidate ringing indicator.
power_sum(file1)
power_sum(file2)
power_sum(file3)
power_sum(file4)
power_sum(file5)
power_sum(file6)
def power_sum(file):
    """Return the total power of the azimuthally averaged 1D spectrum of
    the middle axial slice of ``file``.

    Used as a scalar indicator in this ringing-detection module: the more
    high-frequency power a slice carries, the larger the sum.
    """
    from scipy import fftpack
    import numpy as np
    # (Removed unused `import pylab as py` - nothing is plotted here.)
    image = get_axial(file)
    # Take the fourier transform of the image, then shift the quadrants so
    # that low spatial frequencies sit at the center of the 2D spectrum.
    F1 = fftpack.fft2(image)
    F2 = fftpack.fftshift(F1)
    # Calculate a 2D power spectrum.
    psd2D = np.abs(F2)**2
    # Collapse to the azimuthally averaged 1D power spectrum and sum it.
    psd1D = azimuthalAverage(psd2D)
    return np.sum(psd1D)
def plot_fft2_power(file):
    """Plot the log10 2D power spectrum of the middle axial slice of ``file``.

    Cleanup: dropped the commented-out figure code and the unused
    azimuthalAverage() call (its result was only consumed by commented-out
    plotting), which did pure extra work on every call.
    """
    from scipy import fftpack
    import numpy as np
    import pylab as py
    image = get_axial(file)
    # Take the fourier transform of the image, then shift the quadrants so
    # that low spatial frequencies are in the center of the 2D spectrum.
    F1 = fftpack.fft2(image)
    F2 = fftpack.fftshift(F1)
    # Calculate a 2D power spectrum.
    psd2D = np.abs(F2)**2
    py.figure(2)
    py.clf()
    py.imshow(np.log10(psd2D))
    py.show()
def azimuthalAverage(image, center=None):
    """
    Calculate the azimuthally averaged radial profile.

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).

    Returns a 1D array: the mean pixel value in each integer-radius annulus.
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)
    # Bug fix: use `is None` instead of truthiness. `not center` raised
    # ValueError for an ndarray center (ambiguous truth value) and would
    # silently ignore falsy sequences; an explicit center is now honored.
    if center is None:
        center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    r = np.hypot(x - center[0], y - center[1])
    # Get sorted radii
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]
    # Get the integer part of the radii (bin size = 1)
    r_int = r_sorted.astype(int)
    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # Assumes all radii represented
    rind = np.where(deltar)[0]  # location of changed radius
    nr = rind[1:] - rind[:-1]  # number of pixels per radius bin
    # Cumulative sum to figure out sums for each radius bin
    csim = np.cumsum(i_sorted, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]
    radial_prof = tbin / nr
    return radial_prof
"""
QC reports
"""
import os
from os.path import join as opj
import numpy as np
import scipy.io as sio
from src.visualization.visualize import multipage
from dotenv import find_dotenv, load_dotenv
dotenv_path = find_dotenv()  # locate the nearest .env file
load_dotenv(dotenv_path)  # export its variables into os.environ
# Root folder containing all datasets; None if the variable is unset.
analysis_data_path = os.environ.get("ANALYSIS_DATA_PATH")
def create_qc_report_mrq(dataset, file_path):
    """
    Creates a visual QC report (multipage PDF) with axial middle slices.

    dataset = dataset (sub-folder of ANALYSIS_DATA_PATH) to choose from
    file_path = dictionary {file_label: path_to_file inside mrQ_ver2 folder};
                as many files as wanted

    Bug fix: the function previously ignored this argument (the inner loop
    shadowed it while iterating the module-level ``file_paths`` global); it
    now iterates the parameter, so callers control what is reported.
    """
    input_path = opj(analysis_data_path,
                     dataset)
    figures = []
    for sub_idx, sub in enumerate(sorted(os.listdir(input_path))):
        print(sub)
        # Iterate over the *parameter*, not the module-level global.
        for file_label, rel_path in file_path.items():
            target_file = opj(input_path, sub, rel_path)
            if os.path.exists(target_file):
                axial_slice = get_axial(target_file)
                # Render the slice with a fixed color range so subjects
                # are visually comparable.
                plt.imshow(axial_slice, cmap='gray')
                plt.clim(0, 4)
                plt.colorbar()
                ax = plt.title('Subject {}. File {}'.format(sub, file_label))
                fig = ax.get_figure()
                figures.append(fig)
                plt.close()
    output_folder = opj('./reports',
                        'qc')
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    # Bundle every collected figure into one PDF per dataset.
    multipage(opj(output_folder,
                  'report_' + dataset + '.pdf'),
              figures,
              dpi=250)
# possible datasets = kalanit_stanford ms_stanford_run1 stanford_2 reading_stanford anorexia_stanford gotlib_stanford amblyopia_stanford
datasets = ['kalanit_stanford', 'ms_stanford_run1', 'stanford_2',
            'reading_stanford', 'anorexia_stanford',
            'gotlib_stanford', 'amblyopia_stanford']
# Map of report label -> file path relative to each subject folder.
file_paths = {'T1': 'mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'}
# Generate one QC PDF per dataset.
for dataset in datasets:
    create_qc_report_mrq(dataset, file_paths)
| [
"numpy.sum",
"numpy.abs",
"dotenv.find_dotenv",
"scipy.fftpack.fftshift",
"numpy.argsort",
"matplotlib.colors.LogNorm",
"pylab.figure",
"os.path.join",
"os.path.exists",
"numpy.cumsum",
"numpy.log10",
"dotenv.load_dotenv",
"numpy.indices",
"numpy.hypot",
"scipy.fftpack.fft2",
"os.listd... | [((5738, 5751), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (5749, 5751), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((5752, 5776), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_path'], {}), '(dotenv_path)\n', (5763, 5776), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((5798, 5834), 'os.environ.get', 'os.environ.get', (['"""ANALYSIS_DATA_PATH"""'], {}), "('ANALYSIS_DATA_PATH')\n", (5812, 5834), False, 'import os\n'), ((2109, 2126), 'scipy.fftpack.fft2', 'fftpack.fft2', (['img'], {}), '(img)\n', (2121, 2126), False, 'from scipy import fftpack\n'), ((3055, 3074), 'scipy.fftpack.fft2', 'fftpack.fft2', (['image'], {}), '(image)\n', (3067, 3074), False, 'from scipy import fftpack\n'), ((3214, 3234), 'scipy.fftpack.fftshift', 'fftpack.fftshift', (['F1'], {}), '(F1)\n', (3230, 3234), False, 'from scipy import fftpack\n'), ((3407, 3420), 'numpy.sum', 'np.sum', (['psd1D'], {}), '(psd1D)\n', (3413, 3420), True, 'import numpy as np\n'), ((3622, 3641), 'scipy.fftpack.fft2', 'fftpack.fft2', (['image'], {}), '(image)\n', (3634, 3641), False, 'from scipy import fftpack\n'), ((3781, 3801), 'scipy.fftpack.fftshift', 'fftpack.fftshift', (['F1'], {}), '(F1)\n', (3797, 3801), False, 'from scipy import fftpack\n'), ((4080, 4092), 'pylab.figure', 'py.figure', (['(2)'], {}), '(2)\n', (4089, 4092), True, 'import pylab as py\n'), ((4097, 4105), 'pylab.clf', 'py.clf', ([], {}), '()\n', (4103, 4105), True, 'import pylab as py\n'), ((4310, 4319), 'pylab.show', 'py.show', ([], {}), '()\n', (4317, 4319), True, 'import pylab as py\n'), ((4700, 4723), 'numpy.indices', 'np.indices', (['image.shape'], {}), '(image.shape)\n', (4710, 4723), True, 'import numpy as np\n'), ((4827, 4865), 'numpy.hypot', 'np.hypot', (['(x - center[0])', '(y - center[1])'], {}), '(x - center[0], y - center[1])\n', (4835, 4865), True, 'import numpy as np\n'), ((4900, 4918), 'numpy.argsort', 'np.argsort', (['r.flat'], {}), '(r.flat)\n', (4910, 4918), True, 'import numpy 
as np\n'), ((5394, 5426), 'numpy.cumsum', 'np.cumsum', (['i_sorted'], {'dtype': 'float'}), '(i_sorted, dtype=float)\n', (5403, 5426), True, 'import numpy as np\n'), ((6129, 6161), 'os.path.join', 'opj', (['analysis_data_path', 'dataset'], {}), '(analysis_data_path, dataset)\n', (6132, 6161), True, 'from os.path import join as opj\n'), ((6857, 6879), 'os.path.join', 'opj', (['"""./reports"""', '"""qc"""'], {}), "('./reports', 'qc')\n", (6860, 6879), True, 'from os.path import join as opj\n'), ((2188, 2202), 'numpy.abs', 'np.abs', (['im_fft'], {}), '(im_fft)\n', (2194, 2202), True, 'import numpy as np\n'), ((3285, 3295), 'numpy.abs', 'np.abs', (['F2'], {}), '(F2)\n', (3291, 3295), True, 'import numpy as np\n'), ((3852, 3862), 'numpy.abs', 'np.abs', (['F2'], {}), '(F2)\n', (3858, 3862), True, 'import numpy as np\n'), ((4121, 4136), 'numpy.log10', 'np.log10', (['psd2D'], {}), '(psd2D)\n', (4129, 4136), True, 'import numpy as np\n'), ((5203, 5219), 'numpy.where', 'np.where', (['deltar'], {}), '(deltar)\n', (5211, 5219), True, 'import numpy as np\n'), ((6920, 6949), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (6934, 6949), False, 'import os\n'), ((6959, 6985), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (6970, 6985), False, 'import os\n'), ((7009, 7057), 'os.path.join', 'opj', (['output_folder', "('report_' + dataset + '.pdf')"], {}), "(output_folder, 'report_' + dataset + '.pdf')\n", (7012, 7057), True, 'from os.path import join as opj\n'), ((2209, 2224), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': '(5)'}), '(vmin=5)\n', (2216, 2224), False, 'from matplotlib.colors import LogNorm\n'), ((6241, 6263), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (6251, 6263), False, 'import os\n'), ((6372, 6403), 'os.path.join', 'opj', (['input_path', 'sub', 'file_path'], {}), '(input_path, sub, file_path)\n', (6375, 6403), True, 'from os.path import join as opj\n'), ((6419, 6446), 
'os.path.exists', 'os.path.exists', (['target_file'], {}), '(target_file)\n', (6433, 6446), False, 'import os\n'), ((1796, 1810), 'nibabel.load', 'nib.load', (['file'], {}), '(file)\n', (1804, 1810), True, 'import nibabel as nib\n')] |
import numpy as np
import torch
from torch import nn
# =============================================== L1 NORM =====================================================
def l1_norm_error(source, candidate):
    """Mean relative (percentage) L1 error between two arrays.

    Bug fix: the original wrote ``source[source == 0] = 1e-30`` which
    mutated the caller's array in place; the zero guard is now applied
    out-of-place.
    """
    error = np.abs(source - candidate)
    # Guard against division by zero without touching the caller's data.
    safe_source = np.where(source == 0, 1e-30, source)
    error = error / safe_source  # compute the percentage
    return error.mean()
# =============================================== RMSLE =====================================================
def rmsle_error(source, candidate):
    """Root mean squared logarithmic error (base-10 logs).

    Bug fix: the original ``candidate += 1e-30`` mutated the caller's
    array in place; the stabilizing epsilon is now added out-of-place.

    NOTE(review): this uses log10 rather than the conventional natural
    log for RMSLE; kept as-is to preserve existing behavior.
    """
    # Numerical-stability epsilon, applied to a local copy.
    candidate = candidate + 1e-30
    error = np.log10((source + 1) / (candidate + 1))
    error = error * error
    return np.sqrt(error.mean())
# =============================================== GRADIENT SMOOTH =====================================================
class GradientSmoothLoss(nn.Module):
    """Penalizes fast temporal change in a predicted time series.

    The allowed rate of change shrinks along the forecast horizon, so the
    later predictions are pushed towards a smoother desired "shape".

    :param refGrad: maximum gradient used for scaling the decay weights
    :param future: number of future predictions in the time series
    :param decayFunc: optional callable mapping a [0, 1] ramp to decay
        weights; defaults to a linear ramp from 0.8 down to 0.3
    """
    def __init__(self, refGrad, future, decayFunc = None):
        super().__init__()
        self.future = future
        self.refGrad = refGrad
        # Weights fall off with time: late timestamps are forced smoother.
        ramp = np.linspace(0, 1, future)
        weights = self.__linear_decay(ramp) if decayFunc is None else decayFunc(ramp)
        self.decay = torch.from_numpy(weights) * refGrad

    def __linear_decay(self, linSpace):
        # Default decay: 0.8 at t=0 shrinking linearly to 0.3 at the horizon.
        return 0.8 - linSpace * 0.5

    def forward(self, inTensor, clampVal = 0.25):
        """
        :param inTensor: input tensor on which to apply the loss
        :param clampVal: clamp errors before averaging for better stability
        :return: scalar loss value
        """
        self.decay = self.decay.to(inTensor.device)
        # Finite-difference gradient along the time axis.
        diffs = inTensor[:, 1:] - inTensor[:, :-1]
        # Only the excess above the allowed (decayed) gradient is penalized.
        excess = diffs.abs() - self.decay
        return torch.clamp(excess, min=0, max=clampVal).mean()
| [
"numpy.abs",
"torch.clamp",
"numpy.linspace",
"numpy.log10",
"numpy.sqrt",
"torch.from_numpy"
] | [((218, 244), 'numpy.abs', 'np.abs', (['(source - candidate)'], {}), '(source - candidate)\n', (224, 244), True, 'import numpy as np\n'), ((591, 631), 'numpy.log10', 'np.log10', (['((source + 1) / (candidate + 1))'], {}), '((source + 1) / (candidate + 1))\n', (599, 631), True, 'import numpy as np\n'), ((695, 709), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (702, 709), True, 'import numpy as np\n'), ((1588, 1613), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'future'], {}), '(0, 1, future)\n', (1599, 1613), True, 'import numpy as np\n'), ((1762, 1785), 'torch.from_numpy', 'torch.from_numpy', (['decay'], {}), '(decay)\n', (1778, 1785), False, 'import torch\n'), ((2549, 2590), 'torch.clamp', 'torch.clamp', (['gradOut'], {'min': '(0)', 'max': 'clampVal'}), '(gradOut, min=0, max=clampVal)\n', (2560, 2590), False, 'import torch\n')] |
from igraph import *
import argparse
import numpy as np
import matplotlib.pyplot as plt
from statistics import mean
BINS = 1  # bin width passed to every igraph degree_distribution() call
# Prints degree distribution properties
def print_degree_dist_props(histogram):
    """Print the mean and variance of a degree-distribution histogram."""
    stats = [("Mean", histogram.mean), ("Variance", histogram.var)]
    print("Results:")
    for label, value in stats:
        print(label + ":", round(value, 2))
# Calculates best fit power-law lambda
def calc_best_fit(xs, ys):
    """Fit a power law to a degree histogram and return lambda (1 decimal).

    xs - degree values (bin left edges); ys - observation counts per bin.

    Bug fix: the reconstructed sample must contain each *degree* repeated
    *count* times. The original repeated the count value degree-many times
    (arguments swapped), fitting the power law to the wrong data.
    """
    data = []
    for i in range(len(xs)):
        # Reconstruct the raw sample: degree xs[i] was observed ys[i] times.
        for _ in range(int(ys[i])):
            data.append(xs[i])
    result = power_law_fit(data)
    return round(result.alpha, 1)
# Plots best fit power laws for probability distribution
def plot_best_fit(x, alpha, lambda_val):
    # Overlay the fitted power law alpha * x^(-lambda) on the current axes.
    plt.plot(x, alpha*x**-lambda_val, "-")
# Plots degree distribution from igraph.Histogram
def plot_degree_dist(histogram):
    """Plot the normalized degree distribution and return (degrees, counts).

    Bins with zero observations are dropped before normalizing/plotting.
    """
    # Get (left bin edge, count) pairs from the igraph histogram.
    xs, ys = zip(*[(left, count) for left, _, count in histogram.bins()])
    xs = np.array(xs)
    ys = np.array(ys)
    # Bug fix: the original used the *values* of xs as deletion indices
    # (np.delete(xs, xs[ys < 1])), which is only correct when degrees start
    # at 0 and are contiguous; otherwise it removes the wrong bins or
    # raises IndexError. A boolean mask drops empty bins correctly for any
    # degree range.
    keep = ys >= 1
    xs = xs[keep]
    ys = ys[keep]
    # Normalize counts into probabilities.
    ys_norm = [float(i)/sum(ys) for i in ys]
    # Plot the degree distribution as points.
    plt.plot(xs, ys_norm, marker=".", linestyle="", markersize=2)
    return xs, ys
# Calculates out-degree distribution
def calc_out_degree(graph):
    """Plot the out-degree distribution on log-log axes with a power-law fit."""
    print("Calculating out-degree distribution...")
    plt.figure()
    histogram = graph.degree_distribution(BINS, mode=OUT)
    xs, ys = plot_degree_dist(histogram)
    lambda1 = calc_best_fit(xs, ys)
    # alpha1 is a hand-tuned scale for the overlay line; the [10:65] slice
    # picks the degree range over which the fitted line is drawn.
    alpha1 = 20
    plot_best_fit(xs[10:65], alpha1, lambda1)
    # NOTE(review): matplotlib >= 3.3 renamed annotate's `s` kwarg to
    # `text`; confirm against the pinned matplotlib version.
    plt.annotate(xy=[15, 0.0001], s="λ=" + str(lambda1))
    # Scales and labels
    plt.yscale("log")
    plt.xscale("log")
    plt.xlabel("Out-degree")
    plt.ylabel("Probability")
    plt.show()
    print_degree_dist_props(histogram)
# Calculates in-degree distribution
def calc_in_degree(graph):
    """Plot the in-degree distribution on log-log axes with a power-law fit."""
    print("Calculating in-degree distribution...")
    plt.figure()
    histogram = graph.degree_distribution(BINS, mode=IN)
    xs, ys = plot_degree_dist(histogram)
    lambda1 = calc_best_fit(xs, ys)
    # alpha is a hand-tuned scale for the overlay line; the [10:75] slice
    # picks the degree range over which the fitted line is drawn.
    alpha = 50
    plot_best_fit(xs[10:75], alpha, lambda1)
    # NOTE(review): matplotlib >= 3.3 renamed annotate's `s` kwarg to
    # `text`; confirm against the pinned matplotlib version.
    plt.annotate(xy=[25, 0.00018], s="λ=" + str(lambda1))
    # Scales and labels
    plt.yscale("log")
    plt.xscale("log")
    plt.xlabel("In-degree")
    plt.ylabel("Probability")
    plt.show()
    print_degree_dist_props(histogram)
# Calculates total degree distribution
def calc_total_degree(graph):
    """Plot the total-degree distribution on log-log axes with a power-law fit."""
    print("Calculating total degree distribution...")
    plt.figure()
    histogram = graph.degree_distribution(BINS, mode=ALL)
    xs, ys = plot_degree_dist(histogram)
    lambda1 = calc_best_fit(xs, ys)
    # alpha1 is a hand-tuned scale for the overlay line; the [10:75] slice
    # picks the degree range over which the fitted line is drawn.
    alpha1 = 50
    plot_best_fit(xs[10:75], alpha1, lambda1)
    # NOTE(review): matplotlib >= 3.3 renamed annotate's `s` kwarg to
    # `text`; confirm against the pinned matplotlib version.
    plt.annotate(xy=[25, 0.0005], s="λ=" + str(lambda1))
    # Scales and labels
    plt.yscale("log")
    plt.xscale("log")
    plt.xlabel("Total degree")
    plt.ylabel("Probability")
    plt.show()
    print_degree_dist_props(histogram)
# Calculates graph assortativity degree
def calc_assort_degree(graph):
    """Compute and print the graph's degree assortativity coefficient."""
    print("Calculating assortativity degree...")
    print("Result:", graph.assortativity_degree())
def calc_clustering(graph, n_nodes, n_edges):
    """Print clustering coefficients and the sizes of the largest SCC/WCC.

    n_nodes / n_edges are the totals for the whole graph, used to report
    the largest components as fractions of the graph.
    """
    print("Calculating the global clustering coefficient...")
    print("Result:", graph.transitivity_undirected())
    print("Calculating the average clustering coefficient (average of local coefficients)...")
    # mode="zero": vertices with fewer than two neighbours contribute 0
    # instead of NaN (per igraph semantics - confirm against the docs).
    print("Result:", np.mean(graph.transitivity_local_undirected(mode="zero")))
    print("Calculating SCC...")
    scc = graph.components(mode="STRONG")
    largest_scc = scc.giant()
    largest_scc_nodes = largest_scc.vcount()
    largest_scc_edges = largest_scc.ecount()
    print("Nodes in largest SCC", largest_scc_nodes,
          "(" + str(round(largest_scc_nodes/n_nodes, 3)) + ")")
    print("Edges in largest SCC", largest_scc_edges,
          "(" + str(round(largest_scc_edges/n_edges, 3)) + ")")
    print("Calculating WCC...")
    wcc = graph.components(mode="WEAK")
    largest_wcc = wcc.giant()
    largest_wcc_nodes = largest_wcc.vcount()
    largest_wcc_edges = largest_wcc.ecount()
    print("Nodes in largest WCC", largest_wcc_nodes,
          "(" + str(round(largest_wcc_nodes/n_nodes, 3)) + ")")
    print("Edges in largest WCC", largest_wcc_edges,
          "(" + str(round(largest_wcc_edges/n_edges, 3)) + ")")
def calc_short_path(graph):
    """Print the graph's diameter and average shortest-path length."""
    metrics = (
        ("diameter", graph.diameter),
        ("average path length", graph.average_path_length),
    )
    for label, compute in metrics:
        print("Calculating {}...".format(label))
        print("Result:", compute())
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.delete"
] | [((672, 714), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(alpha * x ** -lambda_val)', '"""-"""'], {}), "(x, alpha * x ** -lambda_val, '-')\n", (680, 714), True, 'import matplotlib.pyplot as plt\n'), ((901, 913), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (909, 913), True, 'import numpy as np\n'), ((923, 935), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (931, 935), True, 'import numpy as np\n'), ((955, 988), 'numpy.array', 'np.array', (['xs[ys < 1]'], {'dtype': '"""int"""'}), "(xs[ys < 1], dtype='int')\n", (963, 988), True, 'import numpy as np\n'), ((1000, 1026), 'numpy.delete', 'np.delete', (['xs', 'del_indexes'], {}), '(xs, del_indexes)\n', (1009, 1026), True, 'import numpy as np\n'), ((1036, 1062), 'numpy.delete', 'np.delete', (['ys', 'del_indexes'], {}), '(ys, del_indexes)\n', (1045, 1062), True, 'import numpy as np\n'), ((1165, 1226), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys_norm'], {'marker': '"""."""', 'linestyle': '""""""', 'markersize': '(2)'}), "(xs, ys_norm, marker='.', linestyle='', markersize=2)\n", (1173, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1381), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1379, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1687), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1680, 1687), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1709), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1702, 1709), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1738), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Out-degree"""'], {}), "('Out-degree')\n", (1724, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1768), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (1753, 1768), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1781, 1783), True, 'import matplotlib.pyplot as 
plt\n'), ((1944, 1956), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1954, 1956), True, 'import matplotlib.pyplot as plt\n'), ((2243, 2260), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2253, 2260), True, 'import matplotlib.pyplot as plt\n'), ((2265, 2282), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2275, 2282), True, 'import matplotlib.pyplot as plt\n'), ((2287, 2310), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""In-degree"""'], {}), "('In-degree')\n", (2297, 2310), True, 'import matplotlib.pyplot as plt\n'), ((2315, 2340), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (2325, 2340), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2353, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2525, 2537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2535, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2843), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2836, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2865), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2858, 2865), True, 'import matplotlib.pyplot as plt\n'), ((2870, 2896), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total degree"""'], {}), "('Total degree')\n", (2880, 2896), True, 'import matplotlib.pyplot as plt\n'), ((2901, 2926), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (2911, 2926), True, 'import matplotlib.pyplot as plt\n'), ((2931, 2941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2939, 2941), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import time
import numpy as np
import tensorflow as tf
import cv2
class FROZEN_GRAPH_INFERENCE:
def __init__(self, frozen_model):
"""Tensorflow detector
"""
self.inference_list = list()
self.PATH_TO_CKPT = frozen_model
self.count = 0
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
with self.detection_graph.as_default():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(graph=self.detection_graph, config=config)
self.windowNotSet = True
def draw_bounding_box(self, image, scores, boxes, classes, im_width, im_height):
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
for score, box, name in zip(scores, boxes, classes):
if name == 1 and score > 0.6:
# ymin, xmin, ymax, xmax = box
left = int(box[1]*im_width)
top = int(box[0]*im_height)
right = int(box[3]*im_width)
bottom = int(box[2]*im_height)
box_width = right-left
box_height = bottom-top
cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2, 8)
cv2.putText(image, '{}: {:.3f}'.format('person', score),(left, top - 5),
cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
return image
def run_frozen_graph(self, image, im_width, im_height):
"""image: bgr image
return (boxes, scores, classes, num_detections)
"""
image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
start_time = time.time()
(boxes, scores, classes, num_detections) = self.sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
elapsed_time = time.time() - start_time
self.inference_list.append(elapsed_time)
self.count = self.count + 1
average_inference = sum(self.inference_list)/self.count
print('Average inference time: {}'.format(average_inference))
# Draw bounding boxes on the image
image = self.draw_bounding_box(image, scores, boxes, classes, im_width, im_height)
return (image, boxes, scores, classes, num_detections)
| [
"cv2.cvtColor",
"tensorflow.Session",
"numpy.expand_dims",
"time.time",
"tensorflow.ConfigProto",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"cv2.rectangle",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.GraphDef"
] | [((327, 337), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (335, 337), True, 'import tensorflow as tf\n'), ((1016, 1033), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (1026, 1033), True, 'import numpy as np\n'), ((1051, 1069), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (1061, 1069), True, 'import numpy as np\n'), ((1973, 2011), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1985, 2011), False, 'import cv2\n'), ((2283, 2315), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (2297, 2315), True, 'import numpy as np\n'), ((3015, 3026), 'time.time', 'time.time', ([], {}), '()\n', (3024, 3026), False, 'import time\n'), ((413, 426), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (424, 426), True, 'import tensorflow as tf\n'), ((731, 747), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (745, 747), True, 'import tensorflow as tf\n'), ((823, 876), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph', 'config': 'config'}), '(graph=self.detection_graph, config=config)\n', (833, 876), True, 'import tensorflow as tf\n'), ((3227, 3238), 'time.time', 'time.time', ([], {}), '()\n', (3236, 3238), False, 'import time\n'), ((444, 483), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.PATH_TO_CKPT', '"""rb"""'], {}), "(self.PATH_TO_CKPT, 'rb')\n", (458, 483), True, 'import tensorflow as tf\n'), ((617, 659), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (636, 659), True, 'import tensorflow as tf\n'), ((1088, 1107), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (1098, 1107), True, 'import numpy as np\n'), ((1552, 1621), 'cv2.rectangle', 'cv2.rectangle', (['image', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)', '(8)'], {}), '(image, (left, top), (right, bottom), (0, 255, 0), 2, 8)\n', (1565, 
1621), False, 'import cv2\n')] |
"""
align_funcs.py
The alignment algorithm takes in TextGrid classes and extracts meaningful
data. This information consists of the beginning of a phoneme's utterance,
its end, and its phonemic transcription. With this information the program
finds the edit distance between two given utterances, phoneme by phoneme.
Functions: is_vow(char)- Checks if char is vowel
is_cons(char)- Checks if char is consonant
alignment(s1, s2)- Performs LD on two TextGrids
<NAME>
Infant Language Acquisition Lab
Professor: Dr. <NAME>
Manager: <NAME>
Last Updated: 7/28/15
"""
import numpy as np
from phonemes import VOWELS, CONSONANTS
def is_vow(char):
""" Checks if char is vowel """
for i in VOWELS.values():
if char in i:
return True
else:
return False
def is_con(char):
""" Checks if char is consonant """
for i in CONSONANTS.values():
if char in i:
return True
else:
return False
def is_diff(char1, char2):
"""Checks if char are different, i.e. vow/vow, cons/cons, or vow/cons"""
if (is_con(char1) and is_vow(char2)) or (is_vow(char1) and is_con(char2)):
return True
else:
return False
def alignment(s1, s2):
""" Minimum number of edits needed to get from one string to another """
""" Wikipedia: Levenshtein distance: Computing Levenshtein distance """
grid = [[0 for x in range(len(s2) + 1)] for x in range(len(s1) + 1)]
backtrace = [[0 for x in range(len(s2) + 1)] for x in range(len(s1) + 1)]
for i, item in enumerate(grid):
grid[i][0] = i
for j, jtem in enumerate(grid[0]):
grid[0][j] = j
for i in range(1, len(grid)):
for j in range(1, len(grid[0])):
if s1[i-1] == s2[j-1]:
cost = 0
elif is_diff(s1[i-1], s2[j-1]):
cost = 2
else:
cost = 1
funcs = [(grid[i-1][j] + 1), (grid[i][j-1] + 1),
(grid[i-1][j-1] + cost)]
grid[i][j] = min(funcs)
backtrace[i][j] = np.argmin(funcs)
""" Backtrace function """
a1 = ""
a2 = ""
i = len(backtrace)-1
j = len(backtrace[0])-1
""" Loop through point array to find cheapest operation """
while (i != 0) and (j != 0):
if backtrace[i][j] == 2: # Substitution
a1 += s1[i-1]
a2 += s2[j-1]
i -= 1
j -= 1
elif backtrace[i][j] == 1: # Deletion
a1 += '_'
a2 += s2[j-1]
j -= 1
else: # Insertion
a1 += s1[i-1]
a2 += '_'
i -= 1
a1 = a1[::-1]
a2 = a2[::-1]
return grid, a1, a2, grid[len(grid)-1][len(grid[0])-1]
| [
"phonemes.VOWELS.values",
"phonemes.CONSONANTS.values",
"numpy.argmin"
] | [((768, 783), 'phonemes.VOWELS.values', 'VOWELS.values', ([], {}), '()\n', (781, 783), False, 'from phonemes import VOWELS, CONSONANTS\n'), ((936, 955), 'phonemes.CONSONANTS.values', 'CONSONANTS.values', ([], {}), '()\n', (953, 955), False, 'from phonemes import VOWELS, CONSONANTS\n'), ((2146, 2162), 'numpy.argmin', 'np.argmin', (['funcs'], {}), '(funcs)\n', (2155, 2162), True, 'import numpy as np\n')] |
from typing import Any, Hashable, Mapping, Tuple
import numpy as np
import xarray as xr
def histogram(da: xr.DataArray, **kwargs) -> Tuple[xr.DataArray, xr.DataArray]:
"""Compute histogram and return tuple of counts and bin widths.
Args:
da: input data
kwargs: optional parameters to pass on to np.histogram
Return:
counts, bin_widths tuple of xr.DataArrays. The coordinate of both arrays is
equal to the left side of the histogram bins.
"""
coord_name = f"{da.name}_bins" if da.name is not None else "bins"
count, bins = np.histogram(da, **kwargs)
coords: Mapping[Hashable, Any] = {coord_name: bins[:-1]}
width = bins[1:] - bins[:-1]
width_da = xr.DataArray(width, coords=coords, dims=[coord_name])
count_da = xr.DataArray(count, coords=coords, dims=[coord_name])
if "units" in da.attrs:
count_da[coord_name].attrs["units"] = da.units
width_da[coord_name].attrs["units"] = da.units
width_da.attrs["units"] = da.units
if "long_name" in da.attrs:
count_da[coord_name].attrs["long_name"] = da.long_name
return count_da, width_da
def histogram2d(
x: xr.DataArray, y: xr.DataArray, **kwargs
) -> Tuple[xr.DataArray, xr.DataArray, xr.DataArray]:
"""Compute 2D histogram and return tuple of counts and bin widths.
Args:
x: input data
y: input data
kwargs: optional parameters to pass on to np.histogram
Return:
counts, x_bin_widths, y_bin_widths tuple of xr.DataArrays. The coordinate of all
arrays is equal to the left side of the histogram bins.
"""
xcoord_name = f"{x.name}_bins" if x.name is not None else "xbins"
ycoord_name = f"{y.name}_bins" if y.name is not None else "ybins"
count, xedges, yedges = np.histogram2d(
x.values.ravel(), y.transpose(*x.dims).values.ravel(), **kwargs
)
xcoord: Mapping[Hashable, Any] = {xcoord_name: xedges[:-1]}
ycoord: Mapping[Hashable, Any] = {ycoord_name: yedges[:-1]}
xwidth = xedges[1:] - xedges[:-1]
ywidth = yedges[1:] - yedges[:-1]
xwidth_da = xr.DataArray(xwidth, coords=xcoord, dims=[xcoord_name])
ywidth_da = xr.DataArray(ywidth, coords=ycoord, dims=[ycoord_name])
count_da = xr.DataArray(
count, coords={**xcoord, **ycoord}, dims=[xcoord_name, ycoord_name]
)
if "units" in x.attrs:
xwidth_da[xcoord_name].attrs["units"] = x.units
xwidth_da.attrs["units"] = x.units
if "units" in y.attrs:
ywidth_da[ycoord_name].attrs["units"] = y.units
ywidth_da.attrs["units"] = y.units
return count_da, xwidth_da, ywidth_da
| [
"numpy.histogram",
"xarray.DataArray"
] | [((583, 609), 'numpy.histogram', 'np.histogram', (['da'], {}), '(da, **kwargs)\n', (595, 609), True, 'import numpy as np\n'), ((719, 772), 'xarray.DataArray', 'xr.DataArray', (['width'], {'coords': 'coords', 'dims': '[coord_name]'}), '(width, coords=coords, dims=[coord_name])\n', (731, 772), True, 'import xarray as xr\n'), ((788, 841), 'xarray.DataArray', 'xr.DataArray', (['count'], {'coords': 'coords', 'dims': '[coord_name]'}), '(count, coords=coords, dims=[coord_name])\n', (800, 841), True, 'import xarray as xr\n'), ((2113, 2168), 'xarray.DataArray', 'xr.DataArray', (['xwidth'], {'coords': 'xcoord', 'dims': '[xcoord_name]'}), '(xwidth, coords=xcoord, dims=[xcoord_name])\n', (2125, 2168), True, 'import xarray as xr\n'), ((2185, 2240), 'xarray.DataArray', 'xr.DataArray', (['ywidth'], {'coords': 'ycoord', 'dims': '[ycoord_name]'}), '(ywidth, coords=ycoord, dims=[ycoord_name])\n', (2197, 2240), True, 'import xarray as xr\n'), ((2256, 2341), 'xarray.DataArray', 'xr.DataArray', (['count'], {'coords': '{**xcoord, **ycoord}', 'dims': '[xcoord_name, ycoord_name]'}), '(count, coords={**xcoord, **ycoord}, dims=[xcoord_name,\n ycoord_name])\n', (2268, 2341), True, 'import xarray as xr\n')] |
"""
"Cholesky Factor Algorithm" (CFA) simulation smoothing for state space models
Author: <NAME>
License: BSD-3
"""
import numpy as np
from . import tools
class CFASimulationSmoother(object):
r"""
"Cholesky Factor Algorithm" (CFA) simulation smoother
Parameters
----------
model : Representation
The state space model.
Notes
-----
This class allows simulation smoothing by the "Cholesky Factor Algorithm"
(CFA) described in [1]_ and [2]_, which essentially takes advantage of the
existence of an efficient sparse Cholesky factor algorithm for banded
matrices that are held in a sparse matrix format.
In particular, this simulation smoother computes the joint posterior mean
and covariance matrix for the unobserved state vector all at once, rather
than using the recursive computations of the Kalman filter and smoother. It
then uses these posterior moments to sample directly from this joint
posterior. For some models, it can be more computationally efficient than
the simulation smoother based on the Kalman filter and smoother.
**Important caveat**:
However, this simulation smoother cannot be used with all state space
models, including several of the most popular. In particular, the CFA
algorithm cannot support degenerate distributions (i.e. positive
semi-definite covariance matrices) for the initial state (which is the
prior for the first state) or the observation or state innovations.
One practical problem with this algorithm is that an autoregressive term
with order higher than one is typically put into state space form by
augmenting the states using identities. As identities, these augmenting
terms will not be subject to random innovations, and so the state
innovation will be degenerate. It is possible to take these higher order
terms directly into account when constructing the posterior covariance
matrix, but this has not yet been implemented.
Similarly, some state space forms of SARIMA and VARMA models make
the observation equation an identity, which is not compatible with the CFA
simulation smoothing approach.
This simulation smoother has so-far found most of its use with dynamic
factor and stochastic volatility models, which satisfy the restrictions
described above.
**Not-yet-implemented**:
There are several features that are not yet available with this simulation
smoother:
- It does not yet allow diffuse initialization of the state vector.
- It produces simulated states only for exactly the observations in the
model (i.e. it cannot produce simulations for a subset of the model
observations or for observations outside the model).
References
----------
.. [1] McCausland, <NAME>., <NAME>, and <NAME>.
"Simulation smoothing for state-space models: A computational
efficiency analysis."
Computational Statistics & Data Analysis 55, no. 1 (2011): 199-212.
.. [2] Chan, <NAME>, and <NAME>.
"Efficient simulation and integrated likelihood estimation in
state space models."
International Journal of Mathematical Modelling and Numerical
Optimisation 1, no. 1-2 (2009): 101-120.
"""
def __init__(self, model, cfa_simulation_smoother_classes=None):
self.model = model
# Get the simulation smoother classes
self.prefix_simulation_smoother_map = (
cfa_simulation_smoother_classes
if cfa_simulation_smoother_classes is not None
else tools.prefix_cfa_simulation_smoother_map.copy())
self._simulation_smoothers = {}
self._posterior_mean = None
self._posterior_cov_inv_chol = None
self._posterior_cov = None
self._simulated_state = None
@property
def _simulation_smoother(self):
prefix = self.model.prefix
if prefix in self._simulation_smoothers:
return self._simulation_smoothers[prefix]
return None
@property
def posterior_mean(self):
r"""
Posterior mean of the states conditional on the data
Notes
-----
.. math::
\hat \alpha_t = E[\alpha_t \mid Y^n ]
This posterior mean is identical to the `smoothed_state` computed by
the Kalman smoother.
"""
if self._posterior_mean is None:
self._posterior_mean = np.array(
self._simulation_smoother.posterior_mean, copy=True)
return self._posterior_mean
@property
def posterior_cov_inv_chol_sparse(self):
r"""
Sparse Cholesky factor of inverse posterior covariance matrix
Notes
-----
This attribute holds in sparse diagonal banded storage the Cholesky
factor of the inverse of the posterior covariance matrix. If we denote
:math:`P = Var[\alpha \mid Y^n ]`, then the this attribute holds the
lower Cholesky factor :math:`L`, defined from :math:`L L' = P^{-1}`.
This attribute uses the sparse diagonal banded storage described in the
documentation of, for example, the SciPy function
`scipy.linalg.solveh_banded`.
"""
if self._posterior_cov_inv_chol is None:
self._posterior_cov_inv_chol = np.array(
self._simulation_smoother.posterior_cov_inv_chol, copy=True)
return self._posterior_cov_inv_chol
@property
def posterior_cov(self):
r"""
Posterior covariance of the states conditional on the data
Notes
-----
**Warning**: the matrix computed when accessing this property can be
extremely large: it is shaped `(nobs * k_states, nobs * k_states)`. In
most cases, it is better to use the `posterior_cov_inv_chol_sparse`
property if possible, which holds in sparse diagonal banded storage
the Cholesky factor of the inverse of the posterior covariance matrix.
.. math::
Var[\alpha \mid Y^n ]
This posterior covariance matrix is *not* identical to the
`smoothed_state_cov` attribute produced by the Kalman smoother, because
it additionally contains all cross-covariance terms. Instead,
`smoothed_state_cov` contains the `(k_states, k_states)` block
diagonal entries of this posterior covariance matrix.
"""
if self._posterior_cov is None:
from scipy.linalg import cho_solve_banded
inv_chol = self.posterior_cov_inv_chol_sparse
self._posterior_cov = cho_solve_banded(
(inv_chol, True), np.eye(inv_chol.shape[1]))
return self._posterior_cov
def simulate(self, variates=None, update_posterior=True):
r"""
Perform simulation smoothing (via Cholesky factor algorithm)
Does not return anything, but populates the object's `simulated_state`
attribute, and also makes available the attributes `posterior_mean`,
`posterior_cov`, and `posterior_cov_inv_chol_sparse`.
Parameters
----------
variates : array_like, optional
Random variates, distributed standard Normal. Usually only
specified if results are to be replicated (e.g. to enforce a seed)
or for testing. If not specified, random variates are drawn. Must
be shaped (nobs, k_states).
Notes
-----
The first step in simulating from the joint posterior of the state
vector conditional on the data is to compute the two relevant moments
of the joint posterior distribution:
.. math::
\alpha \mid Y_n \sim N(\hat \alpha, Var(\alpha \mid Y_n))
Let :math:`L L' = Var(\alpha \mid Y_n)^{-1}`. Then simulation proceeds
according to the following steps:
1. Draw :math:`u \sim N(0, I)`
2. Compute :math:`x = \hat \alpha + (L')^{-1} u`
And then :math:`x` is a draw from the joint posterior of the states.
The output of the function is as follows:
- The simulated draw :math:`x` is held in the `simulated_state`
attribute.
- The posterior mean :math:`\hat \alpha` is held in the
`posterior_mean` attribute.
- The (lower triangular) Cholesky factor of the inverse posterior
covariance matrix, :math:`L`, is held in sparse diagonal banded
storage in the `posterior_cov_inv_chol` attribute.
- The posterior covariance matrix :math:`Var(\alpha \mid Y_n)` can be
computed on demand by accessing the `posterior_cov` property. Note
that this matrix can be extremely large, so care must be taken when
accessing this property. In most cases, it will be preferred to make
use of the `posterior_cov_inv_chol` attribute rather than the
`posterior_cov` attribute.
"""
# (Re) initialize the _statespace representation
prefix, dtype, create = self.model._initialize_representation()
# Validate variates and get in required datatype
if variates is not None:
tools.validate_matrix_shape('variates', variates.shape,
self.model.k_states,
self.model.nobs, 1)
variates = np.ravel(variates, order='F').astype(dtype)
# (Re) initialize the state
self.model._initialize_state(prefix=prefix)
# Construct the Cython simulation smoother instance, if necessary
if create or prefix not in self._simulation_smoothers:
cls = self.prefix_simulation_smoother_map[prefix]
self._simulation_smoothers[prefix] = cls(
self.model._statespaces[prefix])
sim = self._simulation_smoothers[prefix]
# Update posterior moments, if requested
if update_posterior:
sim.update_sparse_posterior_moments()
self._posterior_mean = None
self._posterior_cov_inv_chol = None
self._posterior_cov = None
# Perform simulation smoothing
self.simulated_state = sim.simulate(variates=variates)
| [
"numpy.eye",
"numpy.array",
"numpy.ravel"
] | [((4493, 4554), 'numpy.array', 'np.array', (['self._simulation_smoother.posterior_mean'], {'copy': '(True)'}), '(self._simulation_smoother.posterior_mean, copy=True)\n', (4501, 4554), True, 'import numpy as np\n'), ((5369, 5438), 'numpy.array', 'np.array', (['self._simulation_smoother.posterior_cov_inv_chol'], {'copy': '(True)'}), '(self._simulation_smoother.posterior_cov_inv_chol, copy=True)\n', (5377, 5438), True, 'import numpy as np\n'), ((6695, 6720), 'numpy.eye', 'np.eye', (['inv_chol.shape[1]'], {}), '(inv_chol.shape[1])\n', (6701, 6720), True, 'import numpy as np\n'), ((9403, 9432), 'numpy.ravel', 'np.ravel', (['variates'], {'order': '"""F"""'}), "(variates, order='F')\n", (9411, 9432), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
from feat.utils import get_resource_path
from feat.facepose_detectors.utils import convert_to_euler
THREED_FACE_MODEL = os.path.join(get_resource_path(), "reference_3d_68_points_trans.npy")
class PerspectiveNPointModel:
""" Class that leverages 68 2D facial landmark points to estimate head pose using the Perspective-n-Point
algorithm.
Code adapted from https://github.com/yinguobing/head-pose-estimation/ and
https://github.com/lincolnhard/head-pose-estimation/. Each code base licensed under MIT Licenses, which can be
found here: https://github.com/yinguobing/head-pose-estimation/blob/master/LICENSE and here:
https://github.com/lincolnhard/head-pose-estimation/blob/master/LICENSE
"""
def __init__(self):
""" Initializes the model, with a reference 3D model (xyz coordinates) of a standard face"""
# self.model_points = get_full_model_points(os.path.join(get_resource_path(), "3d_face_model.txt"))
self.model_points = np.load(THREED_FACE_MODEL, allow_pickle=True)
def predict(self, img, landmarks):
""" Determines headpose using passed 68 2D landmarks
Args:
img (np.ndarray) : The cv2 image from which the landmarks were produced
landmarks (np.ndarray) : The landmarks to use to produce the headpose estimate
Returns:
np.ndarray: Euler angles ([pitch, roll, yaw])
"""
# Obtain camera intrinsics to solve PnP algorithm. These intrinsics represent defaults - users may modify this
# code to pass their own camera matrix and distortion coefficients if they happen to have calibrated their
# camera: https://learnopencv.com/camera-calibration-using-opencv/
h, w = img.shape[:2]
camera_matrix = np.array([[w + h, 0, w // 2],
[0, w + h, h // 2],
[0, 0, 1]], dtype='float32')
dist_coeffs = np.zeros((4, 1), dtype='float32') # Assuming no lens distortion
# Solve PnP using all 68 points:
landmarks = landmarks.astype('float32')
_, rotation_vector, translation_vector = cv2.solvePnP(self.model_points, landmarks, camera_matrix, dist_coeffs,
flags=cv2.SOLVEPNP_EPNP)
# Convert to Euler Angles
euler_angles = convert_to_euler(np.squeeze(rotation_vector))
# PnP may give values outside the range of (-90, 90), and sometimes misinterprets a face as facing
# AWAY from the camera (since 2D landmarks do not convey whether face is facing towards or away from camera)
# Thus, we adjust below to ensure the face is interpreted as front-facing
euler_angles[euler_angles > 90] -= 180
euler_angles[euler_angles < -90] += 180
return euler_angles
| [
"numpy.load",
"cv2.solvePnP",
"numpy.zeros",
"numpy.array",
"numpy.squeeze",
"feat.utils.get_resource_path"
] | [((178, 197), 'feat.utils.get_resource_path', 'get_resource_path', ([], {}), '()\n', (195, 197), False, 'from feat.utils import get_resource_path\n'), ((1045, 1090), 'numpy.load', 'np.load', (['THREED_FACE_MODEL'], {'allow_pickle': '(True)'}), '(THREED_FACE_MODEL, allow_pickle=True)\n', (1052, 1090), True, 'import numpy as np\n'), ((1848, 1926), 'numpy.array', 'np.array', (['[[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]'], {'dtype': '"""float32"""'}), "([[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]], dtype='float32')\n", (1856, 1926), True, 'import numpy as np\n'), ((2020, 2053), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {'dtype': '"""float32"""'}), "((4, 1), dtype='float32')\n", (2028, 2053), True, 'import numpy as np\n'), ((2228, 2327), 'cv2.solvePnP', 'cv2.solvePnP', (['self.model_points', 'landmarks', 'camera_matrix', 'dist_coeffs'], {'flags': 'cv2.SOLVEPNP_EPNP'}), '(self.model_points, landmarks, camera_matrix, dist_coeffs,\n flags=cv2.SOLVEPNP_EPNP)\n', (2240, 2327), False, 'import cv2\n'), ((2465, 2492), 'numpy.squeeze', 'np.squeeze', (['rotation_vector'], {}), '(rotation_vector)\n', (2475, 2492), True, 'import numpy as np\n')] |
"""
Implementation of sequential minimal optimization (SMO) for support vector machines
(SVM).
Sequential minimal optimization (SMO) is an algorithm for solving the quadratic
programming (QP) problem that arises during the training of support vector
machines.
It was invented by <NAME> in 1998.
Input:
0: type: numpy.ndarray.
1: first column of ndarray must be tags of samples, must be 1 or -1.
2: rows of ndarray represent samples.
Usage:
Command:
python3 sequential_minimum_optimization.py
Code:
from sequential_minimum_optimization import SmoSVM, Kernel
kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5)
init_alphas = np.zeros(train.shape[0])
SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4,
b=0.0, tolerance=0.001)
SVM.fit()
predict = SVM.predict(test_samples)
Reference:
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf
http://web.cs.iastate.edu/~honavar/smo-svm.pdf
"""
import os
import sys
import urllib.request
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.datasets import make_blobs, make_circles
from sklearn.preprocessing import StandardScaler
CANCER_DATASET_URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
class SmoSVM:
def __init__(
self,
train,
kernel_func,
alpha_list=None,
cost=0.4,
b=0.0,
tolerance=0.001,
auto_norm=True,
):
self._init = True
self._auto_norm = auto_norm
self._c = np.float64(cost)
self._b = np.float64(b)
self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001)
self.tags = train[:, 0]
self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:]
self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0])
self.Kernel = kernel_func
self._eps = 0.001
self._all_samples = list(range(self.length))
self._K_matrix = self._calculate_k_matrix()
self._error = np.zeros(self.length)
self._unbound = []
self.choose_alpha = self._choose_alphas()
# Calculate alphas using SMO algorithm
def fit(self):
K = self._k
state = None
while True:
# 1: Find alpha1, alpha2
try:
i1, i2 = self.choose_alpha.send(state)
state = None
except StopIteration:
print("Optimization done!\nEvery sample satisfy the KKT condition!")
break
# 2: calculate new alpha2 and new alpha1
y1, y2 = self.tags[i1], self.tags[i2]
a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy()
e1, e2 = self._e(i1), self._e(i2)
args = (i1, i2, a1, a2, e1, e2, y1, y2)
a1_new, a2_new = self._get_new_alpha(*args)
if not a1_new and not a2_new:
state = False
continue
self.alphas[i1], self.alphas[i2] = a1_new, a2_new
# 3: update threshold(b)
b1_new = np.float64(
-e1
- y1 * K(i1, i1) * (a1_new - a1)
- y2 * K(i2, i1) * (a2_new - a2)
+ self._b
)
b2_new = np.float64(
-e2
- y2 * K(i2, i2) * (a2_new - a2)
- y1 * K(i1, i2) * (a1_new - a1)
+ self._b
)
if 0.0 < a1_new < self._c:
b = b1_new
if 0.0 < a2_new < self._c:
b = b2_new
if not (np.float64(0) < a2_new < self._c) and not (
np.float64(0) < a1_new < self._c
):
b = (b1_new + b2_new) / 2.0
b_old = self._b
self._b = b
# 4: update error value,here we only calculate those non-bound samples' error
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
for s in self.unbound:
if s == i1 or s == i2:
continue
self._error[s] += (
y1 * (a1_new - a1) * K(i1, s)
+ y2 * (a2_new - a2) * K(i2, s)
+ (self._b - b_old)
)
# if i1 or i2 is non-bound,update there error value to zero
if self._is_unbound(i1):
self._error[i1] = 0
if self._is_unbound(i2):
self._error[i2] = 0
# Predict test samles
def predict(self, test_samples, classify=True):
if test_samples.shape[1] > self.samples.shape[1]:
raise ValueError(
"Test samples' feature length does not equal to that of train samples"
)
if self._auto_norm:
test_samples = self._norm(test_samples)
results = []
for test_sample in test_samples:
result = self._predict(test_sample)
if classify:
results.append(1 if result > 0 else -1)
else:
results.append(result)
return np.array(results)
# Check if alpha violate KKT condition
def _check_obey_kkt(self, index):
alphas = self.alphas
tol = self._tol
r = self._e(index) * self.tags[index]
c = self._c
return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0)
# Get value calculated from kernel function
def _k(self, i1, i2):
# for test samples,use Kernel function
if isinstance(i2, np.ndarray):
return self.Kernel(self.samples[i1], i2)
# for train samples,Kernel values have been saved in matrix
else:
return self._K_matrix[i1, i2]
# Get sample's error
def _e(self, index):
"""
Two cases:
1:Sample[index] is non-bound,Fetch error from list: _error
2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi
"""
# get from error data
if self._is_unbound(index):
return self._error[index]
# get by g(xi) - yi
else:
gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b
yi = self.tags[index]
return gx - yi
# Calculate Kernel matrix of all possible i1,i2 ,saving time
def _calculate_k_matrix(self):
k_matrix = np.zeros([self.length, self.length])
for i in self._all_samples:
for j in self._all_samples:
k_matrix[i, j] = np.float64(
self.Kernel(self.samples[i, :], self.samples[j, :])
)
return k_matrix
# Predict test sample's tag
def _predict(self, sample):
k = self._k
predicted_value = (
np.sum(
[
self.alphas[i1] * self.tags[i1] * k(i1, sample)
for i1 in self._all_samples
]
)
+ self._b
)
return predicted_value
# Choose alpha1 and alpha2
def _choose_alphas(self):
locis = yield from self._choose_a1()
if not locis:
return
return locis
def _choose_a1(self):
"""
Choose first alpha ;steps:
1:First loop over all sample
2:Second loop over all non-bound samples till all non-bound samples does not voilate kkt condition.
3:Repeat this two process endlessly,till all samples does not voilate kkt condition samples after first loop.
"""
while True:
all_not_obey = True
# all sample
print("scanning all sample!")
for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]:
all_not_obey = False
yield from self._choose_a2(i1)
# non-bound sample
print("scanning non-bound sample!")
while True:
not_obey = True
for i1 in [
i
for i in self._all_samples
if self._check_obey_kkt(i) and self._is_unbound(i)
]:
not_obey = False
yield from self._choose_a2(i1)
if not_obey:
print("all non-bound samples fit the KKT condition!")
break
if all_not_obey:
print("all samples fit the KKT condition! Optimization done!")
break
return False
def _choose_a2(self, i1):
"""
Choose the second alpha by using heuristic algorithm ;steps:
1: Choose alpha2 which gets the maximum step size (|E1 - E2|).
2: Start in a random point,loop over all non-bound samples till alpha1 and
alpha2 are optimized.
3: Start in a random point,loop over all samples till alpha1 and alpha2 are
optimized.
"""
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
if len(self.unbound) > 0:
tmp_error = self._error.copy().tolist()
tmp_error_dict = {
index: value
for index, value in enumerate(tmp_error)
if self._is_unbound(index)
}
if self._e(i1) >= 0:
i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index])
else:
i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index])
cmd = yield i1, i2
if cmd is None:
return
for i2 in np.roll(self.unbound, np.random.choice(self.length)):
cmd = yield i1, i2
if cmd is None:
return
for i2 in np.roll(self._all_samples, np.random.choice(self.length)):
cmd = yield i1, i2
if cmd is None:
return
# Get the new alpha2 and new alpha1
def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
K = self._k
if i1 == i2:
return None, None
# calculate L and H which bound the new alpha2
s = y1 * y2
if s == -1:
L, H = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
else:
L, H = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1)
if L == H:
return None, None
# calculate eta
k11 = K(i1, i1)
k22 = K(i2, i2)
k12 = K(i1, i2)
eta = k11 + k22 - 2.0 * k12
# select the new alpha2 which could get the minimal objectives
if eta > 0.0:
a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
# a2_new has a boundary
if a2_new_unc >= H:
a2_new = H
elif a2_new_unc <= L:
a2_new = L
else:
a2_new = a2_new_unc
else:
b = self._b
l1 = a1 + s * (a2 - L)
h1 = a1 + s * (a2 - H)
# way 1
f1 = y1 * (e1 + b) - a1 * K(i1, i1) - s * a2 * K(i1, i2)
f2 = y2 * (e2 + b) - a2 * K(i2, i2) - s * a1 * K(i1, i2)
ol = (
l1 * f1
+ L * f2
+ 1 / 2 * l1 ** 2 * K(i1, i1)
+ 1 / 2 * L ** 2 * K(i2, i2)
+ s * L * l1 * K(i1, i2)
)
oh = (
h1 * f1
+ H * f2
+ 1 / 2 * h1 ** 2 * K(i1, i1)
+ 1 / 2 * H ** 2 * K(i2, i2)
+ s * H * h1 * K(i1, i2)
)
"""
# way 2
Use objective function check which alpha2 new could get the minimal objectives
"""
if ol < (oh - self._eps):
a2_new = L
elif ol > oh + self._eps:
a2_new = H
else:
a2_new = a2
# a1_new has a boundary too
a1_new = a1 + s * (a2 - a2_new)
if a1_new < 0:
a2_new += s * a1_new
a1_new = 0
if a1_new > self._c:
a2_new += s * (a1_new - self._c)
a1_new = self._c
return a1_new, a2_new
# Normalise data using min_max way
def _norm(self, data):
if self._init:
self._min = np.min(data, axis=0)
self._max = np.max(data, axis=0)
self._init = False
return (data - self._min) / (self._max - self._min)
else:
return (data - self._min) / (self._max - self._min)
def _is_unbound(self, index):
if 0.0 < self.alphas[index] < self._c:
return True
else:
return False
def _is_support(self, index):
if self.alphas[index] > 0:
return True
else:
return False
    @property
    def unbound(self):
        # Cached indices of non-bound samples (0 < alpha < C); refreshed by _choose_a2.
        return self._unbound
    @property
    def support(self):
        # Indices of support vectors (alpha > 0).
        return [i for i in range(self.length) if self._is_support(i)]
    @property
    def length(self):
        # Number of training samples.
        return self.samples.shape[0]
class Kernel:
    """SVM kernel function: ``linear``, ``poly`` (polynomial) or ``rbf`` (Gaussian).

    Instances are callable: ``k(v1, v2)`` evaluates the configured kernel.
    An unknown kernel name raises KeyError; a negative gamma with the RBF
    kernel raises ValueError.
    """

    def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0):
        self.degree = np.float64(degree)
        self.coef0 = np.float64(coef0)
        self.gamma = np.float64(gamma)
        self._kernel_name = kernel
        self._kernel = self._get_kernel(kernel_name=kernel)
        self._check()

    def _polynomial(self, v1, v2):
        # (gamma * <v1, v2> + coef0) ** degree
        return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree

    def _linear(self, v1, v2):
        # <v1, v2> + coef0
        return np.inner(v1, v2) + self.coef0

    def _rbf(self, v1, v2):
        # exp(-gamma * ||v1 - v2||^2)
        return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2))

    def _check(self):
        # Only the RBF kernel constrains gamma.
        if self._kernel == self._rbf and self.gamma < 0:
            raise ValueError("gamma value must greater than 0")

    def _get_kernel(self, kernel_name):
        # Dispatch by name; a typo surfaces as KeyError to the caller.
        dispatch = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf}
        return dispatch[kernel_name]

    def __call__(self, v1, v2):
        return self._kernel(v1, v2)

    def __repr__(self):
        return self._kernel_name
def count_time(func):
    """Decorator: print the wall-clock runtime of *func* after each call.

    Fixes two defects of the original wrapper: it now propagates the wrapped
    function's return value (the original always returned None) and preserves
    the wrapped function's metadata via ``functools.wraps``.
    """
    import functools
    import time

    @functools.wraps(func)
    def call_func(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"smo algorithm cost {end_time - start_time} seconds")
        return result

    return call_func
@count_time
def test_cancel_data():
    """End-to-end demo: download the breast-cancer dataset (if missing),
    train an RBF-kernel SMO-SVM on the first 328 rows and report accuracy
    on the rest.

    NOTE(review): relies on module-level ``os``, ``urllib``, ``pd``, ``np``,
    ``CANCER_DATASET_URL`` and ``SmoSVM`` being defined earlier in the file.
    """
    print("Hello!\nStart test svm by smo algorithm!")
    # 0: download dataset and load into pandas' dataframe
    if not os.path.exists(r"cancel_data.csv"):
        request = urllib.request.Request(
            CANCER_DATASET_URL,
            headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"},
        )
        response = urllib.request.urlopen(request)
        content = response.read().decode("utf-8")
        with open(r"cancel_data.csv", "w") as f:
            f.write(content)
    data = pd.read_csv(r"cancel_data.csv", header=None)
    # 1: pre-processing data (drop id column, drop NaNs, map labels M/B to +1/-1)
    del data[data.columns.tolist()[0]]
    data = data.dropna(axis=0)
    data = data.replace({"M": np.float64(1), "B": np.float64(-1)})
    samples = np.array(data)[:, :]
    # 2: dividing data into train_data data and test_data data
    train_data, test_data = samples[:328, :], samples[328:, :]
    test_tags, test_samples = test_data[:, 0], test_data[:, 1:]
    # 3: choose kernel function,and set initial alphas to zero(optional)
    mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
    al = np.zeros(train_data.shape[0])
    # 4: calculating best alphas using SMO algorithm and predict test_data samples
    mysvm = SmoSVM(
        train=train_data,
        kernel_func=mykernel,
        alpha_list=al,
        cost=0.4,
        b=0.0,
        tolerance=0.001,
    )
    mysvm.fit()
    predict = mysvm.predict(test_samples)
    # 5: check accuracy
    score = 0
    test_num = test_tags.shape[0]
    for i in range(test_tags.shape[0]):
        if test_tags[i] == predict[i]:
            score += 1
    print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}")
    print(f"Rough Accuracy: {score / test_tags.shape[0]}")
def test_demonstration():
    """Render four SVM decision-boundary demos (linear/RBF x cost 0.1/500)
    in a 2x2 matplotlib grid; stdout is muted while the models train."""
    # change stdout (silence the training progress prints; restored below)
    print("\nStart plot,please wait!!!")
    sys.stdout = open(os.devnull, "w")
    ax1 = plt.subplot2grid((2, 2), (0, 0))
    ax2 = plt.subplot2grid((2, 2), (0, 1))
    ax3 = plt.subplot2grid((2, 2), (1, 0))
    ax4 = plt.subplot2grid((2, 2), (1, 1))
    ax1.set_title("linear svm,cost:0.1")
    test_linear_kernel(ax1, cost=0.1)
    ax2.set_title("linear svm,cost:500")
    test_linear_kernel(ax2, cost=500)
    ax3.set_title("rbf kernel svm,cost:0.1")
    test_rbf_kernel(ax3, cost=0.1)
    ax4.set_title("rbf kernel svm,cost:500")
    test_rbf_kernel(ax4, cost=500)
    # Restore stdout before the final status message.
    sys.stdout = sys.__stdout__
    print("Plot done!!!")
def test_linear_kernel(ax, cost):
    """Fit a linear-kernel SVM on a two-blob dataset and draw its partition
    boundary on *ax* with the given *cost* (C)."""
    train_x, train_y = make_blobs(
        n_samples=500, centers=2, n_features=2, random_state=1
    )
    # Relabel class 0 as -1: the SVM expects tags in {-1, +1}.
    train_y[train_y == 0] = -1
    scaler = StandardScaler()
    train_x_scaled = scaler.fit_transform(train_x, train_y)
    # Column 0 carries the tag; remaining columns the scaled features.
    train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
    mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
    mysvm = SmoSVM(
        train=train_data,
        kernel_func=mykernel,
        cost=cost,
        tolerance=0.001,
        auto_norm=False,
    )
    mysvm.fit()
    plot_partition_boundary(mysvm, train_data, ax=ax)
def test_rbf_kernel(ax, cost):
    """Fit an RBF-kernel SVM on concentric-circle data and draw its partition
    boundary on *ax* with the given *cost* (C)."""
    train_x, train_y = make_circles(
        n_samples=500, noise=0.1, factor=0.1, random_state=1
    )
    # Relabel class 0 as -1: the SVM expects tags in {-1, +1}.
    train_y[train_y == 0] = -1
    scaler = StandardScaler()
    train_x_scaled = scaler.fit_transform(train_x, train_y)
    # Column 0 carries the tag; remaining columns the scaled features.
    train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
    mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
    mysvm = SmoSVM(
        train=train_data,
        kernel_func=mykernel,
        cost=cost,
        tolerance=0.001,
        auto_norm=False,
    )
    mysvm.fit()
    plot_partition_boundary(mysvm, train_data, ax=ax)
def plot_partition_boundary(
    model, train_data, ax, resolution=100, colors=("b", "k", "r")
):
    """
    We cannot get the optimum w of our kernel svm model, which is different from a linear svm.
    For this reason, we generate a dense grid of points, and the predicted values of these points
    are calculated using our trained model. Then we use these predicted values to draw a contour
    map, and this contour map represents the svm's partition boundary.
    """
    train_data_x = train_data[:, 1]
    train_data_y = train_data[:, 2]
    train_data_tags = train_data[:, 0]
    # Dense evaluation grid covering the training data's bounding box.
    xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution)
    yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution)
    test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape(
        resolution * resolution, 2
    )
    # classify=False: raw decision values are needed for the -1/0/+1 contours.
    test_tags = model.predict(test_samples, classify=False)
    grid = test_tags.reshape((len(xrange), len(yrange)))
    # Plot contour map which represents the partition boundary
    ax.contour(
        xrange,
        yrange,
        np.mat(grid).T,
        levels=(-1, 0, 1),
        linestyles=("--", "-", "--"),
        linewidths=(1, 1, 1),
        colors=colors,
    )
    # Plot all train samples
    ax.scatter(
        train_data_x,
        train_data_y,
        c=train_data_tags,
        cmap=plt.cm.Dark2,
        lw=0,
        alpha=0.5,
    )
    # Plot support vectors
    support = model.support
    ax.scatter(
        train_data_x[support],
        train_data_y[support],
        c=train_data_tags[support],
        cmap=plt.cm.Dark2,
    )
if __name__ == "__main__":
    # Run the numeric benchmark first, then the visual demonstrations.
    test_cancel_data()
    test_demonstration()
    plt.show()
| [
"sklearn.datasets.make_circles",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.subplot2grid",
"numpy.zeros",
"sklearn.datasets.make_blobs",
"os.path.exists",
"time.time",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.linalg.norm",
... | [((15240, 15283), 'pandas.read_csv', 'pd.read_csv', (['"""cancel_data.csv"""'], {'header': 'None'}), "('cancel_data.csv', header=None)\n", (15251, 15283), True, 'import pandas as pd\n'), ((15827, 15856), 'numpy.zeros', 'np.zeros', (['train_data.shape[0]'], {}), '(train_data.shape[0])\n', (15835, 15856), True, 'import numpy as np\n'), ((16610, 16642), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 2)', '(0, 0)'], {}), '((2, 2), (0, 0))\n', (16626, 16642), True, 'import matplotlib.pyplot as plt\n'), ((16653, 16685), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 2)', '(0, 1)'], {}), '((2, 2), (0, 1))\n', (16669, 16685), True, 'import matplotlib.pyplot as plt\n'), ((16696, 16728), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 2)', '(1, 0)'], {}), '((2, 2), (1, 0))\n', (16712, 16728), True, 'import matplotlib.pyplot as plt\n'), ((16739, 16771), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 2)', '(1, 1)'], {}), '((2, 2), (1, 1))\n', (16755, 16771), True, 'import matplotlib.pyplot as plt\n'), ((17208, 17274), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(500)', 'centers': '(2)', 'n_features': '(2)', 'random_state': '(1)'}), '(n_samples=500, centers=2, n_features=2, random_state=1)\n', (17218, 17274), False, 'from sklearn.datasets import make_blobs, make_circles\n'), ((17333, 17349), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (17347, 17349), False, 'from sklearn.preprocessing import StandardScaler\n'), ((17826, 17892), 'sklearn.datasets.make_circles', 'make_circles', ([], {'n_samples': '(500)', 'noise': '(0.1)', 'factor': '(0.1)', 'random_state': '(1)'}), '(n_samples=500, noise=0.1, factor=0.1, random_state=1)\n', (17838, 17892), False, 'from sklearn.datasets import make_blobs, make_circles\n'), ((17951, 17967), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (17965, 17967), False, 'from sklearn.preprocessing import 
StandardScaler\n'), ((20109, 20119), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20117, 20119), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1800), 'numpy.float64', 'np.float64', (['cost'], {}), '(cost)\n', (1794, 1800), True, 'import numpy as np\n'), ((1819, 1832), 'numpy.float64', 'np.float64', (['b'], {}), '(b)\n', (1829, 1832), True, 'import numpy as np\n'), ((2315, 2336), 'numpy.zeros', 'np.zeros', (['self.length'], {}), '(self.length)\n', (2323, 2336), True, 'import numpy as np\n'), ((5386, 5403), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (5394, 5403), True, 'import numpy as np\n'), ((6694, 6730), 'numpy.zeros', 'np.zeros', (['[self.length, self.length]'], {}), '([self.length, self.length])\n', (6702, 6730), True, 'import numpy as np\n'), ((13468, 13486), 'numpy.float64', 'np.float64', (['degree'], {}), '(degree)\n', (13478, 13486), True, 'import numpy as np\n'), ((13508, 13525), 'numpy.float64', 'np.float64', (['coef0'], {}), '(coef0)\n', (13518, 13525), True, 'import numpy as np\n'), ((13547, 13564), 'numpy.float64', 'np.float64', (['gamma'], {}), '(gamma)\n', (13557, 13564), True, 'import numpy as np\n'), ((14518, 14529), 'time.time', 'time.time', ([], {}), '()\n', (14527, 14529), False, 'import time\n'), ((14579, 14590), 'time.time', 'time.time', ([], {}), '()\n', (14588, 14590), False, 'import time\n'), ((14843, 14876), 'os.path.exists', 'os.path.exists', (['"""cancel_data.csv"""'], {}), "('cancel_data.csv')\n", (14857, 14876), False, 'import os\n'), ((15466, 15480), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (15474, 15480), True, 'import numpy as np\n'), ((1853, 1874), 'numpy.float64', 'np.float64', (['tolerance'], {}), '(tolerance)\n', (1863, 1874), True, 'import numpy as np\n'), ((1902, 1919), 'numpy.float64', 'np.float64', (['(0.001)'], {}), '(0.001)\n', (1912, 1919), True, 'import numpy as np\n'), ((2102, 2126), 'numpy.zeros', 'np.zeros', (['train.shape[0]'], {}), '(train.shape[0])\n', (2110, 2126), 
True, 'import numpy as np\n'), ((9941, 9970), 'numpy.random.choice', 'np.random.choice', (['self.length'], {}), '(self.length)\n', (9957, 9970), True, 'import numpy as np\n'), ((10101, 10130), 'numpy.random.choice', 'np.random.choice', (['self.length'], {}), '(self.length)\n', (10117, 10130), True, 'import numpy as np\n'), ((12598, 12618), 'numpy.min', 'np.min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (12604, 12618), True, 'import numpy as np\n'), ((12643, 12663), 'numpy.max', 'np.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (12649, 12663), True, 'import numpy as np\n'), ((13840, 13856), 'numpy.inner', 'np.inner', (['v1', 'v2'], {}), '(v1, v2)\n', (13848, 13856), True, 'import numpy as np\n'), ((15415, 15428), 'numpy.float64', 'np.float64', (['(1)'], {}), '(1)\n', (15425, 15428), True, 'import numpy as np\n'), ((15435, 15449), 'numpy.float64', 'np.float64', (['(-1)'], {}), '(-1)\n', (15445, 15449), True, 'import numpy as np\n'), ((19164, 19214), 'numpy.array', 'np.array', (['[(x, y) for x in xrange for y in yrange]'], {}), '([(x, y) for x in xrange for y in yrange])\n', (19172, 19214), True, 'import numpy as np\n'), ((19503, 19515), 'numpy.mat', 'np.mat', (['grid'], {}), '(grid)\n', (19509, 19515), True, 'import numpy as np\n'), ((6445, 6502), 'numpy.dot', 'np.dot', (['(self.alphas * self.tags)', 'self._K_matrix[:, index]'], {}), '(self.alphas * self.tags, self._K_matrix[:, index])\n', (6451, 6502), True, 'import numpy as np\n'), ((13747, 13763), 'numpy.inner', 'np.inner', (['v1', 'v2'], {}), '(v1, v2)\n', (13755, 13763), True, 'import numpy as np\n'), ((3876, 3889), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (3886, 3889), True, 'import numpy as np\n'), ((3936, 3949), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (3946, 3949), True, 'import numpy as np\n'), ((13940, 13963), 'numpy.linalg.norm', 'np.linalg.norm', (['(v1 - v2)'], {}), '(v1 - v2)\n', (13954, 13963), True, 'import numpy as np\n')] |
import os
import re
from urllib.parse import unquote
import numpy as np
from REL.db.generic import GenericLookup
from REL.utils import first_letter_to_uppercase, trim1, unicode2ascii
"""
Class responsible for processing Wikipedia dumps. Performs computations to obtain the p(e|m) index and counts
overall occurrences of mentions.
"""
class WikipediaYagoFreq:
    """Builds the p(e|m) index and overall mention counts from Wikipedia,
    CrossWikis and (optionally) YAGO dumps, and stores them in sqlite3.

    Fix: the HTML-entity unescaping in ``__yago_counts`` had been garbled by
    entity decoding (``replace("&", "&")`` was a no-op and the quote
    replacement was syntactically broken); the intended ``&amp;``/``&quot;``
    source strings are restored.
    """
    def __init__(self, base_url, wiki_version, wikipedia):
        """Store dump locations and the Wikipedia helper; start with empty counts."""
        self.base_url = base_url
        self.wiki_version = wiki_version
        self.wikipedia = wikipedia
        self.wiki_freq = {}
        self.p_e_m = {}
        self.mention_freq = {}
    def store(self):
        """
        Stores the p(e|m) index and mention frequencies in a sqlite3 database.
        :return:
        """
        print("Please take a break, this will take a while :).")
        wiki_db = GenericLookup(
            "entity_word_embedding",
            "{}/{}/generated/".format(self.base_url, self.wiki_version),
            table_name="wiki",
            columns={"p_e_m": "blob", "lower": "text", "freq": "INTEGER"},
        )
        wiki_db.load_wiki(self.p_e_m, self.mention_freq, batch_size=50000, reset=True)
    def compute_wiki(self):
        """
        Computes the p(e|m) index for a given wiki and crosswikis dump.
        :return:
        """
        self.__wiki_counts()
        self.__cross_wiki_counts()
        # Step 1: Calculate p(e|m) for wiki.
        print("Filtering candidates and calculating p(e|m) values for Wikipedia.")
        for ent_mention in self.wiki_freq:
            if len(ent_mention) < 1:
                continue
            ent_wiki_names = sorted(
                self.wiki_freq[ent_mention].items(), key=lambda kv: kv[1], reverse=True
            )
            # Get the sum of at most 100 candidates, but less if less are available.
            total_count = np.sum([v for k, v in ent_wiki_names][:100])
            if total_count < 1:
                continue
            self.p_e_m[ent_mention] = {}
            for ent_name, count in ent_wiki_names:
                self.p_e_m[ent_mention][ent_name] = count / total_count
                # Keep at most the 100 most frequent candidates per mention.
                if len(self.p_e_m[ent_mention]) >= 100:
                    break
        del self.wiki_freq
    def compute_custom(self, custom=None):
        """
        Computes a p(e|m) index for YAGO and combines this index with the Wikipedia
        p(e|m) index as reported by Ganea et al. in 'Deep Joint Entity Disambiguation
        with Local Neural Attention'. Alternatively, users may specify their own
        custom p(e|m) by providing mention/entity counts.
        :return:
        """
        if custom:
            self.custom_freq = custom
        else:
            self.custom_freq = self.__yago_counts()
        print("Computing p(e|m)")
        for mention in self.custom_freq:
            total = len(self.custom_freq[mention])
            # Assumes uniform distribution, else total will need to be adjusted.
            if mention not in self.mention_freq:
                self.mention_freq[mention] = 0
            self.mention_freq[mention] += 1
            cust_ment_ent_temp = {
                k: 1 / total for k, v in self.custom_freq[mention].items()
            }
            if mention not in self.p_e_m:
                self.p_e_m[mention] = cust_ment_ent_temp
            else:
                for ent_wiki_id in cust_ment_ent_temp:
                    prob = cust_ment_ent_temp[ent_wiki_id]
                    if ent_wiki_id not in self.p_e_m[mention]:
                        self.p_e_m[mention][ent_wiki_id] = 0.0
                    # Assumes addition of p(e|m) as described by authors.
                    self.p_e_m[mention][ent_wiki_id] = np.round(
                        min(1.0, self.p_e_m[mention][ent_wiki_id] + prob), 3
                    )
    def __yago_counts(self):
        """
        Counts mention/entity occurrences for YAGO.
        :return: frequency index
        """
        num_lines = 0
        print("Calculating Yago occurrences")
        custom_freq = {}
        with open(
            "{}/generic/p_e_m_data/aida_means.tsv".format(self.base_url),
            "r",
            encoding="utf-8",
        ) as f:
            for line in f:
                num_lines += 1
                if num_lines % 5000000 == 0:
                    print("Processed {} lines.".format(num_lines))
                line = line.rstrip()
                line = unquote(line)
                parts = line.split("\t")
                mention = parts[0][1:-1].strip()
                ent_name = parts[1].strip()
                # Unescape the HTML entities the dump uses for '&' and '"'.
                ent_name = ent_name.replace("&amp;", "&")
                ent_name = ent_name.replace("&quot;", '"')
                # Resolve literal \uXXXX escapes to their ASCII approximation.
                x = ent_name.find("\\u")
                while x != -1:
                    code = ent_name[x : x + 6]
                    replace = unicode2ascii(code)
                    if replace == "%":
                        replace = "%%"
                    ent_name = ent_name.replace(code, replace)
                    x = ent_name.find("\\u")
                ent_name = self.wikipedia.preprocess_ent_name(ent_name)
                if ent_name in self.wikipedia.wiki_id_name_map["ent_name_to_id"]:
                    if mention not in custom_freq:
                        custom_freq[mention] = {}
                    ent_name = ent_name.replace(" ", "_")
                    if ent_name not in custom_freq[mention]:
                        custom_freq[mention][ent_name] = 1
        return custom_freq
    def __cross_wiki_counts(self):
        """
        Updates the Wikipedia mention/entity counts with the CrossWikis corpus.
        :return:
        """
        print("Updating counts by merging with CrossWiki")
        cnt = 0
        crosswiki_path = "{}/generic/p_e_m_data/crosswikis_p_e_m.txt".format(
            self.base_url
        )
        with open(crosswiki_path, "r", encoding="utf-8") as f:
            for line in f:
                parts = line.split("\t")
                mention = unquote(parts[0])
                if ("Wikipedia" not in mention) and ("wikipedia" not in mention):
                    if mention not in self.wiki_freq:
                        self.wiki_freq[mention] = {}
                    num_ents = len(parts)
                    # Entries start at index 2; each is "wiki_id,freq,...".
                    for i in range(2, num_ents):
                        ent_str = parts[i].split(",")
                        ent_wiki_id = int(ent_str[0])
                        freq_ent = int(ent_str[1])
                        # Try to resolve unknown ids through the redirect table.
                        if (
                            ent_wiki_id
                            not in self.wikipedia.wiki_id_name_map["ent_id_to_name"]
                        ):
                            ent_name_re = self.wikipedia.wiki_redirect_id(ent_wiki_id)
                            if (
                                ent_name_re
                                in self.wikipedia.wiki_id_name_map["ent_name_to_id"]
                            ):
                                ent_wiki_id = self.wikipedia.wiki_id_name_map[
                                    "ent_name_to_id"
                                ][ent_name_re]
                                cnt += 1
                        if (
                            ent_wiki_id
                            in self.wikipedia.wiki_id_name_map["ent_id_to_name"]
                        ):
                            if mention not in self.mention_freq:
                                self.mention_freq[mention] = 0
                            self.mention_freq[mention] += freq_ent
                            ent_name = self.wikipedia.wiki_id_name_map[
                                "ent_id_to_name"
                            ][ent_wiki_id].replace(" ", "_")
                            if ent_name not in self.wiki_freq[mention]:
                                self.wiki_freq[mention][ent_name] = 0
                            self.wiki_freq[mention][ent_name] += freq_ent
    def __wiki_counts(self):
        """
        Computes mention/entity counts for a given Wiki dump.
        :return:
        """
        num_lines = 0
        num_valid_hyperlinks = 0
        disambiguation_ent_errors = 0
        print("Calculating Wikipedia mention/entity occurrences")
        # Skip documents whose id was already processed (dump may repeat docs).
        last_processed_id = -1
        exist_id_found = False
        wiki_anchor_files = os.listdir(
            "{}/{}/basic_data/anchor_files/".format(self.base_url, self.wiki_version)
        )
        for wiki_anchor in wiki_anchor_files:
            wiki_file = "{}/{}/basic_data/anchor_files/{}".format(
                self.base_url, self.wiki_version, wiki_anchor
            )
            with open(wiki_file, "r", encoding="utf-8") as f:
                for line in f:
                    num_lines += 1
                    if num_lines % 5000000 == 0:
                        print(
                            "Processed {} lines, valid hyperlinks {}".format(
                                num_lines, num_valid_hyperlinks
                            )
                        )
                    if '<doc id="' in line:
                        # Document header: extract the numeric doc id.
                        id = int(line[line.find("id") + 4 : line.find("url") - 2])
                        if id <= last_processed_id:
                            exist_id_found = True
                            continue
                        else:
                            exist_id_found = False
                            last_processed_id = id
                    else:
                        if not exist_id_found:
                            (
                                list_hyp,
                                disambiguation_ent_error,
                                print_values,
                            ) = self.__extract_text_and_hyp(line)
                            disambiguation_ent_errors += disambiguation_ent_error
                            for el in list_hyp:
                                mention = el["mention"]
                                ent_wiki_id = el["ent_wikiid"]
                                num_valid_hyperlinks += 1
                                if mention not in self.wiki_freq:
                                    self.wiki_freq[mention] = {}
                                if (
                                    ent_wiki_id
                                    in self.wikipedia.wiki_id_name_map["ent_id_to_name"]
                                ):
                                    if mention not in self.mention_freq:
                                        self.mention_freq[mention] = 0
                                    self.mention_freq[mention] += 1
                                    ent_name = self.wikipedia.wiki_id_name_map[
                                        "ent_id_to_name"
                                    ][ent_wiki_id].replace(" ", "_")
                                    if ent_name not in self.wiki_freq[mention]:
                                        self.wiki_freq[mention][ent_name] = 0
                                    self.wiki_freq[mention][ent_name] += 1
        print(
            "Done computing Wikipedia counts. Num valid hyperlinks = {}".format(
                num_valid_hyperlinks
            )
        )
    def __extract_text_and_hyp(self, line):
        """
        Extracts hyperlinks from a given Wikipedia document line to obtain
        mention/entity counts.
        :return: list of mentions/wiki ids and their respective counts
                 (plus some statistics).
        """
        line = unquote(line)
        list_hyp = []
        num_mentions = 0
        start_entities = [m.start() for m in re.finditer('<a href="', line)]
        end_entities = [m.start() for m in re.finditer('">', line)]
        end_mentions = [m.start() for m in re.finditer("</a>", line)]
        disambiguation_ent_errors = 0
        start_entity = line.find('<a href="')
        while start_entity >= 0:
            # Consume the line up to the current anchor and split it into
            # target entity (href) and surface mention (anchor text).
            line = line[start_entity + len('<a href="') :]
            end_entity = line.find('">')
            end_mention = line.find("</a>")
            mention = line[end_entity + len('">') : end_mention]
            if (
                ("Wikipedia" not in mention)
                and ("wikipedia" not in mention)
                and (len(mention) >= 1)
            ):
                # Valid mention
                entity = line[0:end_entity]
                find_wikt = entity.find("wikt:")
                entity = entity[len("wikt:") :] if find_wikt == 0 else entity
                entity = self.wikipedia.preprocess_ent_name(entity)
                # Skip list pages and section links (contain '#').
                if entity.find("List of ") != 0:
                    if "#" not in entity:
                        ent_wiki_id = self.wikipedia.ent_wiki_id_from_name(entity)
                        if ent_wiki_id == -1:
                            disambiguation_ent_errors += 1
                        else:
                            num_mentions += 1
                            list_hyp.append(
                                {
                                    "mention": mention,
                                    "ent_wikiid": ent_wiki_id,
                                    "cnt": num_mentions,
                                }
                            )
            # find new entity
            start_entity = line.find('<a href="')
        return (
            list_hyp,
            disambiguation_ent_errors,
            [len(start_entities), len(end_entities), len(end_mentions)],
        )
    # def __preprocess_ent_name(self, ent_name):
    #     ent_name = ent_name.strip()
    #     ent_name = trim1(ent_name)
    #     ent_name = ent_name.replace("&amp;", "&")
    #     ent_name = ent_name.replace("&quot;", '"')
    #     ent_name = ent_name.replace("_", " ")
    #     ent_name = first_letter_to_uppercase(ent_name)
    #
    #     ent_name = self.wikipedia.wiki_redirect_ent_title(ent_name)
    #     return ent_name
| [
"urllib.parse.unquote",
"REL.utils.unicode2ascii",
"numpy.sum",
"re.finditer"
] | [((11430, 11443), 'urllib.parse.unquote', 'unquote', (['line'], {}), '(line)\n', (11437, 11443), False, 'from urllib.parse import unquote\n'), ((1830, 1874), 'numpy.sum', 'np.sum', (['[v for k, v in ent_wiki_names][:100]'], {}), '([v for k, v in ent_wiki_names][:100])\n', (1836, 1874), True, 'import numpy as np\n'), ((4414, 4427), 'urllib.parse.unquote', 'unquote', (['line'], {}), '(line)\n', (4421, 4427), False, 'from urllib.parse import unquote\n'), ((5995, 6012), 'urllib.parse.unquote', 'unquote', (['parts[0]'], {}), '(parts[0])\n', (6002, 6012), False, 'from urllib.parse import unquote\n'), ((11536, 11566), 're.finditer', 're.finditer', (['"""<a href=\\""""', 'line'], {}), '(\'<a href="\', line)\n', (11547, 11566), False, 'import re\n'), ((11611, 11634), 're.finditer', 're.finditer', (['"""">"""', 'line'], {}), '(\'">\', line)\n', (11622, 11634), False, 'import re\n'), ((11679, 11704), 're.finditer', 're.finditer', (['"""</a>"""', 'line'], {}), "('</a>', line)\n", (11690, 11704), False, 'import re\n'), ((4830, 4849), 'REL.utils.unicode2ascii', 'unicode2ascii', (['code'], {}), '(code)\n', (4843, 4849), False, 'from REL.utils import first_letter_to_uppercase, trim1, unicode2ascii\n')] |
import gym
import logging
import numpy as np
import tensorflow as tf
from src.naf import NAF
from src.network import Network
from src.statistic import Statistic
from src.exploration import OUExploration, BrownianExploration, LinearDecayExploration
# from naf import NAF
# from network import Network
# from statistic import Statistic
# from exploration import OUExploration, BrownianExploration, LinearDecayExploration
from utils import get_model_dir, preprocess_conf
# Command-line configuration via TF1-style flags (parsed by tf.app.run()).
flags = tf.app.flags
# environment
flags.DEFINE_string('env_name', 'Pendulum-v0', 'name of environment')
# network
flags.DEFINE_string('hidden_dims', '[100, 100]', 'dimension of hidden layers')
flags.DEFINE_boolean('use_batch_norm', False, 'use batch normalization or not')
flags.DEFINE_boolean('clip_action', False, 'whether to clip an action with given bound')
flags.DEFINE_boolean('use_seperate_networks', False, 'use seperate networks for mu, V and A')
flags.DEFINE_string('hidden_w', 'uniform_big', 'weight initialization of hidden layers [uniform_small, uniform_big, he]')
flags.DEFINE_string('hidden_fn', 'tanh', 'activation function of hidden layer [none, tanh, relu]')
flags.DEFINE_string('action_w', 'uniform_big', 'weight initilization of action layer [uniform_small, uniform_big, he]')
flags.DEFINE_string('action_fn', 'tanh', 'activation function of action layer [none, tanh, relu]')
flags.DEFINE_string('w_reg', 'none', 'weight regularization [none, l1, l2]')
flags.DEFINE_float('w_reg_scale', 0.001, 'scale of regularization')
# exploration
flags.DEFINE_float('noise_scale', 0.3, 'scale of noise')
flags.DEFINE_string('noise', 'ou', 'type of noise exploration [ou, linear_decay, brownian]')
# training
flags.DEFINE_float('tau', 0.001, 'tau of soft target update')
flags.DEFINE_float('discount', 0.99, 'discount factor of Q-learning')
flags.DEFINE_float('learning_rate', 1e-3, 'value of learning rate')
flags.DEFINE_integer('batch_size', 100, 'The size of batch for minibatch training')
flags.DEFINE_integer('max_steps', 200, 'maximum # of steps for each episode')
flags.DEFINE_integer('update_repeat', 10, 'maximum # of q-learning updates for each step')
flags.DEFINE_integer('max_episodes', 10000, 'maximum # of episodes to train')
# Debug
flags.DEFINE_boolean('is_train', True, 'training or testing')
flags.DEFINE_integer('random_seed', 123, 'random seed')
flags.DEFINE_boolean('monitor', False, 'monitor the training or not')
flags.DEFINE_boolean('display', False, 'display the game screen or not')
flags.DEFINE_string('log_level', 'INFO', 'log level [DEBUG, INFO, WARNING, ERROR, CRITICAL]')
conf = flags.FLAGS
# Root logger configured from the log_level flag; propagation disabled so
# messages are not duplicated by parent handlers.
logger = logging.getLogger()
logger.propagate = False
logger.setLevel(conf.log_level)
# set random seed (both TF graph-level and NumPy, for reproducibility)
tf.set_random_seed(conf.random_seed)
np.random.seed(conf.random_seed)
def main(_):
  """Entry point for tf.app.run(): build env, exploration strategy, NAF
  networks and agent from the parsed flags, then run training."""
  model_dir = get_model_dir(conf,
      ['is_train', 'random_seed', 'monitor', 'display', 'log_level'])
  preprocess_conf(conf)
  with tf.Session() as sess:
    # environment
    env = gym.make(conf.env_name)
    env._seed(conf.random_seed)
    # NAF only supports continuous observation and action spaces.
    assert isinstance(env.observation_space, gym.spaces.Box), \
      "observation space must be continuous"
    assert isinstance(env.action_space, gym.spaces.Box), \
      "action space must be continuous"
    # exploration strategy
    if conf.noise == 'ou':
      strategy = OUExploration(env, sigma=conf.noise_scale)
    elif conf.noise == 'brownian':
      strategy = BrownianExploration(env, conf.noise_scale)
    elif conf.noise == 'linear_decay':
      strategy = LinearDecayExploration(env)
    else:
      raise ValueError('Unkown exploration strategy: %s' % conf.noise)
    # networks (prediction and target share the same architecture flags)
    shared_args = {
      'sess': sess,
      'input_shape': env.observation_space.shape,
      'action_size': env.action_space.shape[0],
      'hidden_dims': conf.hidden_dims,
      'use_batch_norm': conf.use_batch_norm,
      'use_seperate_networks': conf.use_seperate_networks,
      'hidden_w': conf.hidden_w, 'action_w': conf.action_w,
      'hidden_fn': conf.hidden_fn, 'action_fn': conf.action_fn,
      'w_reg': conf.w_reg,
    }
    logger.info("Creating prediction network...")
    pred_network = Network(
      scope='pred_network', **shared_args
    )
    logger.info("Creating target network...")
    target_network = Network(
      scope='target_network', **shared_args
    )
    # Target weights track the prediction network via soft updates (tau).
    target_network.make_soft_update_from(pred_network, conf.tau)
    # statistic
    stat = Statistic(sess, conf.env_name, model_dir, pred_network.variables, conf.update_repeat)
    agent = NAF(sess, env, strategy, pred_network, target_network, stat,
                conf.discount, conf.batch_size, conf.learning_rate,
                conf.max_steps, conf.update_repeat, conf.max_episodes)
    # NOTE(review): is_train flag is bypassed here — training is forced on.
    #agent.run(conf.monitor, conf.display, conf.is_train)
    agent.run(conf.monitor, conf.display, True)
    #agent.run2(conf.monitor, conf.display, True)
if __name__ == '__main__':
  # tf.app.run() parses the flags defined above and then calls main(_).
  tf.app.run()
| [
"numpy.random.seed",
"gym.make",
"src.exploration.OUExploration",
"src.exploration.BrownianExploration",
"utils.preprocess_conf",
"tensorflow.Session",
"src.network.Network",
"src.naf.NAF",
"tensorflow.set_random_seed",
"src.exploration.LinearDecayExploration",
"utils.get_model_dir",
"src.stat... | [((2618, 2637), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2635, 2637), False, 'import logging\n'), ((2714, 2750), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['conf.random_seed'], {}), '(conf.random_seed)\n', (2732, 2750), True, 'import tensorflow as tf\n'), ((2751, 2783), 'numpy.random.seed', 'np.random.seed', (['conf.random_seed'], {}), '(conf.random_seed)\n', (2765, 2783), True, 'import numpy as np\n'), ((2812, 2899), 'utils.get_model_dir', 'get_model_dir', (['conf', "['is_train', 'random_seed', 'monitor', 'display', 'log_level']"], {}), "(conf, ['is_train', 'random_seed', 'monitor', 'display',\n 'log_level'])\n", (2825, 2899), False, 'from utils import get_model_dir, preprocess_conf\n'), ((2904, 2925), 'utils.preprocess_conf', 'preprocess_conf', (['conf'], {}), '(conf)\n', (2919, 2925), False, 'from utils import get_model_dir, preprocess_conf\n'), ((4911, 4923), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4921, 4923), True, 'import tensorflow as tf\n'), ((2934, 2946), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2944, 2946), True, 'import tensorflow as tf\n'), ((2984, 3007), 'gym.make', 'gym.make', (['conf.env_name'], {}), '(conf.env_name)\n', (2992, 3007), False, 'import gym\n'), ((4148, 4192), 'src.network.Network', 'Network', ([], {'scope': '"""pred_network"""'}), "(scope='pred_network', **shared_args)\n", (4155, 4192), False, 'from src.network import Network\n'), ((4273, 4319), 'src.network.Network', 'Network', ([], {'scope': '"""target_network"""'}), "(scope='target_network', **shared_args)\n", (4280, 4319), False, 'from src.network import Network\n'), ((4425, 4515), 'src.statistic.Statistic', 'Statistic', (['sess', 'conf.env_name', 'model_dir', 'pred_network.variables', 'conf.update_repeat'], {}), '(sess, conf.env_name, model_dir, pred_network.variables, conf.\n update_repeat)\n', (4434, 4515), False, 'from src.statistic import Statistic\n'), ((4524, 4699), 'src.naf.NAF', 'NAF', (['sess', 
'env', 'strategy', 'pred_network', 'target_network', 'stat', 'conf.discount', 'conf.batch_size', 'conf.learning_rate', 'conf.max_steps', 'conf.update_repeat', 'conf.max_episodes'], {}), '(sess, env, strategy, pred_network, target_network, stat, conf.discount,\n conf.batch_size, conf.learning_rate, conf.max_steps, conf.update_repeat,\n conf.max_episodes)\n', (4527, 4699), False, 'from src.naf import NAF\n'), ((3321, 3363), 'src.exploration.OUExploration', 'OUExploration', (['env'], {'sigma': 'conf.noise_scale'}), '(env, sigma=conf.noise_scale)\n', (3334, 3363), False, 'from src.exploration import OUExploration, BrownianExploration, LinearDecayExploration\n'), ((3416, 3458), 'src.exploration.BrownianExploration', 'BrownianExploration', (['env', 'conf.noise_scale'], {}), '(env, conf.noise_scale)\n', (3435, 3458), False, 'from src.exploration import OUExploration, BrownianExploration, LinearDecayExploration\n'), ((3515, 3542), 'src.exploration.LinearDecayExploration', 'LinearDecayExploration', (['env'], {}), '(env)\n', (3537, 3542), False, 'from src.exploration import OUExploration, BrownianExploration, LinearDecayExploration\n')] |
"""Base Class of an Adversarial Environments."""
from abc import ABCMeta, abstractmethod
import numpy as np
from gym import Wrapper
from gym.spaces import Box
class AdversarialWrapper(Wrapper, metaclass=ABCMeta):
    r"""Abstract wrapper that turns a gym Env into an adversarial environment.

    ``step()`` accepts two kinds of actions: an action with the wrapped
    environment's original dimensionality is forwarded untouched, while a
    longer action is split into a protagonist part and an antagonist part and
    dispatched to ``adversarial_step()``, which subclasses must implement.

    Parameters
    ----------
    env : gym.Env
        Environment to wrap; must have a continuous (Box) action space.
    antagonist_low, antagonist_high : array
        Bounds of the antagonist's action box.
    alpha : float
        Robustness level; ``alpha == 0`` disables the antagonist entirely.
    """

    def __init__(self, env, antagonist_low, antagonist_high, alpha=1.0):
        super().__init__(env=env)
        if not isinstance(self.action_space, Box):
            raise TypeError("Only continuous actions allowed.")
        self.protagonist_dim_action = self.env.action_space.shape
        # With a positive robustness level the antagonist acts in its own box;
        # otherwise it is disabled (zero-dimensional action).
        self.antagonist_dim_action = antagonist_high.shape if alpha > 0 else (0,)
        self.antagonist_low = antagonist_low
        self.antagonist_high = antagonist_high
        self.alpha = alpha

    @property
    def alpha(self):
        """Return robustness level."""
        return self._alpha

    @alpha.setter
    def alpha(self, alpha):
        """Set robustness level and rebuild the joint action space."""
        if alpha < 0:
            raise ValueError(f"alpha must be strictly positive and {alpha} was given.")
        self._alpha = alpha
        if alpha == 0:
            # No antagonist: expose the unwrapped action space as-is.
            self.action_space = self.env.unwrapped.action_space
            return
        base_space = self.env.unwrapped.action_space
        joint_dim = self.protagonist_dim_action[0] + self.antagonist_dim_action[0]
        self.action_space = Box(
            low=np.concatenate((base_space.low, self.antagonist_low)),
            high=np.concatenate((base_space.high, self.antagonist_high)),
            shape=(joint_dim,),
            dtype=np.float32,
        )

    def step(self, action):
        """See `gym.Env.step()'."""
        n_protagonist = self.protagonist_dim_action[0]
        if len(action) == n_protagonist:
            # Plain action: delegate straight to the wrapped environment.
            assert self.env.action_space.contains(action), f"{action} invalid"
            return self.env.step(action)
        # Joint action: split into protagonist/antagonist parts and dispatch.
        assert self.action_space.contains(action), f"{action} invalid"
        return self.adversarial_step(
            action[:n_protagonist], action[n_protagonist:]
        )

    @abstractmethod
    def adversarial_step(self, protagonist_action, antagonist_action):
        """Perform an adversarial step on the environment."""
        raise NotImplementedError

    @property
    def name(self):
        """Adversarial-Wrapper name."""
        return type(self).__name__
| [
"numpy.concatenate"
] | [((1675, 1749), 'numpy.concatenate', 'np.concatenate', (['(self.env.unwrapped.action_space.low, self.antagonist_low)'], {}), '((self.env.unwrapped.action_space.low, self.antagonist_low))\n', (1689, 1749), True, 'import numpy as np\n'), ((1810, 1886), 'numpy.concatenate', 'np.concatenate', (['(self.env.unwrapped.action_space.high, self.antagonist_high)'], {}), '((self.env.unwrapped.action_space.high, self.antagonist_high))\n', (1824, 1886), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import math
import numpy as np
import pandas as pd
import random
import string
from scipy.stats import zipf
from itertools import chain
import json
class contentCatalogue():
    """Catalogue of content items with Zipf-distributed popularity and an
    item-to-item relation matrix used for deriving recommendations."""

    def __init__(self, size=1000):
        '''
        Create an empty catalogue.
        Input: size: int - number of items the catalogue is meant to hold.
        Attributes: contents (list of item ids), popularity (list of
        probabilities), contentMatrix (NxN relation matrix).
        '''
        self.size = size
        self.contents = []
        self.popularity = []
        self.contentMatrix = []

    def characteristics(self):
        '''
        Output: returns specs of the content catalogue as a printable string
        '''
        return 'Content Catalogue Size: {}\nContent Catalogue Popularity:\n{}\nContent Catalogue:\n{}\nContent Catalogue Relations:\n{}'.format(self.size, self.popularity, self.contents, self.contentMatrix)

    def randomSingleContentGenerator(self, stringLength=8):
        """Generate a random string of letters and digits """
        lettersAndDigits = string.ascii_letters + string.digits
        return ''.join(random.choice(lettersAndDigits) for _ in range(stringLength))

    def randomMultipleContentsGenerator(self):
        """Generate a list of `self.size` random content identifiers."""
        return [self.randomSingleContentGenerator() for _ in range(self.size)]

    def getNrandomElements(self, list, N):
        '''
        Returns N random elements from a list (without replacement).
        NOTE(review): the parameter name `list` shadows the builtin; it is
        kept unchanged for backward compatibility with existing callers.
        Input: list and num of items to be returned
        Output: list of N random items
        '''
        return random.sample(list, N)

    def getContents(self):
        '''
        Output: returns contents as a list
        '''
        return self.contents

    def getContentsLength(self):
        '''
        Output: returns number of items currently in the catalogue
        '''
        return len(self.contents)

    def zipf_pmf(self, lib_size, expn):
        '''
        Probability mass function of a Zipf distribution.
        Input: lib_size: int size of content catalogue, expn: float exponent
        Output: numpy array of lib_size probabilities that sum up to 1
        '''
        # Vectorized fix: the previous version recomputed the normalizing sum
        # for every rank, which made this O(K^2); compute the weights once.
        ranks = np.arange(1, lib_size + 1, dtype=float)
        weights = np.power(ranks, -expn)
        return weights / weights.sum()

    def setContentsPopularity(self, distribution='zipf', a=0.78):
        '''
        Computes the popularity of contents given a distribution (zipf by default).
        Input: distribution name, exponent
        Output: vector of probabilities that correspond to the content catalogue
        Raises: Exception for an unimplemented distribution.
        '''
        if distribution == 'zipf':
            return self.zipf_pmf(self.getContentsLength(), a)
        raise Exception('Distribution \'' + distribution +
                        '\' not implemented yet')

    def initializeContentCatalogue(self, contents):
        '''
        Set the content catalogue and derive its popularity vector.
        Input: strings in a list
        '''
        self.contents = contents
        self.popularity = self.setContentsPopularity()

    def symmetrize(self, a):
        '''
        Forces symmetricity by mirroring the lower triangle of `a`.
        Input: a matrix
        Output: a symmetric matrix derived from the original
        '''
        return np.tril(a) + np.tril(a, -1).T

    def createRandomContentMatrixBinary(self, symmetric=True, numOfRelations=10, outputForm='dataframe'):
        '''
        Build a random binary relation matrix with exactly `numOfRelations`
        ones per row (columns chosen uniformly at random, may include the
        diagonal). NOTE: the `symmetric` flag is currently not applied.
        Output: pandas DataFrame (outputForm='dataframe') or numpy array.
        '''
        numOfContents = self.getContentsLength()
        contentMatrix = np.zeros((numOfContents, numOfContents))
        # For each row, the argsort of random values yields a random
        # permutation; taking its first numOfRelations entries picks distinct
        # random column indices.
        idx = np.random.rand(numOfContents, numOfContents).argsort(1)[
            :, :numOfRelations]
        contentMatrix[np.arange(numOfContents)[:, None], idx] = 1
        print(contentMatrix)
        self.contentMatrix = contentMatrix
        # Return in the requested format (DataFrame labelled by content ids,
        # or the raw numpy array).
        if outputForm == 'dataframe':
            names = list(self.getContents())
            return pd.DataFrame(contentMatrix, index=names, columns=names)
        return contentMatrix

    def loadContentMatrix_JSON(self, url):
        '''
        Loads an item based NxN content matrix (IDs in rows/columns).
        Input: url of content matrix as a JSON file
        '''
        with open(url, 'r') as f:
            self.contentMatrix = np.array(json.load(f))

    def loadContentMatrix_CSV(self, url):
        '''
        Loads an item based NxN content matrix (IDs in rows/columns).
        Also initializes the content catalogue with the given column names.
        Input: url of content matrix as a tab-separated CSV file
        '''
        data = pd.read_csv(url, delimiter='\t')
        self.initializeContentCatalogue(list(data.columns)[1:])
        # Drop the first (index) column of every row.
        self.contentMatrix = np.array([list(x[1:]) for x in data.to_numpy()])

    def relatedContents(self, id):
        '''
        Returns all items related to `id` (non-zero relations in the content
        matrix) together with their relation weights, sorted by descending
        weight. NOTE(review): `id` shadows the builtin; kept for interface
        compatibility.
        Input: id
        Output: list of (related_id, weight) tuples
        '''
        row = self.contentMatrix[self.contents.index(id)]
        # Indices of all entries marked as related (weight == 1). Reading the
        # weight via the index directly (instead of contents.index(rel)) is
        # also correct when content names are duplicated.
        related_idx = np.argwhere(row == 1).flatten()
        toReturn = [(self.contents[i], row[i]) for i in related_idx]
        # Most relevant item first.
        return sorted(toReturn, key=lambda x: x[1], reverse=True)
def main():
    """Build a random catalogue, export its relation matrix, and dump the
    most-popular head plus depth-1/depth-2 recommendation datasets."""
    num_relations = 10  # relations per content item
    width = 5           # recommendations kept per item
    num_popular = 10000  # size of the "most popular" head

    catalogue = contentCatalogue(size=10000)
    catalogue.initializeContentCatalogue(catalogue.randomMultipleContentsGenerator())
    # numOfRelations controls how many relations each content gets.
    catalogue.createRandomContentMatrixBinary(symmetric=False, numOfRelations=num_relations)

    # Persist the full relation matrix as a tab-separated CSV.
    labels = list(catalogue.getContents())
    matrix_df = pd.DataFrame(catalogue.contentMatrix, index=labels, columns=labels)
    matrix_df.to_csv(r'JointCachingRecommendations\Results\contentCatalogue.csv', sep='\t')

    def dump_json(path, payload):
        # Small helper: pretty-print `payload` as JSON at `path`.
        with open(path, 'w') as handle:
            json.dump(payload, handle, indent=4)

    # Head of the catalogue = the most popular contents.
    mostPopularContents = catalogue.getContents()[:num_popular]
    dump_json(r'JointCachingRecommendations\Results\mostPopular.json', mostPopularContents)

    # Depth 1: top related items for every popular content.
    depth1 = {
        popular: [related[0] for related in catalogue.relatedContents(popular)[:width]]
        for popular in mostPopularContents
    }
    dump_json(r'JointCachingRecommendations\Results\dataSet_depth1_width50.json', depth1)

    # Depth 2: top related items for everything reached at depth 1.
    depth1values = list(set(chain.from_iterable(depth1.values())))
    depth2 = {
        item: [related[0] for related in catalogue.relatedContents(item)[:width]]
        for item in depth1values
    }
    dump_json(r'JointCachingRecommendations\Results\dataSet_depth2_width50.json', depth2)


if __name__ == "__main__":
    main()
| [
"pandas.DataFrame",
"json.dump",
"json.load",
"numpy.tril",
"random.sample",
"pandas.read_csv",
"numpy.random.rand",
"numpy.zeros",
"random.choice",
"numpy.array",
"numpy.arange",
"numpy.argwhere",
"itertools.chain.from_iterable"
] | [((8400, 8457), 'pandas.DataFrame', 'pd.DataFrame', (['r.contentMatrix'], {'index': 'names', 'columns': 'names'}), '(r.contentMatrix, index=names, columns=names)\n', (8412, 8457), True, 'import pandas as pd\n'), ((1784, 1806), 'random.sample', 'random.sample', (['list', 'N'], {}), '(list, N)\n', (1797, 1806), False, 'import random\n'), ((2427, 2440), 'numpy.zeros', 'np.zeros', (['[K]'], {}), '([K])\n', (2435, 2440), True, 'import numpy as np\n'), ((3864, 3904), 'numpy.zeros', 'np.zeros', (['(numOfContents, numOfContents)'], {}), '((numOfContents, numOfContents))\n', (3872, 3904), True, 'import numpy as np\n'), ((6195, 6208), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (6203, 6208), True, 'import numpy as np\n'), ((6488, 6520), 'pandas.read_csv', 'pd.read_csv', (['url'], {'delimiter': '"""\t"""'}), "(url, delimiter='\\t')\n", (6499, 6520), True, 'import pandas as pd\n'), ((6668, 6682), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (6676, 6682), True, 'import numpy as np\n'), ((7188, 7222), 'numpy.argwhere', 'np.argwhere', (['(candidateRelated == 1)'], {}), '(candidateRelated == 1)\n', (7199, 7222), True, 'import numpy as np\n'), ((8773, 8816), 'json.dump', 'json.dump', (['mostPopularContents', 'f'], {'indent': '(4)'}), '(mostPopularContents, f, indent=4)\n', (8782, 8816), False, 'import json\n'), ((9113, 9143), 'json.dump', 'json.dump', (['depth1', 'f'], {'indent': '(4)'}), '(depth1, f, indent=4)\n', (9122, 9143), False, 'import json\n'), ((9483, 9513), 'json.dump', 'json.dump', (['depth2', 'f'], {'indent': '(4)'}), '(depth2, f, indent=4)\n', (9492, 9513), False, 'import json\n'), ((3579, 3589), 'numpy.tril', 'np.tril', (['a'], {}), '(a)\n', (3586, 3589), True, 'import numpy as np\n'), ((5799, 5854), 'pandas.DataFrame', 'pd.DataFrame', (['contentMatrix'], {'index': 'names', 'columns': 'names'}), '(contentMatrix, index=names, columns=names)\n', (5811, 5854), True, 'import pandas as pd\n'), ((6153, 6165), 'json.load', 'json.load', (['f'], {}), 
'(f)\n', (6162, 6165), False, 'import json\n'), ((7397, 7444), 'itertools.chain.from_iterable', 'chain.from_iterable', (['indexesOfPositiveRelations'], {}), '(indexesOfPositiveRelations)\n', (7416, 7444), False, 'from itertools import chain\n'), ((1187, 1218), 'random.choice', 'random.choice', (['lettersAndDigits'], {}), '(lettersAndDigits)\n', (1200, 1218), False, 'import random\n'), ((3592, 3606), 'numpy.tril', 'np.tril', (['a', '(-1)'], {}), '(a, -1)\n', (3599, 3606), True, 'import numpy as np\n'), ((3919, 3963), 'numpy.random.rand', 'np.random.rand', (['numOfContents', 'numOfContents'], {}), '(numOfContents, numOfContents)\n', (3933, 3963), True, 'import numpy as np\n'), ((4030, 4054), 'numpy.arange', 'np.arange', (['numOfContents'], {}), '(numOfContents)\n', (4039, 4054), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

# Load the photo, convert it to a numpy array, and display it.
flower = Image.open("flower.jpeg")
img = np.asarray(flower)
plt.imshow(img)
print(type(img))
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.asarray",
"matplotlib.pyplot.show",
"PIL.Image.open"
] | [((78, 103), 'PIL.Image.open', 'Image.open', (['"""flower.jpeg"""'], {}), "('flower.jpeg')\n", (88, 103), False, 'from PIL import Image\n'), ((132, 147), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (142, 147), True, 'import numpy as np\n'), ((148, 163), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (158, 163), True, 'import matplotlib.pyplot as plt\n'), ((181, 191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (189, 191), True, 'import matplotlib.pyplot as plt\n')] |
import os
import json
import numpy as np
import pandas as pd
from keras.applications import inception_resnet_v2
from keras.preprocessing import image
img_width, img_height = 331, 331
import fnmatch
from shutil import copyfile
import PIL
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True # read broken images
# copy jpg files
toCopyFile = False  # when True, classified photos would be copied per class (copy code is currently commented out below)
modelname = "InceptionResnetV2"
# EU
dataname = "EU"
# KEAL
# Cluster paths: working directory, photo source tree, and tagging output.
default_path = '/pd/data/crafty/deepGreen'
photo_path_base = "/pd/data/crafty/FlickrEU_DOWNLOAD_14May2018/May2018_V1_Photo/"
out_path_base = "/pd/data/crafty/FlickrEU_result/Tagging_EU2018_v3/"
# photo_path_base = "/pd/data/crafty/FlickrEU_DOWNLOAD_11Jan2019/Jan2019_V1_Photos/"
# out_path_base = "/pd/data/crafty/FlickrEU_result/Tagging_EU2019_v3/"
# Linux
# default_path = '/home/alan/Dropbox/KIT/FlickrEU/deepGreen'
# photo_path_base = "/home/alan/Dropbox/KIT/FlickrEU/FlickrEU_download/SamplePhotos/"
# # photo_path_base = "/DATA10TB/FlickrEU_download/Bayern/Flickr_Aug2018_V2_Photo_Bayern/"
# out_path_base = "/home/alan/Dropbox/KIT/FlickrEU/LabelledData/Test/"
os.chdir(default_path)
out_path = out_path_base + modelname + "/" + dataname + "/"
# number of images for one batch prediction
prediction_batch_size = 1024
top = 10 # print top-n classes
# NOTE(review): this overrides the 331x331 assignment made near the imports;
# presumably 299 is the intended input size for this model -- confirm.
img_width = img_height = 299
# Full InceptionResNetV2 with its ImageNet classification head.
model_trained = inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None,
                                                      input_shape=(img_width, img_height, 3))
# Imagenet class labels
# CLASS_INDEX maps stringified class indices to entries whose element [1] is
# the human-readable class name (used below).
imagenet_labels_filename = "Data/imagenet_class_index.json"
with open(imagenet_labels_filename) as f:
    CLASS_INDEX = json.load(f)
#
# Build an index-ordered array of class names for vectorized lookup.
classes = []
for i in range(CLASS_INDEX.__len__()):
    classes.append(CLASS_INDEX[str(i)][1])
classes_arr = np.array(classes)
num_classes = len(classes)
# list only folder names
foldernames = [d for d in os.listdir(photo_path_base) if os.path.isdir(os.path.join(photo_path_base, d))]
f_idx = 1
for f_idx in (range(10000, len(foldernames))):
# for f_idx in (range(0, 1)):
foldername = foldernames[f_idx]
print("folder idx:" + str(f_idx))
print(foldername)
photo_path_aoi = os.path.join(photo_path_base, foldername)
for (root, subdirs, files) in os.walk(photo_path_aoi):
if len(subdirs) == 0:
continue # skip if it does not have a subdir
print('--\nroot = ' + root)
# csv output file
name_csv = out_path + "Result/" + "/CSV/" + os.path.relpath(root, photo_path_base) + ".csv"
if os.path.exists(name_csv):
print("skips as it is done already")
continue # skip the folder if there is already the output csv file
### Read filenames
filenames_raw = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(photo_path_aoi)) for f in fn]
# print(filenames_raw)
filenames1 = fnmatch.filter(filenames_raw, "*.jpg")
filenames2 = fnmatch.filter(filenames_raw, "*.JPG")
filenames = filenames1 + filenames2
n_files = len(filenames)
# print(filenames)
def foo_get_year(x):
return (os.path.basename(os.path.dirname(x)))
years = list(map(foo_get_year, filenames))
if n_files == 0:
print("skips as there is no image")
continue # skip the folder if there is no image
# base filenames
base_filenames = list(map(os.path.basename, filenames))
prediction_steps_per_epoch = int(np.ceil(n_files / prediction_batch_size))
# load all images into a list
batch_size_folder = min(n_files, prediction_batch_size) # n_files can be smaller than the batch size
for step_start_idx in range(0, n_files, batch_size_folder):
end_idx = min(step_start_idx + batch_size_folder, n_files)
print(step_start_idx)
print(end_idx)
if step_start_idx == end_idx:
filenames_batch = [filenames[step_start_idx]]
else:
filenames_batch = filenames[step_start_idx:end_idx]
bsize_tmp = min(batch_size_folder, len(filenames_batch)) # for the last batch
images = []
images_broken_idx = np.empty(bsize_tmp, dtype=bool)
images_broken_idx[:] = False
for f_idx, fname in enumerate(filenames_batch):
# print(f_idx, fname)
# print(img_name)
img_name = os.path.join(photo_path_aoi, root, fname)
# load an image in PIL format
try:
img = image.load_img(img_name, target_size=(img_width, img_height))
except:
print("skips as it is broken")
print(f_idx, fname)
images_broken_idx[f_idx] = True
img = PIL.Image.new(mode="RGB", size=(img_width, img_height))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
# prepare the image (normalisation for channels)
img_preprocessed = inception_resnet_v2.preprocess_input(img.copy())
images.append(img_preprocessed)
# vstack for batch tagging
images_vstack = np.vstack(images)
# stack up images list to pass for prediction
predictions = model_trained.predict(images_vstack, batch_size=bsize_tmp)
# predictions.shape
## top selected classes
top_classes_idx_arr = np.argsort(predictions)[:, ::-1][:, :top]
top_classes_arr = classes_arr[top_classes_idx_arr]
# print(top_classes_arr)
# create an empty array
top_classes_probs_arr = np.empty([bsize_tmp, top])
top_classes_probs_arr[:] = 0
for i in range(0, bsize_tmp):
top_classes_probs_arr[i,] = predictions[i, [top_classes_idx_arr[i,]]]
# np.argsort(predictions)[:, ::-1][:,:top][0, :]
# chainlink_fence', 'worm_fence', 'lakeside', 'seashore', 'stone_wall', 'cliff', 'breakwater']
# Out[61]: array([489, 912, 975, 978, 825, 972, 460])
top_classes_arr[0, :]
top_classes_probs_arr[0, :]
predicted_class_v = top_classes_arr[:, 0] # top1
#predicted_class_top2_v = top_classes_arr[:, 1] # top2
# print('Predicted:', predicted_class_v)
# 2nd-level
# kind of equivalent to `sapply()' in R
# def foo_get_predicted_filename(x):
# return (out_path + "Result/" + modelname + "/ClassifiedPhotos/" + os.path.relpath(root,
# photo_path_base) + "/" + x)
# predicted_filenames = list(map(foo_get_predicted_filename, predicted_class_v))
top_classes_arr[images_broken_idx,] = ""
top_classes_probs_arr[images_broken_idx,]= 0
arr_tmp = pd.DataFrame(np.concatenate((top_classes_arr, top_classes_probs_arr), axis=1))
if step_start_idx == 0:
arr_aoi = arr_tmp
else:
arr_aoi = np.concatenate((arr_aoi, arr_tmp), axis=0)
# save_folder_names = list(map(os.path.basename, predicted_filenames))
# create necessary folders
# for i in range(0, n_files):
# if not (os.path.exists(save_folder_names[i])):
# os.makedirs(save_folder_names[i], exist_ok=False)
# if (toCopyFile):
# for i in range(0, bsize_tmp):
#
# save_folder = predicted_filenames[i]
# print(save_folder)
#
# if not (os.path.exists(save_folder)):
# os.makedirs(save_folder, exist_ok=False)
# copyfile(filenames_batch[i], predicted_filenames[i] + '/' + os.path.basename(filenames_batch[i]))
# Write csv files
if not (os.path.exists(os.path.dirname(name_csv))):
os.makedirs(os.path.dirname(name_csv), exist_ok=True)
# Write a Pandas data frame
df_aoi = pd.concat([pd.DataFrame(base_filenames), pd.DataFrame(years), pd.DataFrame(arr_aoi)], axis=1)
header = np.concatenate(
(["Filename"], ["Year"],["Top1", "Top2", "Top3", "Top4", "Top5", "Top6", "Top7", "Top8", "Top9", "Top10"],
["Prob1", "Prob2", "Prob3", "Prob4", "Prob5", "Prob6", "Prob7", "Prob8", "Prob9", "Prob10"]))
df_aoi.columns = header
df_aoi.to_csv(name_csv, index=False, columns=header)
# @todo attention map
| [
"PIL.Image.new",
"numpy.empty",
"os.walk",
"numpy.argsort",
"keras.preprocessing.image.img_to_array",
"os.path.join",
"os.chdir",
"pandas.DataFrame",
"keras.applications.inception_resnet_v2.InceptionResNetV2",
"os.path.dirname",
"os.path.exists",
"keras.preprocessing.image.load_img",
"numpy.... | [((1112, 1134), 'os.chdir', 'os.chdir', (['default_path'], {}), '(default_path)\n', (1120, 1134), False, 'import os\n'), ((1349, 1487), 'keras.applications.inception_resnet_v2.InceptionResNetV2', 'inception_resnet_v2.InceptionResNetV2', ([], {'include_top': '(True)', 'weights': '"""imagenet"""', 'input_tensor': 'None', 'input_shape': '(img_width, img_height, 3)'}), "(include_top=True, weights='imagenet',\n input_tensor=None, input_shape=(img_width, img_height, 3))\n", (1386, 1487), False, 'from keras.applications import inception_resnet_v2\n'), ((1808, 1825), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (1816, 1825), True, 'import numpy as np\n'), ((1683, 1695), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1692, 1695), False, 'import json\n'), ((2210, 2251), 'os.path.join', 'os.path.join', (['photo_path_base', 'foldername'], {}), '(photo_path_base, foldername)\n', (2222, 2251), False, 'import os\n'), ((2287, 2310), 'os.walk', 'os.walk', (['photo_path_aoi'], {}), '(photo_path_aoi)\n', (2294, 2310), False, 'import os\n'), ((1923, 1950), 'os.listdir', 'os.listdir', (['photo_path_base'], {}), '(photo_path_base)\n', (1933, 1950), False, 'import os\n'), ((2574, 2598), 'os.path.exists', 'os.path.exists', (['name_csv'], {}), '(name_csv)\n', (2588, 2598), False, 'import os\n'), ((2932, 2970), 'fnmatch.filter', 'fnmatch.filter', (['filenames_raw', '"""*.jpg"""'], {}), "(filenames_raw, '*.jpg')\n", (2946, 2970), False, 'import fnmatch\n'), ((2992, 3030), 'fnmatch.filter', 'fnmatch.filter', (['filenames_raw', '"""*.JPG"""'], {}), "(filenames_raw, '*.JPG')\n", (3006, 3030), False, 'import fnmatch\n'), ((8412, 8636), 'numpy.concatenate', 'np.concatenate', (["(['Filename'], ['Year'], ['Top1', 'Top2', 'Top3', 'Top4', 'Top5', 'Top6',\n 'Top7', 'Top8', 'Top9', 'Top10'], ['Prob1', 'Prob2', 'Prob3', 'Prob4',\n 'Prob5', 'Prob6', 'Prob7', 'Prob8', 'Prob9', 'Prob10'])"], {}), "((['Filename'], ['Year'], ['Top1', 'Top2', 'Top3', 'Top4',\n 'Top5', 'Top6', 
'Top7', 'Top8', 'Top9', 'Top10'], ['Prob1', 'Prob2',\n 'Prob3', 'Prob4', 'Prob5', 'Prob6', 'Prob7', 'Prob8', 'Prob9', 'Prob10']))\n", (8426, 8636), True, 'import numpy as np\n'), ((1968, 2000), 'os.path.join', 'os.path.join', (['photo_path_base', 'd'], {}), '(photo_path_base, d)\n', (1980, 2000), False, 'import os\n'), ((2784, 2803), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (2796, 2803), False, 'import os\n'), ((3548, 3588), 'numpy.ceil', 'np.ceil', (['(n_files / prediction_batch_size)'], {}), '(n_files / prediction_batch_size)\n', (3555, 3588), True, 'import numpy as np\n'), ((4285, 4316), 'numpy.empty', 'np.empty', (['bsize_tmp'], {'dtype': 'bool'}), '(bsize_tmp, dtype=bool)\n', (4293, 4316), True, 'import numpy as np\n'), ((5331, 5348), 'numpy.vstack', 'np.vstack', (['images'], {}), '(images)\n', (5340, 5348), True, 'import numpy as np\n'), ((5813, 5839), 'numpy.empty', 'np.empty', (['[bsize_tmp, top]'], {}), '([bsize_tmp, top])\n', (5821, 5839), True, 'import numpy as np\n'), ((2515, 2553), 'os.path.relpath', 'os.path.relpath', (['root', 'photo_path_base'], {}), '(root, photo_path_base)\n', (2530, 2553), False, 'import os\n'), ((3206, 3224), 'os.path.dirname', 'os.path.dirname', (['x'], {}), '(x)\n', (3221, 3224), False, 'import os\n'), ((4521, 4562), 'os.path.join', 'os.path.join', (['photo_path_aoi', 'root', 'fname'], {}), '(photo_path_aoi, root, fname)\n', (4533, 4562), False, 'import os\n'), ((4991, 5014), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (5009, 5014), False, 'from keras.preprocessing import image\n'), ((5037, 5064), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (5051, 5064), True, 'import numpy as np\n'), ((7108, 7172), 'numpy.concatenate', 'np.concatenate', (['(top_classes_arr, top_classes_probs_arr)'], {'axis': '(1)'}), '((top_classes_arr, top_classes_probs_arr), axis=1)\n', (7122, 7172), True, 'import numpy as np\n'), ((7289, 7331), 
'numpy.concatenate', 'np.concatenate', (['(arr_aoi, arr_tmp)'], {'axis': '(0)'}), '((arr_aoi, arr_tmp), axis=0)\n', (7303, 7331), True, 'import numpy as np\n'), ((8152, 8177), 'os.path.dirname', 'os.path.dirname', (['name_csv'], {}), '(name_csv)\n', (8167, 8177), False, 'import os\n'), ((8205, 8230), 'os.path.dirname', 'os.path.dirname', (['name_csv'], {}), '(name_csv)\n', (8220, 8230), False, 'import os\n'), ((8312, 8340), 'pandas.DataFrame', 'pd.DataFrame', (['base_filenames'], {}), '(base_filenames)\n', (8324, 8340), True, 'import pandas as pd\n'), ((8342, 8361), 'pandas.DataFrame', 'pd.DataFrame', (['years'], {}), '(years)\n', (8354, 8361), True, 'import pandas as pd\n'), ((8363, 8384), 'pandas.DataFrame', 'pd.DataFrame', (['arr_aoi'], {}), '(arr_aoi)\n', (8375, 8384), True, 'import pandas as pd\n'), ((2830, 2864), 'os.path.expanduser', 'os.path.expanduser', (['photo_path_aoi'], {}), '(photo_path_aoi)\n', (2848, 2864), False, 'import os\n'), ((4657, 4718), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_name'], {'target_size': '(img_width, img_height)'}), '(img_name, target_size=(img_width, img_height))\n', (4671, 4718), False, 'from keras.preprocessing import image\n'), ((5597, 5620), 'numpy.argsort', 'np.argsort', (['predictions'], {}), '(predictions)\n', (5607, 5620), True, 'import numpy as np\n'), ((4912, 4967), 'PIL.Image.new', 'PIL.Image.new', ([], {'mode': '"""RGB"""', 'size': '(img_width, img_height)'}), "(mode='RGB', size=(img_width, img_height))\n", (4925, 4967), False, 'import PIL\n')] |
from typing import Union
import warnings
import numpy as np
from copy import copy
from scipy import ndimage as ndi
from xml.etree.ElementTree import Element
from base64 import b64encode
from imageio import imwrite
from ..base import Layer
from ...util.colormaps import colormaps
from ...util.event import Event
from ...util.misc import interpolate_coordinates
from ._constants import Mode
class Labels(Layer):
"""Labels (or segmentation) layer.
An image-like layer where every pixel contains an integer ID
corresponding to the region it belongs to.
Parameters
----------
data : array
Labels data.
num_colors : int
Number of unique colors to use in colormap.
seed : float
Seed for colormap random generator.
n_dimensional : bool
If `True`, paint and fill edit labels across all dimensions.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
Attributes
----------
data : array
Integer valued label data. Can be N dimensional. Every pixel contains
an integer ID corresponding to the region it belongs to. The label 0 is
rendered as transparent.
metadata : dict
Labels metadata.
num_colors : int
Number of unique colors to use in colormap.
seed : float
Seed for colormap random generator.
opacity : float
Opacity of the labels, must be between 0 and 1.
contiguous : bool
If `True`, the fill bucket changes only connected pixels of same label.
n_dimensional : bool
If `True`, paint and fill edit labels across all dimensions.
brush_size : float
Size of the paint brush.
selected_label : int
Index of selected label. Can be greater than the current maximum label.
mode : str
Interactive mode. The normal, default mode is PAN_ZOOM, which
allows for normal interactivity with the canvas.
In PICKER mode the cursor functions like a color picker, setting the
    clicked on label to be the current label. If the background is picked it
will select the background label `0`.
In PAINT mode the cursor functions like a paint brush changing any
pixels it brushes over to the current label. If the background label
    `0` is selected, then any pixels will be changed to background and this
tool functions like an eraser. The size and shape of the cursor can be
adjusted in the properties widget.
In FILL mode the cursor functions like a fill bucket replacing pixels
of the label clicked on with the current label. It can either replace
all pixels of that label or just those that are contiguous with the
    clicked on pixel. If the background label `0` is selected, then any
pixels will be changed to background and this tool functions like an
eraser.
Extended Summary
----------
_data_labels : array (N, M)
2D labels data for the currently viewed slice.
_selected_color : 4-tuple or None
RGBA tuple of the color of the selected label, or None if the
background label `0` is selected.
_last_cursor_coord : list or None
Coordinates of last cursor click before painting, gets reset to None
after painting is done. Used for interpolating brush strokes.
"""
    def __init__(
        self,
        data,
        *,
        num_colors=50,
        seed=0.5,
        n_dimensional=False,
        name=None,
        metadata=None,
        scale=None,
        translate=None,
        opacity=0.7,
        blending='translucent',
        visible=True,
    ):
        """Create a labels layer from integer-valued `data`.

        See the class docstring for the meaning of each parameter.
        """
        super().__init__(
            data.ndim,
            name=name,
            metadata=metadata,
            scale=scale,
            translate=translate,
            opacity=opacity,
            blending=blending,
            visible=visible,
        )

        # Register the events this layer can emit beyond the base Layer set.
        self.events.add(
            contrast_limits=Event,
            colormap=Event,
            interpolation=Event,
            rendering=Event,
            mode=Event,
            n_dimensional=Event,
            contiguous=Event,
            brush_size=Event,
            selected_label=Event,
        )

        self._data = data
        # Placeholders for the displayed slice; refreshed by _update_dims()
        # (via _set_view_slice) below.
        self._data_labels = np.zeros((1,) * self.dims.ndisplay)
        self._data_view = np.zeros((1,) * self.dims.ndisplay)
        self.contrast_limits = [0.0, 1.0]
        self.interpolation = 'nearest'
        self.rendering = 'mip'

        # Random label colormap derived from `seed` and `num_colors`.
        self._seed = seed
        self._colormap_name = 'random'
        self._num_colors = num_colors
        self.colormap = (
            self._colormap_name,
            colormaps.label_colormap(self.num_colors),
        )

        # Painting / filling state.
        self._n_dimensional = n_dimensional
        self._contiguous = True
        self._brush_size = 10
        self._last_cursor_coord = None

        self._selected_label = 0
        self._selected_color = None

        # Interaction mode state (see the Mode enum).
        self._mode = Mode.PAN_ZOOM
        self._mode_history = self._mode
        self._status = self.mode
        self._help = 'enter paint or fill mode to edit labels'

        # Trigger generation of view slice and thumbnail
        self._update_dims()
@property
def data(self):
"""array: Labels data."""
return self._data
@data.setter
def data(self, data):
self._data = data
self._update_dims()
self.events.data()
def _get_ndim(self):
"""Determine number of dimensions of the layer."""
return self.data.ndim
def _get_extent(self):
return tuple((0, m) for m in self.data.shape)
@property
def contiguous(self):
"""bool: fill bucket changes only connected pixels of same label."""
return self._contiguous
@contiguous.setter
def contiguous(self, contiguous):
self._contiguous = contiguous
self.events.contiguous()
@property
def n_dimensional(self):
"""bool: paint and fill edits labels across all dimensions."""
return self._n_dimensional
@n_dimensional.setter
def n_dimensional(self, n_dimensional):
self._n_dimensional = n_dimensional
self.events.n_dimensional()
@property
def brush_size(self):
"""float: Size of the paint brush."""
return self._brush_size
@brush_size.setter
def brush_size(self, brush_size):
self._brush_size = int(brush_size)
self.cursor_size = self._brush_size / self.scale_factor
self.events.brush_size()
@property
def seed(self):
"""float: Seed for colormap random generator."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
self._set_view_slice()
@property
def num_colors(self):
"""int: Number of unique colors to use in colormap."""
return self._num_colors
@num_colors.setter
def num_colors(self, num_colors):
self._num_colors = num_colors
self.colormap = (
self._colormap_name,
colormaps.label_colormap(num_colors),
)
self._set_view_slice()
@property
def selected_label(self):
"""int: Index of selected label."""
return self._selected_label
@selected_label.setter
def selected_label(self, selected_label):
if selected_label < 0:
raise ValueError('cannot reduce selected label below 0')
if selected_label == self.selected_label:
return
self._selected_label = selected_label
self._selected_color = self.get_color(selected_label)
self.events.selected_label()
@property
def mode(self):
"""MODE: Interactive mode. The normal, default mode is PAN_ZOOM, which
allows for normal interactivity with the canvas.
In PICKER mode the cursor functions like a color picker, setting the
clicked on label to be the curent label. If the background is picked it
will select the background label `0`.
In PAINT mode the cursor functions like a paint brush changing any
pixels it brushes over to the current label. If the background label
`0` is selected than any pixels will be changed to background and this
tool functions like an eraser. The size and shape of the cursor can be
adjusted in the properties widget.
In FILL mode the cursor functions like a fill bucket replacing pixels
of the label clicked on with the current label. It can either replace
all pixels of that label or just those that are contiguous with the
clicked on pixel. If the background label `0` is selected than any
pixels will be changed to background and this tool functions like an
eraser.
"""
return str(self._mode)
@mode.setter
def mode(self, mode: Union[str, Mode]):
if isinstance(mode, str):
mode = Mode(mode)
if mode == self._mode:
return
if mode == Mode.PAN_ZOOM:
self.cursor = 'standard'
self.interactive = True
self.help = 'enter paint or fill mode to edit labels'
elif mode == Mode.PICKER:
self.cursor = 'cross'
self.interactive = False
self.help = 'hold <space> to pan/zoom, ' 'click to pick a label'
elif mode == Mode.PAINT:
self.cursor_size = self.brush_size / self.scale_factor
self.cursor = 'square'
self.interactive = False
self.help = 'hold <space> to pan/zoom, ' 'drag to paint a label'
elif mode == Mode.FILL:
self.cursor = 'cross'
self.interactive = False
self.help = 'hold <space> to pan/zoom, ' 'click to fill a label'
else:
raise ValueError("Mode not recongnized")
self.status = str(mode)
self._mode = mode
self.events.mode(mode=mode)
self._set_view_slice()
def _raw_to_displayed(self, raw):
"""Determine displayed image from a saved raw image and a saved seed.
This function ensures that the 0 label gets mapped to the 0 displayed
pixel.
Parameters
-------
raw : array or int
Raw integer input image.
Returns
-------
image : array
Image mapped between 0 and 1 to be displayed.
"""
image = np.where(
raw > 0, colormaps._low_discrepancy_image(raw, self._seed), 0
)
return image
    def new_colormap(self):
        """Randomise the colormap seed and refresh the display.

        Re-seeds the random colormap, updates the cached color of the
        currently selected label, and re-renders the current slice.
        """
        self._seed = np.random.rand()
        self._selected_color = self.get_color(self.selected_label)
        self._set_view_slice()
def get_color(self, label):
"""Return the color corresponding to a specific label."""
if label == 0:
col = None
else:
val = self._raw_to_displayed(np.array([label]))
col = self.colormap[1].map(val)[0]
return col
    def _set_view_slice(self):
        """Sets the view given the indices to slice with."""
        # slice the raw data and reorder axes into the displayed order
        self._data_labels = np.asarray(self.data[self.dims.indices]).transpose(
            self.dims.displayed_order
        )
        # map raw labels into [0, 1] display values
        self._data_view = self._raw_to_displayed(self._data_labels)
        self._update_thumbnail()
        self._update_coordinates()
        self.events.set_data()
    def fill(self, coord, old_label, new_label):
        """Replace an existing label with a new label, either just at the
        connected component if the `contiguous` flag is `True` or everywhere
        if it is `False`, working either just in the current slice if
        the `n_dimensional` flag is `False` or on the entire data if it is
        `True`.

        Parameters
        ----------
        coord : sequence of float
            Position of mouse cursor in image coordinates.
        old_label : int
            Value of the label image at the coord to be replaced.
        new_label : int
            Value of the new label to be filled in.
        """
        int_coord = np.round(coord).astype(int)
        if self.n_dimensional or self.ndim == 2:
            # work with entire image
            labels = self.data
            slice_coord = tuple(int_coord)
        else:
            # work with just the sliced image
            labels = self._data_labels
            slice_coord = tuple(int_coord[d] for d in self.dims.displayed)
        matches = labels == old_label
        if self.contiguous:
            # restrict the fill to the connected component under the cursor;
            # when there is only one component the mask is already correct
            labeled_matches, num_features = ndi.label(matches)
            if num_features != 1:
                match_label = labeled_matches[slice_coord]
                matches = np.logical_and(
                    matches, labeled_matches == match_label
                )
        # Replace target pixels with new_label
        labels[matches] = new_label
        if not (self.n_dimensional or self.ndim == 2):
            # if working with just the slice, update the rest of the raw data
            # NOTE(review): indexes with ``self.indices`` while slicing above
            # uses ``self.dims.indices`` -- confirm the two agree
            self.data[tuple(self.indices)] = labels
        self._set_view_slice()
    def paint(self, coord, new_label):
        """Paint over existing labels with a new label, using the selected
        brush shape and size, either only on the visible slice or in all
        n dimensions.

        Parameters
        ----------
        coord : sequence of int
            Position of mouse cursor in image coordinates.
        new_label : int
            Value of the new label to be filled in.
        """
        if self.n_dimensional or self.ndim == 2:
            # paint a brush_size hypercube centred on coord in every axis,
            # clipped to the data extent
            slice_coord = tuple(
                [
                    slice(
                        np.round(
                            np.clip(c - self.brush_size / 2 + 0.5, 0, s)
                        ).astype(int),
                        np.round(
                            np.clip(c + self.brush_size / 2 + 0.5, 0, s)
                        ).astype(int),
                        1,
                    )
                    for c, s in zip(coord, self.shape)
                ]
            )
        else:
            # paint only in the displayed dimensions; the non-displayed
            # dimensions are pinned to the cursor's current slice
            slice_coord = [0] * self.ndim
            for i in self.dims.displayed:
                slice_coord[i] = slice(
                    np.round(
                        np.clip(
                            coord[i] - self.brush_size / 2 + 0.5,
                            0,
                            self.shape[i],
                        )
                    ).astype(int),
                    np.round(
                        np.clip(
                            coord[i] + self.brush_size / 2 + 0.5,
                            0,
                            self.shape[i],
                        )
                    ).astype(int),
                    1,
                )
            for i in self.dims.not_displayed:
                slice_coord[i] = np.round(coord[i]).astype(int)
            slice_coord = tuple(slice_coord)
        # update the labels image
        self.data[slice_coord] = new_label
        self._set_view_slice()
def get_value(self):
"""Returns coordinates, values, and a string for a given mouse position
and set of indices.
Returns
----------
coord : tuple of int
Position of mouse cursor in data.
value : int or float or sequence of int or float or None
Value of the data at the coord, or none if coord is outside range.
"""
coord = np.round(self.coordinates).astype(int)
shape = self._data_labels.shape
if all(0 <= c < s for c, s in zip(coord[self.dims.displayed], shape)):
value = self._data_labels[tuple(coord[self.dims.displayed])]
else:
value = None
return value
    def _update_thumbnail(self):
        """Update thumbnail with current image data and colors.
        """
        if self.dims.ndisplay == 3:
            # collapse the depth axis with a max projection
            image = np.max(self._data_labels, axis=0)
        else:
            image = self._data_labels
        # smallest factor that shrinks the slice into the thumbnail box
        zoom_factor = np.divide(
            self._thumbnail_shape[:2], image.shape[:2]
        ).min()
        # warning filter can be removed with scipy 1.4
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # order=0 (nearest neighbour) keeps the labels integral
            downsampled = np.round(
                ndi.zoom(image, zoom_factor, prefilter=False, order=0)
            )
        downsampled = self._raw_to_displayed(downsampled)
        colormapped = self.colormap[1].map(downsampled)
        colormapped = colormapped.reshape(downsampled.shape + (4,))
        # render background as black instead of transparent
        colormapped[..., 3] = 1
        colormapped[..., 3] *= self.opacity
        self.thumbnail = colormapped
    def to_xml_list(self):
        """Generates a list with a single xml element that defines the
        currently viewed image as a png according to the svg specification.

        Returns
        ----------
        xml : list of xml.etree.ElementTree.Element
            List of a single xml element specifying the currently viewed image
            as a png according to the svg specification.
        """
        # colormap output is float RGBA in [0, 1]; scale to 8-bit
        mapped_image = (self.colormap[1].map(self._data_view) * 255).astype(
            np.uint8
        )
        mapped_image = mapped_image.reshape(list(self._data_view.shape) + [4])
        # encode as png and embed it as a base64 data URI;
        # str(...)[2:-1] strips the b'...' wrapper of the bytes repr
        image_str = imwrite('<bytes>', mapped_image, format='png')
        image_str = "data:image/png;base64," + str(b64encode(image_str))[2:-1]
        props = {'xlink:href': image_str}
        # dims.displayed[1] is the horizontal axis, [0] the vertical one
        width = str(self.shape[self.dims.displayed[1]])
        height = str(self.shape[self.dims.displayed[0]])
        opacity = str(self.opacity)
        xml = Element(
            'image', width=width, height=height, opacity=opacity, **props
        )
        return [xml]
def on_mouse_press(self, event):
"""Called whenever mouse pressed in canvas.
Parameters
----------
event : Event
Vispy event
"""
if self._mode == Mode.PAN_ZOOM:
# If in pan/zoom mode do nothing
pass
elif self._mode == Mode.PICKER:
self.selected_label = self._value
elif self._mode == Mode.PAINT:
# Start painting with new label
self.paint(self.coordinates, self.selected_label)
self._last_cursor_coord = copy(self.coordinates)
elif self._mode == Mode.FILL:
# Fill clicked on region with new label
self.fill(self.coordinates, self._value, self.selected_label)
else:
raise ValueError("Mode not recongnized")
    def on_mouse_move(self, event):
        """Called whenever mouse moves over canvas.

        Parameters
        ----------
        event : Event
            Vispy event
        """
        if self._mode == Mode.PAINT and event.is_dragging:
            new_label = self.selected_label
            if self._last_cursor_coord is None:
                # first event of the drag: paint a single stamp
                interp_coord = [self.coordinates]
            else:
                # interpolate between the previous and current cursor
                # positions so fast drags still produce a continuous stroke
                interp_coord = interpolate_coordinates(
                    self._last_cursor_coord, self.coordinates, self.brush_size
                )
            with self.events.set_data.blocker():
                # suppress the per-stamp refresh; redraw once afterwards
                for c in interp_coord:
                    self.paint(c, new_label)
            self._set_view_slice()
            self._last_cursor_coord = copy(self.coordinates)
def on_mouse_release(self, event):
"""Called whenever mouse released in canvas.
Parameters
----------
event : Event
Vispy event
"""
self._last_cursor_coord = None
| [
"numpy.divide",
"warnings.simplefilter",
"numpy.logical_and",
"numpy.asarray",
"xml.etree.ElementTree.Element",
"numpy.zeros",
"copy.copy",
"scipy.ndimage.zoom",
"numpy.clip",
"numpy.max",
"scipy.ndimage.label",
"warnings.catch_warnings",
"numpy.array",
"base64.b64encode",
"numpy.random.... | [((4804, 4839), 'numpy.zeros', 'np.zeros', (['((1,) * self.dims.ndisplay)'], {}), '((1,) * self.dims.ndisplay)\n', (4812, 4839), True, 'import numpy as np\n'), ((4866, 4901), 'numpy.zeros', 'np.zeros', (['((1,) * self.dims.ndisplay)'], {}), '((1,) * self.dims.ndisplay)\n', (4874, 4901), True, 'import numpy as np\n'), ((11104, 11120), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (11118, 11120), True, 'import numpy as np\n'), ((17921, 17967), 'imageio.imwrite', 'imwrite', (['"""<bytes>"""', 'mapped_image'], {'format': '"""png"""'}), "('<bytes>', mapped_image, format='png')\n", (17928, 17967), False, 'from imageio import imwrite\n'), ((18252, 18322), 'xml.etree.ElementTree.Element', 'Element', (['"""image"""'], {'width': 'width', 'height': 'height', 'opacity': 'opacity'}), "('image', width=width, height=height, opacity=opacity, **props)\n", (18259, 18322), False, 'from xml.etree.ElementTree import Element\n'), ((13133, 13151), 'scipy.ndimage.label', 'ndi.label', (['matches'], {}), '(matches)\n', (13142, 13151), True, 'from scipy import ndimage as ndi\n'), ((16497, 16530), 'numpy.max', 'np.max', (['self._data_labels'], {'axis': '(0)'}), '(self._data_labels, axis=0)\n', (16503, 16530), True, 'import numpy as np\n'), ((16756, 16781), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (16779, 16781), False, 'import warnings\n'), ((16795, 16826), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (16816, 16826), False, 'import warnings\n'), ((19942, 19964), 'copy.copy', 'copy', (['self.coordinates'], {}), '(self.coordinates)\n', (19946, 19964), False, 'from copy import copy\n'), ((11419, 11436), 'numpy.array', 'np.array', (['[label]'], {}), '([label])\n', (11427, 11436), True, 'import numpy as np\n'), ((11625, 11665), 'numpy.asarray', 'np.asarray', (['self.data[self.dims.indices]'], {}), '(self.data[self.dims.indices])\n', (11635, 11665), True, 'import numpy as np\n'), ((12585, 
12600), 'numpy.round', 'np.round', (['coord'], {}), '(coord)\n', (12593, 12600), True, 'import numpy as np\n'), ((13271, 13326), 'numpy.logical_and', 'np.logical_and', (['matches', '(labeled_matches == match_label)'], {}), '(matches, labeled_matches == match_label)\n', (13285, 13326), True, 'import numpy as np\n'), ((16039, 16065), 'numpy.round', 'np.round', (['self.coordinates'], {}), '(self.coordinates)\n', (16047, 16065), True, 'import numpy as np\n'), ((16606, 16659), 'numpy.divide', 'np.divide', (['self._thumbnail_shape[:2]', 'image.shape[:2]'], {}), '(self._thumbnail_shape[:2], image.shape[:2])\n', (16615, 16659), True, 'import numpy as np\n'), ((16879, 16933), 'scipy.ndimage.zoom', 'ndi.zoom', (['image', 'zoom_factor'], {'prefilter': '(False)', 'order': '(0)'}), '(image, zoom_factor, prefilter=False, order=0)\n', (16887, 16933), True, 'from scipy import ndimage as ndi\n'), ((18019, 18039), 'base64.b64encode', 'b64encode', (['image_str'], {}), '(image_str)\n', (18028, 18039), False, 'from base64 import b64encode\n'), ((18924, 18946), 'copy.copy', 'copy', (['self.coordinates'], {}), '(self.coordinates)\n', (18928, 18946), False, 'from copy import copy\n'), ((15436, 15454), 'numpy.round', 'np.round', (['coord[i]'], {}), '(coord[i])\n', (15444, 15454), True, 'import numpy as np\n'), ((14842, 14905), 'numpy.clip', 'np.clip', (['(coord[i] - self.brush_size / 2 + 0.5)', '(0)', 'self.shape[i]'], {}), '(coord[i] - self.brush_size / 2 + 0.5, 0, self.shape[i])\n', (14849, 14905), True, 'import numpy as np\n'), ((15106, 15169), 'numpy.clip', 'np.clip', (['(coord[i] + self.brush_size / 2 + 0.5)', '(0)', 'self.shape[i]'], {}), '(coord[i] + self.brush_size / 2 + 0.5, 0, self.shape[i])\n', (15113, 15169), True, 'import numpy as np\n'), ((14284, 14328), 'numpy.clip', 'np.clip', (['(c - self.brush_size / 2 + 0.5)', '(0)', 's'], {}), '(c - self.brush_size / 2 + 0.5, 0, s)\n', (14291, 14328), True, 'import numpy as np\n'), ((14430, 14474), 'numpy.clip', 'np.clip', (['(c + 
self.brush_size / 2 + 0.5)', '(0)', 's'], {}), '(c + self.brush_size / 2 + 0.5, 0, s)\n', (14437, 14474), True, 'import numpy as np\n')] |
from __future__ import division
import math
import numpy as np
import random
def random_sized_crop(img,
                      scale_ratio_range=(0.08, 1),
                      aspect_ratio_range=(3 / 4, 4 / 3),
                      return_param=False, copy=False):
    """Crop an image to random size and aspect ratio.
    The size :math:`(H_{crop}, W_{crop})` and the left top coordinate
    :math:`(y_{start}, x_{start})` of the crop are calculated as follows:
    + :math:`H_{crop} = \\lfloor{\\sqrt{s \\times H \\times W \
        \\times a}}\\rfloor`
    + :math:`W_{crop} = \\lfloor{\\sqrt{s \\times H \\times W \
        \\div a}}\\rfloor`
    + :math:`y_{start} \\sim Uniform\\{0, H - H_{crop}\\}`
    + :math:`x_{start} \\sim Uniform\\{0, W - W_{crop}\\}`
    + :math:`s \\sim Uniform(s_1, s_2)`
    + :math:`b \\sim Uniform(a_1, a_2)` and \
        :math:`a = b` or :math:`a = \\frac{1}{b}` in 50/50 probability.
    Here, :math:`s_1, s_2` are the two floats in
    :obj:`scale_ratio_range` and :math:`a_1, a_2` are the two floats
    in :obj:`aspect_ratio_range`.
    Also, :math:`H` and :math:`W` are the height and the width of the image.
    Note that :math:`s \\approx \\frac{H_{crop} \\times W_{crop}}{H \\times W}`
    and :math:`a \\approx \\frac{H_{crop}}{W_{crop}}`.
    The approximations come from flooring floats to integers.
    .. note::
        When it fails to sample a valid scale and aspect ratio for ten
        times, it picks values in a non-uniform way.
        If this happens, the selected scale ratio can be smaller
        than :obj:`scale_ratio_range[0]`.
    Args:
        img (~numpy.ndarray): An image array. This is in CHW format.
        scale_ratio_range (tuple of two floats): Determines
            the distribution from which a scale ratio is sampled.
            The default values are selected so that the area of the crop is
            8~100% of the original image. This is the default
            setting used to train ResNets in Torch style.
        aspect_ratio_range (tuple of two floats): Determines
            the distribution from which an aspect ratio is sampled.
            The default values are
            :math:`\\frac{3}{4}` and :math:`\\frac{4}{3}`, which
            are also the default setting to train ResNets in Torch style.
        return_param (bool): Returns parameters if :obj:`True`.
        copy (bool): If :obj:`True`, a copy of the cropped region is
            returned instead of a view into :obj:`img`. The default
            value is :obj:`False`.
    Returns:
        ~numpy.ndarray or (~numpy.ndarray, dict):
        If :obj:`return_param = False`,
        returns only the cropped image.
        If :obj:`return_param = True`,
        returns a tuple of cropped image and :obj:`param`.
        :obj:`param` is a dictionary of intermediate parameters whose
        contents are listed below with key, value-type and the description
        of the value.
        * **y_slice** (*slice*): A slice used to crop the input image.\
            The relation below holds together with :obj:`x_slice`.
        * **x_slice** (*slice*): Similar to :obj:`y_slice`.
        .. code::
            out_img = img[:, y_slice, x_slice]
        * **scale_ratio** (float): :math:`s` in the description (see above).
        * **aspect_ratio** (float): :math:`a` in the description.
    """
    _, H, W = img.shape
    scale_ratio, aspect_ratio =\
        _sample_parameters(
            (H, W), scale_ratio_range, aspect_ratio_range)
    # crop size chosen so that area ratio ~ s and aspect ratio ~ a
    H_crop = int(math.floor(np.sqrt(scale_ratio * H * W * aspect_ratio)))
    W_crop = int(math.floor(np.sqrt(scale_ratio * H * W / aspect_ratio)))
    y_start = random.randint(0, H - H_crop)
    x_start = random.randint(0, W - W_crop)
    y_slice = slice(y_start, y_start + H_crop)
    x_slice = slice(x_start, x_start + W_crop)
    img = img[:, y_slice, x_slice]
    if copy:
        img = img.copy()
    if return_param:
        params = {'y_slice': y_slice, 'x_slice': x_slice,
                  'scale_ratio': scale_ratio, 'aspect_ratio': aspect_ratio}
        return img, params
    else:
        return img
def _sample_parameters(size, scale_ratio_range, aspect_ratio_range):
H, W = size
for _ in range(10):
aspect_ratio = random.uniform(
aspect_ratio_range[0], aspect_ratio_range[1])
if random.uniform(0, 1) < 0.5:
aspect_ratio = 1 / aspect_ratio
# This is determined so that relationships "H - H_crop >= 0" and
# "W - W_crop >= 0" are always satisfied.
scale_ratio_max = min((scale_ratio_range[1],
H / (W * aspect_ratio),
(aspect_ratio * W) / H))
scale_ratio = random.uniform(
scale_ratio_range[0], scale_ratio_range[1])
if scale_ratio_range[0] <= scale_ratio <= scale_ratio_max:
return scale_ratio, aspect_ratio
# This scale_ratio is outside the given range when
# scale_ratio_max < scale_ratio_range[0].
scale_ratio = random.uniform(
min((scale_ratio_range[0], scale_ratio_max)), scale_ratio_max)
return scale_ratio, aspect_ratio
| [
"numpy.sqrt",
"random.randint",
"random.uniform"
] | [((3519, 3548), 'random.randint', 'random.randint', (['(0)', '(H - H_crop)'], {}), '(0, H - H_crop)\n', (3533, 3548), False, 'import random\n'), ((3563, 3592), 'random.randint', 'random.randint', (['(0)', '(W - W_crop)'], {}), '(0, W - W_crop)\n', (3577, 3592), False, 'import random\n'), ((4107, 4167), 'random.uniform', 'random.uniform', (['aspect_ratio_range[0]', 'aspect_ratio_range[1]'], {}), '(aspect_ratio_range[0], aspect_ratio_range[1])\n', (4121, 4167), False, 'import random\n'), ((4574, 4632), 'random.uniform', 'random.uniform', (['scale_ratio_range[0]', 'scale_ratio_range[1]'], {}), '(scale_ratio_range[0], scale_ratio_range[1])\n', (4588, 4632), False, 'import random\n'), ((3385, 3428), 'numpy.sqrt', 'np.sqrt', (['(scale_ratio * H * W * aspect_ratio)'], {}), '(scale_ratio * H * W * aspect_ratio)\n', (3392, 3428), True, 'import numpy as np\n'), ((3459, 3502), 'numpy.sqrt', 'np.sqrt', (['(scale_ratio * H * W / aspect_ratio)'], {}), '(scale_ratio * H * W / aspect_ratio)\n', (3466, 3502), True, 'import numpy as np\n'), ((4192, 4212), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4206, 4212), False, 'import random\n')] |
"""
This file should contain only functions that operates on pixels, not on images,
so no references to PIL are necessary and the module can be used also through
Jython
"""
import numpy
import sys
import time
from worldengine.common import get_verbose, count_neighbours
from worldengine.common import anti_alias as anti_alias_channel
from worldengine.biome import BiomeGroup, _un_camelize
# -------------------
# Reusable functions
# -------------------
def gradient(value, low, high, low_color, high_color):
    """Linearly interpolate two RGB colors; return an RGBA tuple.

    ``value`` is positioned inside ``[low, high]``; when the interval is
    empty the low color is returned unchanged. Alpha is always 255.
    """
    low_r, low_g, low_b = low_color
    if high == low:
        return low_r, low_g, low_b, 255
    weight = float(value - low) / float(high - low)
    inverse = 1.0 - weight
    high_r, high_g, high_b = high_color
    return (
        int(low_r * inverse + high_r * weight),
        int(low_g * inverse + high_g * weight),
        int(low_b * inverse + high_b * weight),
        255,
    )
def rgba_to_rgb(rgba):
    """Drop the alpha channel from an RGBA tuple."""
    return tuple(rgba[:3])
def draw_rivers_on_image(world, target, factor=1):
    """Overlay rivers and lakes on an already painted background image.

    Each land cell with a river gets dark blue, each land cell with a
    lake gets lighter blue; ``factor`` upscales every cell to a
    factor x factor pixel block.
    """
    river_color = (0, 0, 128, 255)
    lake_color = (0, 100, 128, 255)
    for y in range(world.height):
        for x in range(world.width):
            if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0):
                for dx in range(factor):
                    for dy in range(factor):
                        target.set_pixel(x * factor + dx, y * factor + dy,
                                         river_color)
            if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0):
                for dx in range(factor):
                    for dy in range(factor):
                        target.set_pixel(x * factor + dx, y * factor + dy,
                                         lake_color)
# -------------------
# Drawing ancient map
# -------------------
def _find_mountains_mask(world, factor):
    """Return an upscaled float mask that is non-zero over mountain areas."""
    mask = numpy.zeros((world.height, world.width), float)
    mask[world.elevation > world.get_mountain_level()] = 1.0
    # elevated oceans must not count as mountains
    mask[world.ocean] = 0.0
    # counting neighbours is fast but not 100% precise, and the steps
    # below are fiendishly sensitive to precision errors -- hence the
    # rounding before thresholding
    mask[mask > 0] = numpy.around(count_neighbours(mask, 3)[mask > 0], 6)
    mask[mask < 32.000000001] = 0.0
    mask /= 4.0
    return mask.repeat(factor, 0).repeat(factor, 1)
def _build_biome_group_masks(world, factor):
    """Build an upscaled presence mask for every biome group.

    Returns a dict mapping the un-camelized group name to a float mask
    that is non-zero where the group dominates the neighbourhood.
    """
    masks = {}
    for group in BiomeGroup.__subclasses__():
        mask = numpy.zeros((world.height, world.width), float)
        for biome in group.__subclasses__():
            mask[world.biome == _un_camelize(biome.__name__)] += 1.0
        # keep only cells whose neighbourhood is dominated by the group
        mask[mask > 0] = count_neighbours(mask)[mask > 0]
        mask[mask < 5.000000001] = 0.0
        mask = mask.repeat(factor, 0).repeat(factor, 1)
        masks[_un_camelize(group.__name__)] = mask
    return masks
def _draw_shaded_pixel(pixels, x, y, r, g, b):
nb = (x ** int(y / 5) + x * 23 + y * 37 + (x * y) * 13) % 75
nr = r - nb
ng = g - nb
nb = b - nb
pixels[y, x] = (nr, ng, nb, 255)
def _draw_forest_pattern1(pixels, x, y, c, c2):
pixels[y - 4, x + 0] = c
pixels[y - 3, x + 0] = c
pixels[y - 2, x - 1] = c
pixels[y - 2, x + 1] = c
pixels[y - 1, x - 1] = c
pixels[y - 1, x + 1] = c
pixels[y + 0, x - 2] = c
pixels[y + 0, x + 1] = c
pixels[y + 0, x + 2] = c
pixels[y + 1, x - 2] = c
pixels[y + 1, x + 2] = c
pixels[y + 2, x - 3] = c
pixels[y + 2, x - 1] = c
pixels[y + 2, x + 3] = c
pixels[y + 3, x - 3] = c
pixels[y + 3, x - 2] = c
pixels[y + 3, x - 1] = c
pixels[y + 3, x - 0] = c
pixels[y + 3, x + 1] = c
pixels[y + 3, x + 2] = c
pixels[y + 3, x + 3] = c
pixels[y + 4, x - 0] = c
pixels[y - 2, x + 0] = c2
pixels[y - 1, x + 0] = c2
pixels[y - 0, x - 1] = c2
pixels[y - 0, x - 0] = c2
pixels[y + 1, x - 1] = c2
pixels[y + 1, x - 0] = c2
pixels[y + 1, x + 1] = c2
pixels[y + 2, x - 2] = c2
pixels[y + 2, x - 0] = c2
pixels[y + 2, x + 1] = c2
pixels[y + 2, x + 2] = c2
def _draw_forest_pattern2(pixels, x, y, c, c2):
pixels[y - 4, x - 1] = c
pixels[y - 4, x - 0] = c
pixels[y - 4, x + 1] = c
pixels[y - 3, x - 2] = c
pixels[y - 3, x - 1] = c
pixels[y - 3, x + 2] = c
pixels[y - 2, x - 2] = c
pixels[y - 2, x + 1] = c
pixels[y - 2, x + 2] = c
pixels[y - 1, x - 2] = c
pixels[y - 1, x + 2] = c
pixels[y - 0, x - 2] = c
pixels[y - 0, x - 1] = c
pixels[y - 0, x + 2] = c
pixels[y + 1, x - 2] = c
pixels[y + 1, x + 1] = c
pixels[y + 1, x + 2] = c
pixels[y + 2, x - 1] = c
pixels[y + 2, x - 0] = c
pixels[y + 2, x + 1] = c
pixels[y + 3, x - 0] = c
pixels[y + 4, x - 0] = c
pixels[y - 3, x + 0] = c2
pixels[y - 3, x + 1] = c2
pixels[y - 2, x - 1] = c2
pixels[y - 2, x - 0] = c2
pixels[y - 1, x - 1] = c2
pixels[y - 1, x - 0] = c2
pixels[y - 1, x + 1] = c2
pixels[y - 0, x - 0] = c2
pixels[y - 0, x + 1] = c2
pixels[y + 1, x - 1] = c2
pixels[y + 1, x - 0] = c2
def _draw_desert_pattern(pixels, x, y, c):
pixels[y - 2, x - 1] = c
pixels[y - 2, x - 0] = c
pixels[y - 2, x + 1] = c
pixels[y - 2, x + 1] = c
pixels[y - 2, x + 2] = c
pixels[y - 1, x - 2] = c
pixels[y - 1, x - 1] = c
pixels[y - 1, x - 0] = c
pixels[y - 1, x + 4] = c
pixels[y - 0, x - 4] = c
pixels[y - 0, x - 3] = c
pixels[y - 0, x - 2] = c
pixels[y - 0, x - 1] = c
pixels[y - 0, x + 1] = c
pixels[y - 0, x + 2] = c
pixels[y - 0, x + 6] = c
pixels[y + 1, x - 5] = c
pixels[y + 1, x - 0] = c
pixels[y + 1, x + 7] = c
pixels[y + 1, x + 8] = c
pixels[y + 2, x - 8] = c
pixels[y + 2, x - 7] = c
def _draw_glacier(pixels, x, y):
rg = 255 - (x ** int(y / 5) + x * 23 + y * 37 + (x * y) * 13) % 75
pixels[y, x] = (rg, rg, 255, 255)
def _draw_cold_parklands(pixels, x, y, w, h):
b = (x ** int(y / 5) + x * 23 + y * 37 + (x * y) * 13) % 75
r = 105 - b
g = 96 - b
b = 38 - int(b / 2)
pixels[y, x] = (r, g, b, 255)
def _draw_boreal_forest(pixels, x, y, w, h):
    """Boreal forest: dark green tree glyph (pattern 1)."""
    _draw_forest_pattern1(pixels, x, y, (0, 32, 0, 255), (0, 64, 0, 255))
def _draw_warm_temperate_forest(pixels, x, y, w, h):
    """Warm temperate forest: bright green tree glyph (pattern 2)."""
    _draw_forest_pattern2(pixels, x, y, (0, 96, 0, 255), (0, 192, 0, 255))
def _draw_temperate_forest1(pixels, x, y, w, h):
    """Temperate forest variant 1: mid green tree glyph (pattern 1)."""
    _draw_forest_pattern1(pixels, x, y, (0, 64, 0, 255), (0, 96, 0, 255))
def _draw_temperate_forest2(pixels, x, y, w, h):
    """Temperate forest variant 2: mid green tree glyph (pattern 2)."""
    _draw_forest_pattern2(pixels, x, y, (0, 64, 0, 255), (0, 112, 0, 255))
def _draw_tropical_dry_forest(pixels, x, y, w, h):
    """Tropical dry forest: brown/olive tree glyph (pattern 2)."""
    _draw_forest_pattern2(pixels, x, y, (51, 36, 3, 255), (139, 204, 58, 255))
def _draw_jungle(pixels, x, y, w, h):
    """Jungle: saturated green tree glyph (pattern 2)."""
    _draw_forest_pattern2(pixels, x, y, (0, 128, 0, 255), (0, 255, 0, 255))
def _draw_cool_desert(pixels, x, y, w, h):
    """Cool desert: grey-brown dune squiggle."""
    # c2 = (219, 220, 200, 255) was defined but never used originally  # TODO
    _draw_desert_pattern(pixels, x, y, (72, 72, 53, 255))
def _draw_hot_desert(pixels, x, y, w, h):
    """Hot desert: grey-brown dune squiggle (same palette as cool desert)."""
    # c2 = (219, 220, 200, 255) was defined but never used originally  # TODO
    _draw_desert_pattern(pixels, x, y, (72, 72, 53, 255))
def _draw_tundra(pixels, x, y, w, h):
    """Tundra: single shaded khaki pixel."""
    _draw_shaded_pixel(pixels, x, y, 166, 148, 75)
def _draw_steppe(pixels, x, y, w, h):
    """Steppe: single shaded pale-green pixel."""
    _draw_shaded_pixel(pixels, x, y, 96, 192, 96)
def _draw_chaparral(pixels, x, y, w, h):
    """Chaparral: single shaded tan pixel."""
    _draw_shaded_pixel(pixels, x, y, 180, 171, 113)
def _draw_savanna(pixels, x, y, w, h):
    """Savanna: single shaded pale-yellow pixel."""
    _draw_shaded_pixel(pixels, x, y, 255, 246, 188)
# TODO: complete and enable this one
def _dynamic_draw_a_mountain(pixels, rng, x, y, w=3, h=3):
    """Draw a mountain glyph with randomly jittered edges.

    Fixes over the original (which was marked TODO and disabled):
    the right edge now draws at the computed ``x + modx`` instead of
    reusing the stale left-edge loop variable ``itx``; all jitter comes
    from the supplied ``rng`` (the original mixed in ``numpy.random``,
    breaking reproducibility); and the jittered borders are kept
    integral so they are usable in ``range()`` and as array indices
    (``rng.randint(-2, 2) / 2`` produced floats such as 0.5).

    Parameters
    ----------
    pixels : array-like
        RGBA pixel buffer indexed as ``pixels[row, col]``.
    rng : numpy.random.RandomState
        Source of randomness for the edge jitter.
    x, y : int
        Centre of the glyph.
    w, h : int
        Half-width and half-height of the glyph.
    """
    mcr = (75, 75, 75, 255)
    # left edge: shaded slope with a jittered, smoothness-constrained border
    last_leftborder = None
    for mody in range(-h, h + 1):
        bottomness = (float(mody + h) / 2.0) / w
        min_leftborder = int(bottomness * w * 0.66)
        if last_leftborder is not None:
            min_leftborder = max(min_leftborder, last_leftborder - 1)
        max_leftborder = int(bottomness * w * 1.33)
        if last_leftborder is not None:
            max_leftborder = min(max_leftborder, last_leftborder + 1)
        # jitter by up to half a pixel before truncating to an int border
        leftborder = int(bottomness * w + rng.randint(-2, 2) / 2)
        leftborder = max(min_leftborder, min(max_leftborder, leftborder))
        last_leftborder = leftborder
        darkarea = int(bottomness * w / 2)
        lightarea = int(bottomness * w / 2)
        for itx in range(darkarea, leftborder + 1):
            pixels[y + mody, x - itx] = gradient(itx, darkarea, leftborder,
                                                 (0, 0, 0), (64, 64, 64))
        for itx in range(-darkarea, lightarea + 1):
            pixels[y + mody, x - itx] = gradient(itx, -darkarea, lightarea,
                                                 (64, 64, 64), (128, 128, 128))
        for itx in range(lightarea, leftborder):
            pixels[y + mody, x - itx] = (181, 166, 127, 255)  # land_color
    # right edge: plain grey ridge line with the same jitter scheme
    last_modx = None
    for mody in range(-h, h + 1):
        bottomness = (float(mody + h) / 2.0) / w
        min_modx = int(bottomness * w * 0.66)
        if last_modx is not None:
            min_modx = max(min_modx, last_modx - 1)
        max_modx = int(bottomness * w * 1.33)
        if last_modx is not None:
            max_modx = min(max_modx, last_modx + 1)
        modx = int(bottomness * w + rng.randint(-2, 2) / 2)
        modx = max(min_modx, min(max_modx, modx))
        last_modx = modx
        pixels[y + mody, x + modx] = mcr
def _draw_a_mountain(pixels, x, y, w=3, h=3):
    """Draw a deterministic mountain glyph centred at (x, y).

    The left slope is shaded from black through grey to the land color;
    the right edge is a plain grey ridge line.
    """
    ridge_color = (75, 75, 75, 255)
    # shaded left slope
    for mody in range(-h, h + 1):
        bottomness = (float(mody + h) / 2.0) / w
        leftborder = int(bottomness * w)
        darkarea = int(bottomness * w / 2)
        lightarea = int(bottomness * w / 2)
        for itx in range(darkarea, leftborder + 1):
            pixels[y + mody, x - itx] = gradient(itx, darkarea, leftborder,
                                                 (0, 0, 0), (64, 64, 64))
        for itx in range(-darkarea, lightarea + 1):
            pixels[y + mody, x + itx] = gradient(itx, -darkarea, lightarea,
                                                 (64, 64, 64), (128, 128, 128))
        for itx in range(lightarea, leftborder):
            pixels[y + mody, x + itx] = (181, 166, 127, 255)  # land_color
    # plain right ridge line
    for mody in range(-h, h + 1):
        bottomness = (float(mody + h) / 2.0) / w
        pixels[y + mody, x + int(bottomness * w)] = ridge_color
def draw_ancientmap(world, target, resize_factor=1,
                    sea_color=(212, 198, 169, 255),
                    draw_biome = True, draw_rivers = True, draw_mountains = True,
                    draw_outer_land_border = False, verbose=get_verbose()):
    """Render an 'ancient map' style view of *world* into *target*.

    target: RGBA pixel buffer written channel-by-channel via
        target[:, :, c]; presumably shaped
        (resize_factor * world.height, resize_factor * world.width, 4)
        -- TODO confirm against callers.
    resize_factor: integer upscale applied to every world-sized mask.
    draw_biome / draw_rivers / draw_mountains / draw_outer_land_border:
        toggles for the individual decoration passes below.
    verbose: when True, prints per-phase timings.
        NOTE(review): the default is evaluated once, at definition time.
    """
    rng = numpy.random.RandomState(world.seed) # create our own random generator
    if verbose:
        start_time = time.time()
    land_color = (
        181, 166, 127, 255) # TODO: Put this in the argument list too??
    # Upscale the ocean mask to output resolution.
    scaled_ocean = world.ocean.repeat(resize_factor, 0).repeat(resize_factor, 1)
    # A border (coastline) cell: not ocean itself, but adjacent to ocean.
    borders = numpy.zeros((resize_factor * world.height, resize_factor * world.width), bool)
    borders[count_neighbours(scaled_ocean) > 0] = True
    borders[scaled_ocean] = False
    # cache neighbours count at different radii
    border_neighbours = {}
    border_neighbours[6] = numpy.rint(count_neighbours(borders, 6))
    border_neighbours[9] = numpy.rint(count_neighbours(borders, 9))
    if draw_outer_land_border:
        # Grow the coastline outwards into the ocean by two one-cell rings;
        # each pass dilates the previous ring and keeps only ocean cells.
        inner_borders = borders
        outer_borders = None
        for i in range(2):
            _outer_borders = numpy.zeros((resize_factor * world.height, resize_factor * world.width), bool)
            _outer_borders[count_neighbours(inner_borders) > 0] = True
            _outer_borders[inner_borders] = False
            _outer_borders[numpy.logical_not(scaled_ocean)] = False
            outer_borders = _outer_borders
            inner_borders = outer_borders
    if draw_mountains:
        mountains_mask = _find_mountains_mask(world, resize_factor)
    if draw_biome:
        biome_masks = _build_biome_group_masks(world, resize_factor)
    # Stamp an icon for biome group *name* wherever its mask is set, skipping
    # cells near a coastline (border_neighbours[r] > 2).  Each stamp clears a
    # (2r+1)^2 window of the mask so icons do not overlap; _alt_func, when
    # given, is drawn instead of _func roughly half of the time.
    def _draw_biome(name, _func, w, h, r, _alt_func = None):
        if verbose:
            start_time = time.time()
        for y in range(resize_factor * world.height):
            for x in range(resize_factor * world.width):
                if biome_masks[name][y, x] > 0:
                    if r == 0 or border_neighbours[r][y,x] <= 2:
                        if _alt_func is not None and rng.random_sample() > .5:
                            _alt_func(target, x, y, w, h)
                        else:
                            _func(target, x, y, w, h)
                        biome_masks[name][y-r:y+r+1,x-r:x+r+1] = 0.0
        if verbose:
            elapsed_time = time.time() - start_time
            print(
                "...drawing_functions.draw_ancientmap: " + name +
                " Elapsed time " + str(elapsed_time) + " seconds.")
    if verbose:
        elapsed_time = time.time() - start_time
        print(
            "...drawing_functions.draw_oldmap_on_pixel: init Elapsed time " +
            str(elapsed_time) + " seconds.")
    sys.stdout.flush()
    if verbose:
        start_time = time.time()
    # Base colouring: land everywhere, then ocean, then borders on top.
    border_color = (0, 0, 0, 255)
    outer_border_color = gradient(0.5, 0, 1.0, rgba_to_rgb(border_color), rgba_to_rgb(sea_color))
    # start in low resolution
    num_channels = 4
    channels = numpy.zeros((num_channels, world.height, world.width), int)
    for c in range(num_channels):
        channels[c] = land_color[c]
        channels[c][world.ocean] = sea_color[c]
    # now go full resolution
    channels = channels.repeat(resize_factor, 1).repeat(resize_factor, 2)
    if draw_outer_land_border:
        for c in range(num_channels):
            channels[c][outer_borders] = outer_border_color[c]
    for c in range(num_channels):
        channels[c][borders] = border_color[c]
    if verbose:
        elapsed_time = time.time() - start_time
        print(
            "...drawing_functions.draw_oldmap_on_pixel: color ocean " +
            "Elapsed time " + str(elapsed_time) + " seconds.")
    if verbose:
        start_time = time.time()
    # don't anti-alias the alpha channel
    for c in range(num_channels-1):
        channels[c] = anti_alias_channel(channels[c], 1)
    # switch from channel major storage to pixel major storage
    for c in range(num_channels):
        target[:,:,c] = channels[c,:,:]
    if verbose:
        elapsed_time = time.time() - start_time
        print(
            "...drawing_functions.draw_oldmap_on_pixel: anti alias " +
            "Elapsed time " + str(elapsed_time) + " seconds.")
    if draw_biome:
        # Draw glacier
        if verbose:
            start_time = time.time()
        for y in range(resize_factor * world.height):
            for x in range(resize_factor * world.width):
                if not borders[y, x] and world.is_iceland(
                    (int(x / resize_factor), int(y / resize_factor))):
                    _draw_glacier(target, x, y)
        if verbose:
            elapsed_time = time.time() - start_time
            print(
                "...drawing_functions.draw_oldmap_on_pixel: draw glacier " +
                "Elapsed time " + str(elapsed_time) + " seconds.")
        # Icon passes: (name, draw func, icon width, icon height, clearing radius).
        _draw_biome('tundra', _draw_tundra, 0, 0, 0)
        _draw_biome('cold parklands', _draw_cold_parklands, 0, 0, 0)
        _draw_biome('steppe', _draw_steppe, 0, 0, 0)
        _draw_biome('chaparral', _draw_chaparral, 0, 0, 0)
        _draw_biome('savanna', _draw_savanna, 0, 0, 0)
        _draw_biome('cool desert', _draw_cool_desert, 8, 2, 9)
        _draw_biome('hot desert', _draw_hot_desert, 8, 2, 9)
        _draw_biome('boreal forest', _draw_boreal_forest, 4, 5, 6)
        _draw_biome('cool temperate forest', _draw_temperate_forest1, 4, 5, 6,
                    _draw_temperate_forest2)
        _draw_biome('warm temperate forest', _draw_warm_temperate_forest, 4, 5, 6)
        _draw_biome('tropical dry forest group', _draw_tropical_dry_forest, 4, 5, 6)
        _draw_biome('jungle', _draw_jungle, 4, 5, 6)
        # TODO: there was a stub for a rock desert biome group
        # it should be super easy to introduce that group with the new
        # biome group concept but since it did nothing I removed the stub
    if draw_rivers:
        draw_rivers_on_image(world, target, resize_factor)
    # Draw mountains
    if draw_mountains:
        if verbose:
            start_time = time.time()
        for y in range(resize_factor * world.height):
            for x in range(resize_factor * world.width):
                if mountains_mask[y, x] > 0:
                    # Icon width comes from the mask value, height from the
                    # world's mountain level at this cell.
                    w = mountains_mask[y, x]
                    h = 3 + int(world.level_of_mountain(
                        (int(x / resize_factor), int(y / resize_factor))))
                    r = max(int(w / 3 * 2), h)
                    # Lazily cache neighbour counts for radii not seen yet.
                    if r not in border_neighbours:
                        border_neighbours[r] = numpy.rint(count_neighbours(borders, r))
                    if border_neighbours[r][y,x] <= 2:
                        _draw_a_mountain(target, x, y, w=w, h=h)
                    mountains_mask[y-r:y+r+1,x-r:x+r+1] = 0.0
        if verbose:
            elapsed_time = time.time() - start_time
            print(
                "...drawing_functions.draw_oldmap_on_pixel: draw mountains " +
                "Elapsed time " + str(elapsed_time) + " seconds.")
| [
"worldengine.common.anti_alias",
"numpy.logical_not",
"numpy.zeros",
"worldengine.biome._un_camelize",
"numpy.random.RandomState",
"time.time",
"worldengine.common.count_neighbours",
"sys.stdout.flush",
"numpy.random.randint",
"worldengine.biome.BiomeGroup.__subclasses__",
"worldengine.common.ge... | [((1749, 1796), 'numpy.zeros', 'numpy.zeros', (['(world.height, world.width)', 'float'], {}), '((world.height, world.width), float)\n', (1760, 1796), False, 'import numpy\n'), ((2330, 2357), 'worldengine.biome.BiomeGroup.__subclasses__', 'BiomeGroup.__subclasses__', ([], {}), '()\n', (2355, 2357), False, 'from worldengine.biome import BiomeGroup, _un_camelize\n'), ((11284, 11297), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (11295, 11297), False, 'from worldengine.common import get_verbose, count_neighbours\n'), ((11310, 11346), 'numpy.random.RandomState', 'numpy.random.RandomState', (['world.seed'], {}), '(world.seed)\n', (11334, 11346), False, 'import numpy\n'), ((11626, 11704), 'numpy.zeros', 'numpy.zeros', (['(resize_factor * world.height, resize_factor * world.width)', 'bool'], {}), '((resize_factor * world.height, resize_factor * world.width), bool)\n', (11637, 11704), False, 'import numpy\n'), ((14100, 14159), 'numpy.zeros', 'numpy.zeros', (['(num_channels, world.height, world.width)', 'int'], {}), '((num_channels, world.height, world.width), int)\n', (14111, 14159), False, 'import numpy\n'), ((2433, 2480), 'numpy.zeros', 'numpy.zeros', (['(world.height, world.width)', 'float'], {}), '((world.height, world.width), float)\n', (2444, 2480), False, 'import numpy\n'), ((11420, 11431), 'time.time', 'time.time', ([], {}), '()\n', (11429, 11431), False, 'import time\n'), ((11909, 11937), 'worldengine.common.count_neighbours', 'count_neighbours', (['borders', '(6)'], {}), '(borders, 6)\n', (11925, 11937), False, 'from worldengine.common import get_verbose, count_neighbours\n'), ((11977, 12005), 'worldengine.common.count_neighbours', 'count_neighbours', (['borders', '(9)'], {}), '(borders, 9)\n', (11993, 12005), False, 'from worldengine.common import get_verbose, count_neighbours\n'), ((13831, 13849), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13847, 13849), False, 'import sys\n'), ((13888, 13899), 
'time.time', 'time.time', ([], {}), '()\n', (13897, 13899), False, 'import time\n'), ((14855, 14866), 'time.time', 'time.time', ([], {}), '()\n', (14864, 14866), False, 'import time\n'), ((14967, 15001), 'worldengine.common.anti_alias', 'anti_alias_channel', (['channels[c]', '(1)'], {}), '(channels[c], 1)\n', (14985, 15001), True, 'from worldengine.common import anti_alias as anti_alias_channel\n'), ((2097, 2123), 'worldengine.common.count_neighbours', 'count_neighbours', (['_mask', '(3)'], {}), '(_mask, 3)\n', (2113, 2123), False, 'from worldengine.common import get_verbose, count_neighbours\n'), ((2651, 2679), 'worldengine.common.count_neighbours', 'count_neighbours', (['group_mask'], {}), '(group_mask)\n', (2667, 2679), False, 'from worldengine.common import get_verbose, count_neighbours\n'), ((2834, 2862), 'worldengine.biome._un_camelize', '_un_camelize', (['group.__name__'], {}), '(group.__name__)\n', (2846, 2862), False, 'from worldengine.biome import BiomeGroup, _un_camelize\n'), ((11717, 11747), 'worldengine.common.count_neighbours', 'count_neighbours', (['scaled_ocean'], {}), '(scaled_ocean)\n', (11733, 11747), False, 'from worldengine.common import get_verbose, count_neighbours\n'), ((12158, 12236), 'numpy.zeros', 'numpy.zeros', (['(resize_factor * world.height, resize_factor * world.width)', 'bool'], {}), '((resize_factor * world.height, resize_factor * world.width), bool)\n', (12169, 12236), False, 'import numpy\n'), ((13660, 13671), 'time.time', 'time.time', ([], {}), '()\n', (13669, 13671), False, 'import time\n'), ((14642, 14653), 'time.time', 'time.time', ([], {}), '()\n', (14651, 14653), False, 'import time\n'), ((15186, 15197), 'time.time', 'time.time', ([], {}), '()\n', (15195, 15197), False, 'import time\n'), ((15448, 15459), 'time.time', 'time.time', ([], {}), '()\n', (15457, 15459), False, 'import time\n'), ((17220, 17231), 'time.time', 'time.time', ([], {}), '()\n', (17229, 17231), False, 'import time\n'), ((9724, 9751), 
'numpy.random.randint', 'numpy.random.randint', (['(-2)', '(2)'], {}), '(-2, 2)\n', (9744, 9751), False, 'import numpy\n'), ((12385, 12416), 'numpy.logical_not', 'numpy.logical_not', (['scaled_ocean'], {}), '(scaled_ocean)\n', (12402, 12416), False, 'import numpy\n'), ((12811, 12822), 'time.time', 'time.time', ([], {}), '()\n', (12820, 12822), False, 'import time\n'), ((15800, 15811), 'time.time', 'time.time', ([], {}), '()\n', (15809, 15811), False, 'import time\n'), ((17986, 17997), 'time.time', 'time.time', ([], {}), '()\n', (17995, 17997), False, 'import time\n'), ((2563, 2591), 'worldengine.biome._un_camelize', '_un_camelize', (['biome.__name__'], {}), '(biome.__name__)\n', (2575, 2591), False, 'from worldengine.biome import BiomeGroup, _un_camelize\n'), ((12264, 12295), 'worldengine.common.count_neighbours', 'count_neighbours', (['inner_borders'], {}), '(inner_borders)\n', (12280, 12295), False, 'from worldengine.common import get_verbose, count_neighbours\n'), ((13430, 13441), 'time.time', 'time.time', ([], {}), '()\n', (13439, 13441), False, 'import time\n'), ((17722, 17750), 'worldengine.common.count_neighbours', 'count_neighbours', (['borders', 'r'], {}), '(borders, r)\n', (17738, 17750), False, 'from worldengine.common import get_verbose, count_neighbours\n')] |
import pygame
import numpy as np
# Drawing the sudoku grid
def draw_the_grid():
    """Paint the 9x9 board and cache rendered digit surfaces.

    For every cell holding a digit 1-9, pre-renders the digit into the
    module-level ``number`` cache (the main loop blits it later), then
    draws the cell's black background rectangle.
    """
    col_step = MARGIN + WIDTH    # horizontal distance between cell origins
    row_step = MARGIN + HEIGHT   # vertical distance between cell origins
    for r in range(9):
        for c in range(9):
            value = grid[r][c]
            if value in range(1, 10):
                # cache the rendered digit; 0 means "empty", nothing cached
                number[r][c] = myfont.render(str(value), False, (255, 255, 255))
            pygame.draw.rect(screen, BLACK,[col_step * c + MARGIN,row_step * r + MARGIN,WIDTH,HEIGHT])
def init(array):
    """Solve the 81-cell sudoku in *array* (row-major, 0 = empty) and draw progress.

    First runs constraint propagation (candidate elimination per line /
    column / 3x3 box); if that stalls before the board is full, falls
    back to brute-force backtracking over the remaining empty cells.
    Draws intermediate boards through the module-level pygame objects.
    """
    sudoku_array = array
    # Sentinel exception used to escape the backtracking driver loop.
    class END(Exception): pass
    # (x, y) with y counted from the bottom -> cell value / flat index.
    def xy_to_number(x, y): return sudoku_array[(9*(8-y)+x)]
    def xy_to_location(x, y): return (9*(8-y)+x)
    # (row, col) counted from the top -> flat index.
    def xy_to_location2(y, x): return (9*(y)+x)
    #CHECK IF THE X LINE MEETS THE RULES
    # True when *number* already appears elsewhere in the vertical or
    # horizontal line through (x, y).
    def is_in_my_line(x, y, number):
        for columns in range(9):
            if(columns != y):
                if (xy_to_number(x, columns) == number): return True
        for rows in range(9):
            if(rows != x):
                if (xy_to_number(rows, y) == number): return True
        return False
    #CHECK IF THE SQUARE MEETS THE RULES
    # True when *number* occurs anywhere in the 3x3 box containing
    # (puvodx, puvody) -- including that cell itself ('puvod' = Czech 'origin').
    def my_square(puvodx, puvody, number):
        if (puvodx/3 < 1): square_x = 0
        elif (puvodx/3 >= 2): square_x = 2
        else: square_x = 1
        if (puvody/3 < 1): square_y = 0
        elif (puvody/3 >= 2): square_y = 2
        else: square_y = 1
        for x in range(3*square_x, 3*square_x+3):
            for y in range(3*square_y, 3*square_y+3):
                if(xy_to_number(x, y) == number): return True
        return False
    #CREATE OPTIONS
    # Per-cell candidates: a list [1..9] for empty cells, the fixed digit
    # otherwise.  NOTE(review): every empty cell shares the SAME
    # alloptions list object; this works only because odstran() below
    # returns a fresh list rather than mutating in place.
    def create_options(array):
        options = []
        alloptions = [1,2,3,4,5,6,7,8,9]
        for x in range(len(array)):
            if array[x] == 0: options.append(alloptions)
            else: options.append(array[x])
        return options
    # 'odstran' = Czech 'remove': return a copy of *array* with *number*
    # taken out.  NOTE(review): the loop bound is fixed at the original
    # length, so after np.delete() shrinks the array, custom_array[i] can
    # raise IndexError when the removed element was not the last one; the
    # `return 0` for an emptied array also changes the return type -- verify.
    def odstran(number, array):
        custom_array = array
        if(isinstance(custom_array, list)):
            for i in range(len(custom_array)):
                if len(custom_array) == 0: return 0
                if custom_array[i] == number:
                    index = [i]
                    custom_array = np.delete(custom_array, index)
            return list(custom_array)
        return list(array)
    # Number of still-empty cells.
    def check(sudoku_array):
        counter = 0
        for i in range(len(sudoku_array)):
            if(sudoku_array[i] == 0): counter = counter + 1
        return counter
    # True when *array* contains at least one duplicate value.
    def check_duplicates(array):
        if len(array) == len(set(array)): return False
        else: return True
    # Validate each row of 9 consecutive cells; zeros shrink the expected
    # count so partially-filled rows are still checked for duplicates.
    def check_lines(array):
        numbers_in_line = []
        count_numbers_in_line = 9
        for line in range(81):
            if array[line] != 0 and len(numbers_in_line) != count_numbers_in_line: numbers_in_line.append(array[line])
            elif array[line] == 0 and len(numbers_in_line) != count_numbers_in_line: count_numbers_in_line = count_numbers_in_line - 1
            if len(numbers_in_line) == count_numbers_in_line:
                if check_duplicates(numbers_in_line) == True: return False
                count_numbers_in_line = 9
                numbers_in_line = []
        return True
    # Same duplicate check, but walking columns (stride-9 access).
    def check_rows(array):
        numbers__in_columns = []
        amount_of_numbers_in_columns = 9
        for line in range(9):
            for row in range(9):
                if array[9*row+line] != 0 and len(numbers__in_columns) != amount_of_numbers_in_columns: numbers__in_columns.append(array[9*row+line])
                elif array[9*row+line] == 0 and len(numbers__in_columns) != amount_of_numbers_in_columns: amount_of_numbers_in_columns = amount_of_numbers_in_columns - 1
            if len(numbers__in_columns) == amount_of_numbers_in_columns:
                if check_duplicates(numbers__in_columns) == True: return False
                amount_of_numbers_in_columns = 9
                numbers__in_columns = []
        return True
    # Same duplicate check over each of the nine 3x3 boxes.
    def check_squares(array):
        numbers__in_columns = []
        amount_of_numbers_in_columns = 9
        for y in range(3):
            for x in range(3):
                for a in range(3):
                    for b in range(3):
                        if array[xy_to_location(a + x*3, b + y*3)] != 0 and len(numbers__in_columns) != amount_of_numbers_in_columns: numbers__in_columns.append(array[xy_to_location(a + x*3, b + y*3)])
                        elif array[xy_to_location(a + x*3, b + y*3)] == 0 and len(numbers__in_columns) != amount_of_numbers_in_columns: amount_of_numbers_in_columns = amount_of_numbers_in_columns - 1
                if len(numbers__in_columns) == amount_of_numbers_in_columns:
                    if check_duplicates(numbers__in_columns) == True: return False
                    amount_of_numbers_in_columns = 9
                    numbers__in_columns = []
        return True
    #CHECK IF THE SUDOKU ARRAY DOESN'T BREAK ANY RULES
    def check2(array):
        if check_rows(array) == True and check_lines(array) == True and check_squares(array) == True: return True
        return False
    #DELETE ALL FALSE OPTIONS FOR INDIVIDUAL BOXES
    # NOTE(review): c iterates 0..9; 0 is never a placed digit, so that
    # first iteration is a harmless no-op.
    def delete_options(options):
        for a in range(9):
            for b in range(9):
                for c in range(10):
                    if(xy_to_number(a, b) == 0):
                        if(is_in_my_line(a,b,c) == True):
                            position = xy_to_location(a, b)
                            options[position] = odstran(c, options[position])
                        elif(my_square(a,b,c) == True):
                            position = xy_to_location(a, b)
                            options[position] = odstran(c, options[position])
        return options
    # 'vykresleni' = Czech 'rendering': commit every cell whose candidate
    # list shrank to exactly one entry.
    def vykresleni_do_sudoku_array(options, sudoku_array):
        for a in range(81):
            if(isinstance(options[a], list) == True):
                if(len(options[a]) == 1):
                    styl = options[a]
                    sudoku_array[a] = styl[0]
        return sudoku_array
    # Blit every solved digit in yellow and refresh the display.
    def draw_sudoku_array(array):
        for row in range(9):
            for column in range(9):
                if array[xy_to_location2(row, column)] != 0:
                    number[row][column] = myfont.render(str(array[xy_to_location2(row, column)]), False, YELLOW)
                    screen.blit(number[row][column],(column*50 + 20 + MARGIN*column,row*50 + 10 + MARGIN*row))
        pygame.display.flip()
    # Constraint-propagation phase: repeat candidate elimination until the
    # board is (almost) full or a pass makes no progress
    # ('predchozi_vysledek' = Czech 'previous result' = empties last pass).
    predchozi_vysledek = 0
    options = create_options(sudoku_array)
    while(check(sudoku_array) > 1):
        if predchozi_vysledek == check(sudoku_array): break
        predchozi_vysledek = check(sudoku_array)
        options = delete_options(options)
        sudoku_array = vykresleni_do_sudoku_array(options, sudoku_array)
        print(sudoku_array)
        draw_the_grid()
        draw_sudoku_array(sudoku_array)
    if check(sudoku_array) == 0: print(sudoku_array) #done
    else: #backtracking
        print("NOT COMPLETED YET -> backtracking")
        # NOTE(review): the generator yields the SAME list object twice, so
        # backup_sudoku_array is an alias of backtracking_sudoku_array
        # (and of sudoku_array), not an independent copy -- verify intent.
        backtracking_sudoku_array, backup_sudoku_array = (sudoku_array for i in range(2))
        backtracking_options, location_backtracking_options, my_options = ([] for i in range(3))
        my_position = 0
        # Candidate lists of the unresolved cells, each prefixed with a
        # 0 = 'nothing tried yet'; my_options[i] indexes into them and
        # location_backtracking_options[i] holds the cell's flat index.
        for i in range(len(options)):
            if(isinstance(options[i], list) and len(options[i]) > 1): backtracking_options.append(options[i])
        for i in range(len(backtracking_options)):
            backtracking_options[i] = [0] + backtracking_options[i]
            my_options.append(0)
        for i in range(len(sudoku_array)):
            if(sudoku_array[i] == 0): location_backtracking_options.append(i)
        # One backtracking step: on an invalid board advance the current
        # cell's candidate (or reset it and walk back to a cell that still
        # has untried candidates); on a valid board move forward.  Raises
        # END when the last cell is valid, i.e. the puzzle is solved.
        def backtracking(my_position):
            print(backtracking_sudoku_array)
            draw_the_grid()
            draw_sudoku_array(backtracking_sudoku_array)
            if check2(backtracking_sudoku_array) == False:
                if my_options[my_position] < len(backtracking_options[my_position]) - 1:
                    my_options[my_position] += 1
                    backtracking_sudoku_array[location_backtracking_options[my_position]] = backtracking_options[my_position][my_options[my_position]]
                elif my_options[my_position] == len(backtracking_options[my_position]) - 1:
                    backtracking_sudoku_array[location_backtracking_options[my_position]] = 0
                    my_options[my_position] = 0
                    backtracking_sudoku_array[location_backtracking_options[my_position]] = backtracking_options[my_position][my_options[my_position]]
                    my_position -= 1
                    is_done = False
                    while(is_done == False):
                        if my_options[my_position] < len(backtracking_options[my_position]) - 1:
                            my_options[my_position] += 1
                            backtracking_sudoku_array[location_backtracking_options[my_position]] = backtracking_options[my_position][my_options[my_position]]
                            is_done = True
                        else:
                            my_options[my_position] = 0
                            backtracking_sudoku_array[location_backtracking_options[my_position]] = backtracking_options[my_position][my_options[my_position]]
                            my_position -= 1
                return my_position
            else:
                if my_options[my_position] == 0:
                    my_options[my_position] += 1
                    backtracking_sudoku_array[location_backtracking_options[my_position]] = backtracking_options[my_position][my_options[my_position]]
                elif my_position < len(my_options) - 1:
                    my_position += 1
                    my_options[my_position] += 1
                    backtracking_sudoku_array[location_backtracking_options[my_position]] = backtracking_options[my_position][my_options[my_position]]
                else:
                    raise END
                return my_position
    # NOTE(review): this try/except sits OUTSIDE the else-branch above; if
    # the puzzle was already solved by propagation, `backtracking` and
    # `my_position` are undefined here -- check the intended indentation.
    try:
        while True:
            backup_sudoku_array = backtracking_sudoku_array
            my_position = backtracking(my_position)
    except END: pass
# Module-level UI state and layout constants.
sudoku_array = []
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
YELLOW = (244,175,27)
WIDTH = 50 # WIDTH of each square
HEIGHT = 50 # HEIGHT of each square
MARGIN = 2 # Margin between each cell
# Create a 2 dimensional array(grid) that we'll later fill with sudoku numbers #
# (each generator iteration yields a fresh list, so grid and number are
# distinct objects).  grid holds digits, number holds rendered surfaces.
# NOTE(review): both are 10x10 although the board is 9x9; the extra
# row/column absorbs clicks landing at index 9 in the main loop below.
grid, number = ([] for i in range(2))
for row in range(10):
    grid.append([])
    number.append([])
    for column in range(10):
        grid[row].append(0)
        number[row].append(0)
def erase():
    """Clear the whole board: zero every stored digit and drop every
    cached digit surface (10x10, including the spill-over row/column)."""
    for r in range(10):
        for c in range(10):
            grid[r][c] = number[r][c] = 0
pygame.init() # Initialize pygame (gui)
pygame.font.init()
myfont = pygame.font.SysFont('Arial', 30)
WINDOW_SIZE = [900, 475]
screen = pygame.display.set_mode(WINDOW_SIZE)
pygame.display.set_caption("Sudoku solver")
done = False
# -------- Main Loop until the user clicks the close button----------- #
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT: done = True # Flag that we are done -> we exit this loop
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # Get the mouse position
            pos = pygame.mouse.get_pos()
            # Change the x/y screen coordinates to grid coordinates
            column = pos[0] // (WIDTH + MARGIN)
            row = pos[1] // (HEIGHT + MARGIN)
            if column > 9 or row > 9:
                # Click landed outside the board: test the two buttons.
                if(pos[0] > 600 and pos[0] < 800 and pos[1] < 125 and pos[1] > 50):
                    # Solve the sudoku
                    # NOTE(review): sudoku_array is appended to without being
                    # cleared first, so a second SOLVE click feeds init() a
                    # 162-entry list -- verify intended behaviour.
                    for row in range(9):
                        for column in range(9):
                            sudoku_array.append(grid[row][column])
                    init(sudoku_array)
                elif(pos[0] > 600 and pos[0] < 800 and pos[1] < 205 and pos[1] > 130):
                    # Erase the sudoku board
                    erase()
                    sudoku_array = []
            elif grid[row][column] == 9:
                # Clicking a cell showing 9 wraps it back to empty.
                grid[row][column] = 0
                number[row][column] = myfont.render("", False, (255, 255, 255))
                screen.blit(number[row][column],(column*50 + 20 + MARGIN*column,row*50 + 10 + MARGIN*row))
            else: grid[row][column] += 1
    # Set the screen background
    screen.fill(WHITE)
    draw_the_grid()
    # Displaying sudoku numbers that we chose
    for row in range(9):
        for column in range(9):
            if number[row][column] != 0: screen.blit(number[row][column],(column*50 + 20 + MARGIN*column,row*50 + 10 + MARGIN*row))
    pygame.draw.rect(screen, BLACK,[600,50,200,75])
    pygame.draw.rect(screen, BLACK,[600,130,200,75])
    SOLVE = myfont.render("SOLVE", False, YELLOW)
    ERASE = myfont.render("ERASE", False, YELLOW)
    screen.blit(SOLVE,(650,75))
    screen.blit(ERASE,(650,150))
    # Updates the screen
    pygame.display.flip()
pygame.quit()
| [
"pygame.quit",
"pygame.font.SysFont",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.draw.rect",
"pygame.init",
"pygame.display.flip",
"pygame.font.init",
"pygame.mouse.get_pos",
"pygame.display.set_caption",
"numpy.delete"
] | [((11049, 11062), 'pygame.init', 'pygame.init', ([], {}), '()\n', (11060, 11062), False, 'import pygame\n'), ((11092, 11110), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (11108, 11110), False, 'import pygame\n'), ((11121, 11153), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', '(30)'], {}), "('Arial', 30)\n", (11140, 11153), False, 'import pygame\n'), ((11192, 11228), 'pygame.display.set_mode', 'pygame.display.set_mode', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (11215, 11228), False, 'import pygame\n'), ((11230, 11273), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Sudoku solver"""'], {}), "('Sudoku solver')\n", (11256, 11273), False, 'import pygame\n'), ((13356, 13369), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (13367, 13369), False, 'import pygame\n'), ((11401, 11419), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (11417, 11419), False, 'import pygame\n'), ((13026, 13077), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', '[600, 50, 200, 75]'], {}), '(screen, BLACK, [600, 50, 200, 75])\n', (13042, 13077), False, 'import pygame\n'), ((13079, 13131), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', '[600, 130, 200, 75]'], {}), '(screen, BLACK, [600, 130, 200, 75])\n', (13095, 13131), False, 'import pygame\n'), ((13331, 13352), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (13350, 13352), False, 'import pygame\n'), ((6568, 6589), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (6587, 6589), False, 'import pygame\n'), ((370, 493), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'color', '[(MARGIN + WIDTH) * column + MARGIN, (MARGIN + HEIGHT) * row + MARGIN,\n WIDTH, HEIGHT]'], {}), '(screen, color, [(MARGIN + WIDTH) * column + MARGIN, (\n MARGIN + HEIGHT) * row + MARGIN, WIDTH, HEIGHT])\n', (386, 493), False, 'import pygame\n'), ((11628, 11650), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (11648, 11650), False, 
'import pygame\n'), ((2211, 2241), 'numpy.delete', 'np.delete', (['custom_array', 'index'], {}), '(custom_array, index)\n', (2220, 2241), True, 'import numpy as np\n')] |
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from solo12_shoulder_collision_utils import followBoundary, colMapToDistField
# Load the collision map from file
# res selects which precomputed resolution to load from ./npy_data.
res = 200
col_map_file = './npy_data/collision_map_centered_res{}.npy'.format(res)
dist_field_file = './npy_data/updated_collision_map_distance_res{}.npy'.format(res)
# col_map: boolean-like collision grid; transposed so the axes match the
# distance field below (the dead code further down plots them together).
col_map = np.load(col_map_file, allow_pickle=True)
col_map = col_map.T
# dist_field: signed distance to the collision boundary (>0 = free space).
dist_field = np.load(dist_field_file, allow_pickle=True)
"""
traj1 = followBoundary(col_map.T)
traj2 = followBoundary(col_map.T, first_dir=2)
traj2 = [traj2[-i] for i in range(len(traj2))]
traj1X = np.array([t[0] for t in traj1])
traj1Y = np.array([t[1] for t in traj1])
traj2X = np.array([t[0] for t in traj2])
traj2Y = np.array([t[1] for t in traj2])
#traj2X = np.concatenate([traj2X, traj2X + len(traj2X), traj2X + 2*len(traj2X)])
#traj2Y = np.array(3*[t[1] for t in traj2])
def approxFourier(trajX, trajY, Nh, plot=True):
complexTraj = np.array(trajX + 1j*trajY)
period = len(complexTraj)
time = np.array([i for i in range(period)])
def cn(n):
c = complexTraj*np.exp(-1j*2*n*np.pi*time/period)
return c.sum()/c.size
def f(x, Nh):
f = np.array([cn(i)*np.exp(1j*2*i*np.pi*x/period) for i in range(-Nh-1,Nh+1)])
return f.sum()
traj_est = np.array([f(t,Nh) for t in time])
#plt.figure()
if(plot):
plt.plot(complexTraj.real - complexTraj.real[int(len(complexTraj)/3)], complexTraj.imag,
linewidth=2,
c='limegreen')
plt.plot(traj_est.real[10:-10] - complexTraj.real[int(len(complexTraj)/3)], traj_est.imag[10:-10], 'r-', linewidth=2,
label="Fourier series - {} harmonics".format(Nh))
#plt.plot(trajX, trajY_est)
return traj_est
def approxPolynom(trajX, trajY, deg, plot=True):
polynCoeffs = np.polyfit(trajX, trajY, deg)
polynEval = np.poly1d(polynCoeffs)
if(plot):
#plt.figure()
#plt.plot(trajX, trajY)
plt.plot(trajX, polynEval(trajX),linewidth=2, c='yellow',label="Polynom - deg. {}".format(deg))
return [trajX, polynEval(trajX)]
def newApproxFourier(colMap, Nh):
period = [len(colMap), len(colMap[0])]
xRange = np.linspace(0,period[0],period[0])
yRange = np.linspace(0,period[1],period[1])
gridX, gridY = np.meshgrid(xRange,yRange)
# Compute the 2D fourier coeff of index (i,j)
def c_mn(m,n):
c = (1./(4*np.pi*np.pi))*colMap*np.exp(-1j*2*np.pi*m*gridX/period[0])*np.exp(-1j*2*np.pi*n*gridY/period[1])
return c.sum()/c.size
# Evaluate f based on the coeffs
def f(x,y,Nh):
f = np.array([ [c_mn(k,l)*np.exp(1j*2*np.pi*l*x/period[0])*np.exp(1j*2*np.pi*k*y/period[1]) for l in range(-Nh-1, Nh+1)] for k in range(-Nh-1, Nh+1)])
return f.sum()
estim = [[f(x, y, Nh) for y in yRange] for x in xRange]
return estim
"""
"""
#print(traj2)
#plt.subplot(2,2,1)
#approxFourier(traj1, 50, plot=False)
#plt.subplot(2,2,2)
#approxPolynom(traj1, 10, plot=False)
#plt.subplot(2,2,3)
plt.figure()
plt.imshow(col_map)
plt.plot(traj1X, traj1Y, 'r')
#polynTraj1 = approxPolynom(traj1X, traj1Y, 101, plot=True)
#traj1X = np.concatenate([traj1X, traj1X + len(traj1X), traj1X + 2*len(traj1X)])
#traj1Y = np.array(3*[t[1] for t in traj1])
#fourierTraj1 = approxFourier(traj1X, traj1Y, 10, plot=True)
#fourierTraj2 = approxFourier(traj2X, traj2Y, 100, plot=True)
#plt.subplot(2,2,4)
#polynTraj2 = approxPolynom(traj2, 10, plot=True)
plt.legend()
plt.title('Collision boundary approximation')
plt.figure()
plt.grid(True)
plt.plot(traj1X, traj1Y + 2*len(col_map)/4)
#plt.plot(fourierTraj1.real[10:-10], fourierTraj1.imag[10:-10] + len(col_map)/4)
#plt.plot(polynTraj1[0] + fourierTraj1.real[int(len(fourierTraj1)/3)], polynTraj1[1])
#plt.plot(polynTraj1[0] , polynTraj1[1])
plt.show()
'''
'''
dist_field = np.array(colMapToDistField(col_map.T))
np.save('./npy_data/collision_map_distance_res1000', dist_field)
plt.figure()
plt.imshow(col_map.T)
plt.plot(traj1X, traj1Y,'r')
plt.plot(traj2X, traj2Y,'r')
plt.figure()
plt.imshow(dist_field, cmap=plt.cm.RdYlGn)
#plt.plot(traj1X, traj1Y, 'r')
#plt.plot(traj2X, traj2Y,'r')
plt.colorbar(label='Dist. to boundary')
plt.figure()
#cumul_dist_field = (dist_field > 0).astype(float) + (dist_field > 10) + (dist_field > 20) + (dist_field > 30) + (dist_field > 40)
cumul_dist_field = (dist_field > 0.1).astype(float) + (dist_field < -0.1)
plt.imshow(cumul_dist_field)
plt.show()
"""
def thresholdFilter(threshold, fft):
    """Keep only the FFT coefficients whose log-magnitude exceeds *threshold*.

    Coefficients with log(1 + |coeff|) <= threshold are zeroed; the rest
    pass through unchanged.
    """
    log_magnitude = np.log(1 + abs(fft))
    keep = log_magnitude > threshold
    return keep * fft
def vBandFilter(bandwidth, fft):
    """Suppress a vertical band of the centered spectrum.

    Shifts the zero frequency to the center, zeroes the first and last
    *bandwidth* columns, and shifts back.

    Bug fix: the original indexed columns with ``len(copy)`` (the number
    of ROWS), which is only correct for square inputs; use shape[1].
    """
    # fftshift returns a new array, so mutating it does not touch *fft*.
    shifted = np.fft.fftshift(fft)
    n_cols = shifted.shape[1]
    shifted[:, 0:bandwidth] = 0
    shifted[:, n_cols - bandwidth:n_cols] = 0
    return np.fft.ifftshift(shifted)
def computePredictionError(fft, dist_field, offset, optim=False, optimRate=1):
    """Compare the FFT-reconstructed distance field against the true one.

    A cell is predicted free when the reconstructed distance exceeds
    *offset*; the ground truth is ``dist_field > 0``.  When *optim* is
    True, *offset* is raised (by at least *optimRate* per step) until no
    cell is falsely predicted free.

    Returns (wrong_pred, lost_space, pred_error) where pred_error is
    +1 for falsely-free cells, -1 for falsely-blocked cells.

    Bug fix: the original computed ``abs(np.fft.ifft2(fft) +
    np.min(dist_field) > offset)``, i.e. abs() of a comparison on the raw
    complex iFFT; the intended expression (see the commented reference
    plot below in this file) is ``abs(np.fft.ifft2(fft)) +
    np.min(dist_field) > offset``.  The invariant iFFT is also hoisted
    out of the optimisation loop.
    """
    # The reconstruction does not depend on offset: compute it once.
    reconstruction = abs(np.fft.ifft2(fft)) + np.min(dist_field)
    truth = np.asarray((dist_field > 0), dtype=float)
    def _prediction_error(current_offset):
        # bool - float -> float array with values in {-1, 0, +1}.
        return (reconstruction > current_offset) - truth
    pred_error = _prediction_error(offset)
    wrong_pred = np.count_nonzero(pred_error > 0)
    if optim:
        # Raise the offset until no dangerous (falsely-free) cell remains.
        while wrong_pred > 0:
            offset = offset + max(wrong_pred*0.001, optimRate)
            pred_error = _prediction_error(offset)
            wrong_pred = np.count_nonzero(pred_error > 0)
    lost_space = np.count_nonzero(pred_error != 0)
    return wrong_pred, lost_space, pred_error
def plotLostSpaceVsNbCoeff(fft, dist_field, thresholds):
    """Plot lost range of motion (%) versus the number of kept FFT coefficients.

    For each threshold, filters *fft* with thresholdFilter, measures the
    lost free space via computePredictionError (offset auto-tuned from 5),
    and accumulates the per-cell error maps into a final image.
    """
    coeff_counts = []
    lost_percent = []
    accumulated_error = np.zeros(dist_field.shape)
    # The free-cell count does not change across thresholds: compute once.
    free_cells = np.count_nonzero(dist_field > 0)
    for threshold in thresholds:
        filtered = thresholdFilter(threshold, fft)
        _, lost, error_map = computePredictionError(filtered, dist_field, 5, optim=True, optimRate=0.05)
        coeff_counts.append(np.count_nonzero(filtered))
        lost_percent.append(100*lost/free_cells)
        accumulated_error = accumulated_error - error_map
    plt.plot(coeff_counts, lost_percent, '-+')
    plt.grid(True)
    plt.xscale("log")
    plt.xlabel("Nb. coeffs of the FFT to evaluate")
    plt.ylabel("Lost range of motion (%)")
    plt.figure()
    plt.imshow(accumulated_error)
estim = np.fft.fft2(dist_field - np.min(dist_field))
log_threshold = 12
#thresh_estim = vBandFilter(240,estim)
thresh_estim = thresholdFilter(log_threshold, estim)
plt.figure()
plt.subplot(2,2,1)
#plt.imshow(abs(np.fft.ifft2(estim)), cmap=plt.cm.RdYlGn)
plt.imshow(dist_field, cmap=plt.cm.RdYlGn)
#plt.figure()
plt.subplot(2,2,2)
plt.imshow(abs(np.fft.ifft2(thresh_estim)), cmap=plt.cm.RdYlGn)
plt.title("Estimated distance (iFFT of thresholded transform)")
#error_map = (abs(np.fft.ifft2(thresh_estim)) + np.min(dist_field) > 0) - np.asarray((dist_field > 0), dtype=float)
n_err, lost, error_map = computePredictionError(thresh_estim, dist_field, 5, optim=True)
#plt.figure()
plt.subplot(2,2,4)
plt.imshow(error_map)
plt.title("Prediction errors on binary collision check\n{:.2f}% lost space".format(100*lost/np.count_nonzero(dist_field > 0)))
#plt.figure()
plt.subplot(2,2,3)
plt.imshow(np.fft.fftshift(np.log(1 + abs(thresh_estim))))
plt.title("Filtered FFT : log(abs(F(fx,fy))) > {}\n {} non zero coeff.".format(log_threshold,np.count_nonzero(thresh_estim)))
'''
# Diff map
plt.figure()
diff_map = abs(np.fft.ifft2(thresh_estim)) - dist_field + np.min(dist_field)
plt.imshow(diff_map)
'''
print("FFT non zero values : ")
print(np.count_nonzero(thresh_estim))
print("Error ratio : ")
print(np.count_nonzero(error_map)/error_map.size)
'''
# Periodic view
plt.figure()
wideview = np.concatenate([np.concatenate([abs(np.fft.ifft2(thresh_estim)),abs(np.fft.ifft2(thresh_estim))]),
np.concatenate([abs(np.fft.ifft2(thresh_estim)),abs(np.fft.ifft2(thresh_estim))])], axis=1)
plt.imshow(wideview, cmap=plt.cm.RdYlGn)
'''
plt.figure()
plotLostSpaceVsNbCoeff(estim, dist_field, [0,5,8,10,11,12,12.5,13,13.5,14,14.5,15,15.5,16])
#plotLostSpaceVsNbCoeff(estim, dist_field, [0,5,10,15])
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.load",
"numpy.fft.ifftshift",
"matplotlib.pyplot.show",
"numpy.count_nonzero",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.imshow",
"numpy.asarray",
"numpy.fft.ifft2",
"numpy.zeros",
"matplotlib.pyplo... | [((368, 408), 'numpy.load', 'np.load', (['col_map_file'], {'allow_pickle': '(True)'}), '(col_map_file, allow_pickle=True)\n', (375, 408), True, 'import numpy as np\n'), ((442, 485), 'numpy.load', 'np.load', (['dist_field_file'], {'allow_pickle': '(True)'}), '(dist_field_file, allow_pickle=True)\n', (449, 485), True, 'import numpy as np\n'), ((6613, 6625), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6623, 6625), True, 'import matplotlib.pyplot as plt\n'), ((6626, 6646), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (6637, 6646), True, 'import matplotlib.pyplot as plt\n'), ((6703, 6745), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dist_field'], {'cmap': 'plt.cm.RdYlGn'}), '(dist_field, cmap=plt.cm.RdYlGn)\n', (6713, 6745), True, 'import matplotlib.pyplot as plt\n'), ((6761, 6781), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (6772, 6781), True, 'import matplotlib.pyplot as plt\n'), ((6844, 6907), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimated distance (iFFT of thresholded transform)"""'], {}), "('Estimated distance (iFFT of thresholded transform)')\n", (6853, 6907), True, 'import matplotlib.pyplot as plt\n'), ((7128, 7148), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (7139, 7148), True, 'import matplotlib.pyplot as plt\n'), ((7147, 7168), 'matplotlib.pyplot.imshow', 'plt.imshow', (['error_map'], {}), '(error_map)\n', (7157, 7168), True, 'import matplotlib.pyplot as plt\n'), ((7311, 7331), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (7322, 7331), True, 'import matplotlib.pyplot as plt\n'), ((8100, 8112), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8110, 8112), True, 'import matplotlib.pyplot as plt\n'), ((8262, 8272), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8270, 8272), True, 'import matplotlib.pyplot as plt\n'), 
((4776, 4796), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft'], {}), '(fft)\n', (4791, 4796), True, 'import numpy as np\n'), ((4884, 4906), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['copy'], {}), '(copy)\n', (4900, 4906), True, 'import numpy as np\n'), ((5492, 5524), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred_error > 0)'], {}), '(pred_error > 0)\n', (5508, 5524), True, 'import numpy as np\n'), ((5642, 5675), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred_error != 0)'], {}), '(pred_error != 0)\n', (5658, 5675), True, 'import numpy as np\n'), ((5838, 5864), 'numpy.zeros', 'np.zeros', (['dist_field.shape'], {}), '(dist_field.shape)\n', (5846, 5864), True, 'import numpy as np\n'), ((6227, 6264), 'matplotlib.pyplot.plot', 'plt.plot', (['nb_coeffs', 'lost_space', '"""-+"""'], {}), "(nb_coeffs, lost_space, '-+')\n", (6235, 6264), True, 'import matplotlib.pyplot as plt\n'), ((6269, 6283), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6277, 6283), True, 'import matplotlib.pyplot as plt\n'), ((6288, 6305), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (6298, 6305), True, 'import matplotlib.pyplot as plt\n'), ((6310, 6357), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Nb. coeffs of the FFT to evaluate"""'], {}), "('Nb. 
coeffs of the FFT to evaluate')\n", (6320, 6357), True, 'import matplotlib.pyplot as plt\n'), ((6362, 6400), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Lost range of motion (%)"""'], {}), "('Lost range of motion (%)')\n", (6372, 6400), True, 'import matplotlib.pyplot as plt\n'), ((6406, 6418), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6416, 6418), True, 'import matplotlib.pyplot as plt\n'), ((6423, 6445), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cumul_lost'], {}), '(cumul_lost)\n', (6433, 6445), True, 'import matplotlib.pyplot as plt\n'), ((7685, 7715), 'numpy.count_nonzero', 'np.count_nonzero', (['thresh_estim'], {}), '(thresh_estim)\n', (7701, 7715), True, 'import numpy as np\n'), ((5433, 5472), 'numpy.asarray', 'np.asarray', (['(dist_field > 0)'], {'dtype': 'float'}), '(dist_field > 0, dtype=float)\n', (5443, 5472), True, 'import numpy as np\n'), ((6480, 6498), 'numpy.min', 'np.min', (['dist_field'], {}), '(dist_field)\n', (6486, 6498), True, 'import numpy as np\n'), ((6795, 6821), 'numpy.fft.ifft2', 'np.fft.ifft2', (['thresh_estim'], {}), '(thresh_estim)\n', (6807, 6821), True, 'import numpy as np\n'), ((7482, 7512), 'numpy.count_nonzero', 'np.count_nonzero', (['thresh_estim'], {}), '(thresh_estim)\n', (7498, 7512), True, 'import numpy as np\n'), ((7748, 7775), 'numpy.count_nonzero', 'np.count_nonzero', (['error_map'], {}), '(error_map)\n', (7764, 7775), True, 'import numpy as np\n'), ((5281, 5313), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred_error > 0)'], {}), '(pred_error > 0)\n', (5297, 5313), True, 'import numpy as np\n'), ((6076, 6106), 'numpy.count_nonzero', 'np.count_nonzero', (['thresh_estim'], {}), '(thresh_estim)\n', (6092, 6106), True, 'import numpy as np\n'), ((7261, 7293), 'numpy.count_nonzero', 'np.count_nonzero', (['(dist_field > 0)'], {}), '(dist_field > 0)\n', (7277, 7293), True, 'import numpy as np\n'), ((5214, 5253), 'numpy.asarray', 'np.asarray', (['(dist_field > 0)'], {'dtype': 'float'}), '(dist_field > 
0, dtype=float)\n', (5224, 5253), True, 'import numpy as np\n'), ((6143, 6175), 'numpy.count_nonzero', 'np.count_nonzero', (['(dist_field > 0)'], {}), '(dist_field > 0)\n', (6159, 6175), True, 'import numpy as np\n'), ((5382, 5399), 'numpy.fft.ifft2', 'np.fft.ifft2', (['fft'], {}), '(fft)\n', (5394, 5399), True, 'import numpy as np\n'), ((5402, 5420), 'numpy.min', 'np.min', (['dist_field'], {}), '(dist_field)\n', (5408, 5420), True, 'import numpy as np\n'), ((5163, 5180), 'numpy.fft.ifft2', 'np.fft.ifft2', (['fft'], {}), '(fft)\n', (5175, 5180), True, 'import numpy as np\n'), ((5183, 5201), 'numpy.min', 'np.min', (['dist_field'], {}), '(dist_field)\n', (5189, 5201), True, 'import numpy as np\n')] |
"""Sorting components: template matching."""
import numpy as np
import scipy.spatial
from tqdm import tqdm
import sklearn, scipy
import scipy
from threadpoolctl import threadpool_limits
try:
import numba
from numba import jit, prange
HAVE_NUMBA = True
except ImportError:
HAVE_NUMBA = False
from spikeinterface.core import WaveformExtractor
from spikeinterface.core.job_tools import ChunkRecordingExecutor
from spikeinterface.toolkit import (get_noise_levels, get_template_channel_sparsity,
get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks)
from spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive, detect_peaks_by_channel
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
from sklearn.linear_model import orthogonal_mp_gram
# Low-level LAPACK/BLAS routines fetched once in float32:
# potrs solves a system given a Cholesky factor, nrm2 is the euclidean norm.
# Both are used by the OMP peeler (CircusOMPPeeler) below.
potrs, = scipy.linalg.get_lapack_funcs(('potrs',), dtype=np.float32)
nrm2, = scipy.linalg.get_blas_funcs(('nrm2', ), dtype=np.float32)
# Structured dtype shared by every matching engine for the returned spikes.
spike_dtype = [('sample_ind', 'int64'), ('channel_ind', 'int64'), ('cluster_ind', 'int64'),
               ('amplitude', 'float64'), ('segment_ind', 'int64')]
def find_spikes_from_templates(recording, method='naive', method_kwargs=None, extra_outputs=False,
                               **job_kwargs):
    """Find spikes in a recording using the given templates.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor object
    method: str
        Which method to use ('naive' | 'tridesclous' | 'circus')
    method_kwargs: dict, optional
        Keyword arguments for the chosen method. Must contain a
        'waveform_extractor' entry describing the templates.
    extra_outputs: bool
        If True then method_kwargs is also returned
    job_kwargs: dict
        Parameters for ChunkRecordingExecutor

    Returns
    -------
    spikes: ndarray
        Spikes found from templates.
    method_kwargs:
        Optionaly returns for debug purpose.

    Notes
    -----
    Templates are represented as WaveformExtractor so statistics can be extracted.
    """
    # avoid the shared-mutable-default-argument pitfall (was `method_kwargs={}`)
    if method_kwargs is None:
        method_kwargs = {}
    assert method in template_matching_methods
    method_class = template_matching_methods[method]
    # initialize: may compute noise levels, sparsify templates, ... (returns a new dict)
    method_kwargs = method_class.initialize_and_check_kwargs(recording, method_kwargs)
    # margin (in samples) needed on each side of every traces chunk
    method_kwargs['margin'] = method_class.get_margin(recording, method_kwargs)
    # serialize kwargs so they can be shipped to worker processes
    method_kwargs_seralized = method_class.serialize_method_kwargs(method_kwargs)
    # run chunk by chunk
    func = _find_spikes_chunk
    init_func = _init_worker_find_spikes
    init_args = (recording.to_dict(), method, method_kwargs_seralized)
    processor = ChunkRecordingExecutor(recording, func, init_func, init_args,
                                       handle_returns=True, job_name=f'find spikes ({method})', **job_kwargs)
    spikes = processor.run()
    spikes = np.concatenate(spikes)
    if extra_outputs:
        return spikes, method_kwargs
    else:
        return spikes
def _init_worker_find_spikes(recording, method, method_kwargs):
    """Build the per-worker context dict (recording, method and its kwargs)."""
    # the recording may arrive serialized as a dict: rebuild it
    if isinstance(recording, dict):
        from spikeinterface.core import load_extractor
        recording = load_extractor(recording)
    engine = template_matching_methods[method]
    method_kwargs = engine.unserialize_in_worker(method_kwargs)
    # one local context dict per worker
    worker_ctx = {
        'recording': recording,
        'method': method,
        'method_kwargs': method_kwargs,
        'function': engine.main_function,
    }
    return worker_ctx
def _find_spikes_chunk(segment_index, start_frame, end_frame, worker_ctx):
    """Run the matching engine on one chunk and return its spikes."""
    recording = worker_ctx['recording']
    method_kwargs = worker_ctx['method_kwargs']
    matching_function = worker_ctx['function']
    margin = method_kwargs['margin']
    # load the traces in memory, padded with `margin` samples on each side
    recording_segment = recording._recording_segments[segment_index]
    traces, left_margin, right_margin = get_chunk_with_margin(
        recording_segment, start_frame, end_frame, None, margin, add_zeros=True)
    # keep BLAS single-threaded: parallelism comes from the chunk executor
    with threadpool_limits(limits=1):
        spikes = matching_function(traces, method_kwargs)
    # discard spikes detected inside the margins
    if margin > 0:
        inside = (spikes['sample_ind'] >= margin) & (spikes['sample_ind'] < (traces.shape[0] - margin))
        spikes = spikes[inside]
    # convert chunk-local sample indices to absolute frame indices
    spikes['sample_ind'] += (start_frame - margin)
    spikes['segment_ind'] = segment_index
    return spikes
# generic class for template engine
class BaseTemplateMatchingEngine:
    """Abstract interface for template-matching engines.

    Engines are never instantiated: every method is a classmethod operating
    on a plain kwargs dict that travels from the main process to the workers.
    """
    # per-engine default kwargs, overridden in each subclass
    default_params = {}
    @classmethod
    def initialize_and_check_kwargs(cls, recording, kwargs):
        """This function runs before loops"""
        # need to be implemented in subclass
        raise NotImplementedError
    @classmethod
    def serialize_method_kwargs(cls, kwargs):
        """This function serializes kwargs to distribute them to workers"""
        # need to be implemented in subclass
        raise NotImplementedError
    @classmethod
    def unserialize_in_worker(cls, recording, kwargs):
        """This function unserializes kwargs in workers"""
        # need to be implemented in subclass
        raise NotImplementedError
    @classmethod
    def get_margin(cls, recording, kwargs):
        """This function returns the number of samples for the chunk margins"""
        # need to be implemented in subclass
        raise NotImplementedError
    @classmethod
    def main_function(cls, traces, method_kwargs):
        """This function runs the template matching on one traces chunk and returns spikes"""
        # need to be implemented in subclass
        raise NotImplementedError
##################
# naive matching #
##################
class NaiveMatching(BaseTemplateMatchingEngine):
    """
    Naive template matching: no collision resolution and no sparsity.
    Every detected peak is simply assigned to the template with the
    smallest squared distance over the full waveform.
    Implemented mainly for benchmarking against better engines and as a
    minimal example of the engine protocol (kwargs, margin, init, func, ...).
    """
    default_params = {
        'waveform_extractor': None,
        'peak_sign': 'neg',
        'n_shifts': 10,
        'detect_threshold': 5,
        'noise_levels': None,
        'local_radius_um': 100,
        'random_chunk_kwargs': {},
    }
    @classmethod
    def initialize_and_check_kwargs(cls, recording, kwargs):
        d = dict(cls.default_params)
        d.update(kwargs)
        assert d['waveform_extractor'] is not None
        we = d['waveform_extractor']
        # noise levels may be precomputed by the caller; otherwise estimate them now
        if d['noise_levels'] is None:
            d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
        d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
        # channels closer than local_radius_um are neighbours for peak exclusion
        d['neighbours_mask'] = get_channel_distances(recording) < d['local_radius_um']
        d['nbefore'] = we.nbefore
        d['nafter'] = we.nafter
        return d
    @classmethod
    def get_margin(cls, recording, kwargs):
        # enough samples on each side to cut a full waveform around any peak
        return max(kwargs['nbefore'], kwargs['nafter'])
    @classmethod
    def serialize_method_kwargs(cls, kwargs):
        kwargs = dict(kwargs)
        # the extractor itself is not picklable: ship its folder path instead
        kwargs['waveform_extractor'] = str(kwargs['waveform_extractor'].folder)
        return kwargs
    @classmethod
    def unserialize_in_worker(cls, kwargs):
        we = kwargs['waveform_extractor']
        if isinstance(we, str):
            we = WaveformExtractor.load_from_folder(we)
        kwargs['waveform_extractor'] = we
        kwargs['templates'] = we.get_all_templates(mode='average')
        return kwargs
    @classmethod
    def main_function(cls, traces, method_kwargs):
        d = method_kwargs
        templates = d['templates']
        nbefore, nafter = d['nbefore'], d['nafter']
        margin = d['margin']
        # detect peaks on the chunk interior only
        if margin > 0:
            peak_traces = traces[margin:-margin, :]
        else:
            peak_traces = traces
        peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(
            peak_traces, d['peak_sign'], d['abs_threholds'], d['n_shifts'], d['neighbours_mask'])
        peak_sample_ind += margin
        spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)
        spikes['sample_ind'] = peak_sample_ind
        spikes['channel_ind'] = peak_chan_ind  # TODO need to put the channel from template
        # naively assign each peak to the closest template
        for i, sample_ind in enumerate(peak_sample_ind):
            wf = traces[sample_ind - nbefore:sample_ind + nafter, :]
            dist = np.sum((templates - wf[None, :, :]) ** 2, axis=(1, 2))
            spikes['cluster_ind'][i] = np.argmin(dist)
            spikes['amplitude'][i] = 0.
        return spikes
######################
# tridesclous peeler #
######################
class TridesclousPeeler(BaseTemplateMatchingEngine):
    """
    Template-matching ported from Tridesclous sorter.
    The idea of this peeler is pretty simple.
    1. Find peaks
    2. order by best amplitudes
    3. find nearest template
    4. remove it from traces.
    5. in the residual find peaks again
    This method is quite fast but doesn't give excellent results to resolve
    spike collision when templates have high similarity.
    """
    default_params = {
        'waveform_extractor': None,
        'peak_sign': 'neg',
        'peak_shift_ms': 0.2,
        'detect_threshold': 5,
        'noise_levels': None,
        'local_radius_um': 100,
        'num_closest' : 5,
        'sample_shift': 3,
        'ms_before': 0.8,
        'ms_after': 1.2,
        'num_peeler_loop': 2,
        'num_template_try' : 1,
    }
    @classmethod
    def initialize_and_check_kwargs(cls, recording, kwargs):
        # the numba kernels below are mandatory for this peeler
        assert HAVE_NUMBA
        d = cls.default_params.copy()
        d.update(kwargs)
        assert isinstance(d['waveform_extractor'], WaveformExtractor)
        we = d['waveform_extractor']
        unit_ids = we.sorting.unit_ids
        channel_ids = we.recording.channel_ids
        sr = we.recording.get_sampling_frequency()
        # TODO load as sharedmem
        templates = we.get_all_templates(mode='average')
        d['templates'] = templates
        d['nbefore'] = we.nbefore
        d['nafter'] = we.nafter
        # shorter window used for the first (cheap) template distance pass
        nbefore_short = int(d['ms_before'] * sr / 1000.)
        # BUGFIX: the window after the peak must use 'ms_after' (was 'ms_before',
        # which silently ignored the 'ms_after' parameter)
        nafter_short = int(d['ms_after'] * sr / 1000.)
        assert nbefore_short <= we.nbefore
        assert nafter_short <= we.nafter
        d['nbefore_short'] = nbefore_short
        d['nafter_short'] = nafter_short
        s0 = (we.nbefore - nbefore_short)
        s1 = -(we.nafter - nafter_short)
        if s1 == 0:
            s1 = None
        templates_short = templates[:, slice(s0,s1), :].copy()
        d['templates_short'] = templates_short
        d['peak_shift'] = int(d['peak_shift_ms'] / 1000 * sr)
        if d['noise_levels'] is None:
            print('TridesclousPeeler : noise should be computed outside')
            d['noise_levels'] = get_noise_levels(recording)
        d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
        # channels closer than local_radius_um are neighbours
        channel_distance = get_channel_distances(recording)
        d['neighbours_mask'] = channel_distance < d['local_radius_um']
        #
        #~ template_sparsity_inds = get_template_channel_sparsity(we, method='radius',
        #~                                                        peak_sign=d['peak_sign'], outputs='index', radius_um=d['local_radius_um'])
        template_sparsity_inds = get_template_channel_sparsity(we, method='threshold',
                                                               peak_sign=d['peak_sign'], outputs='index', threshold=d['detect_threshold'])
        # boolean (unit, channel) mask built from the per-unit channel index lists
        template_sparsity = np.zeros((unit_ids.size, channel_ids.size), dtype='bool')
        for unit_index, unit_id in enumerate(unit_ids):
            chan_inds = template_sparsity_inds[unit_id]
            template_sparsity[unit_index, chan_inds] = True
        d['template_sparsity'] = template_sparsity
        extremum_channel = get_template_extremum_channel(we, peak_sign=d['peak_sign'], outputs='index')
        # as numpy vector
        extremum_channel = np.array([extremum_channel[unit_id] for unit_id in unit_ids], dtype='int64')
        d['extremum_channel'] = extremum_channel
        channel_locations = we.recording.get_channel_locations()
        # TODO try it with real locaion
        unit_locations = channel_locations[extremum_channel]
        #~ print(unit_locations)
        # distance between units
        unit_distances = scipy.spatial.distance.cdist(unit_locations, unit_locations, metric='euclidean')
        # search for closest units and unitary discriminant vector
        closest_units = []
        for unit_ind, unit_id in enumerate(unit_ids):
            order = np.argsort(unit_distances[unit_ind, :])
            closest_u = np.arange(unit_ids.size)[order].tolist()
            closest_u.remove(unit_ind)
            closest_u = np.array(closest_u[:d['num_closest']])
            # compute unitary discriminent vector
            chans, = np.nonzero(d['template_sparsity'][unit_ind, :])
            template_sparse = templates[unit_ind, :, :][:, chans]
            closest_vec = []
            # against N closets
            for u in closest_u:
                vec = (templates[u, :, :][:, chans] - template_sparse)
                vec /= np.sum(vec ** 2)
                closest_vec.append((u, vec))
            # against noise
            closest_vec.append((None, - template_sparse / np.sum(template_sparse ** 2)))
            closest_units.append(closest_vec)
        d['closest_units'] = closest_units
        # distance channel from unit
        distances = scipy.spatial.distance.cdist(channel_locations, unit_locations, metric='euclidean')
        near_cluster_mask = distances < d['local_radius_um']
        # nearby cluster for each channel
        possible_clusters_by_channel = []
        for channel_ind in range(distances.shape[0]):
            cluster_inds, = np.nonzero(near_cluster_mask[channel_ind, :])
            possible_clusters_by_channel.append(cluster_inds)
        d['possible_clusters_by_channel'] = possible_clusters_by_channel
        d['possible_shifts'] = np.arange(-d['sample_shift'], d['sample_shift'] +1, dtype='int64')
        return d
    @classmethod
    def serialize_method_kwargs(cls, kwargs):
        kwargs = dict(kwargs)
        # remove waveform_extractor
        kwargs.pop('waveform_extractor')
        return kwargs
    @classmethod
    def unserialize_in_worker(cls, kwargs):
        return kwargs
    @classmethod
    def get_margin(cls, recording, kwargs):
        margin = 2 * (kwargs['nbefore'] + kwargs['nafter'])
        return margin
    @classmethod
    def main_function(cls, traces, d):
        """Iteratively peel spikes: detect, subtract, and re-detect on the residual."""
        traces = traces.copy()
        all_spikes = []
        level = 0
        while True:
            spikes = _tdc_find_spikes(traces, d, level=level)
            keep = (spikes['cluster_ind'] >= 0)
            # stop when a pass finds no accepted spike
            if not np.any(keep):
                break
            all_spikes.append(spikes[keep])
            level += 1
            if level == d['num_peeler_loop']:
                break
        if len(all_spikes) > 0:
            all_spikes = np.concatenate(all_spikes)
            order = np.argsort(all_spikes['sample_ind'])
            all_spikes = all_spikes[order]
        else:
            all_spikes = np.zeros(0, dtype=spike_dtype)
        return all_spikes
def _tdc_find_spikes(traces, d, level=0):
    """One peeler pass: detect peaks, pick the best template (with sub-window
    shift search), subtract accepted templates from ``traces`` IN PLACE and
    return the spikes. Rejected peaks get cluster_ind == -1 / amplitude 0.
    """
    peak_sign = d['peak_sign']
    templates = d['templates']
    templates_short = d['templates_short']
    margin = d['margin']
    possible_clusters_by_channel = d['possible_clusters_by_channel']
    # detect on the interior only (half margin kept on each side)
    peak_traces = traces[margin // 2:-margin // 2, :]
    peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(peak_traces, peak_sign,
                                                                   d['abs_threholds'], d['peak_shift'], d['neighbours_mask'])
    peak_sample_ind += margin // 2
    # process peaks from largest to smallest absolute amplitude
    peak_amplitude = traces[peak_sample_ind, peak_chan_ind]
    order = np.argsort(np.abs(peak_amplitude))[::-1]
    peak_sample_ind = peak_sample_ind[order]
    peak_chan_ind = peak_chan_ind[order]
    spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)
    spikes['sample_ind'] = peak_sample_ind
    spikes['channel_ind'] = peak_chan_ind  # TODO need to put the channel from template
    possible_shifts = d['possible_shifts']
    distances_shift = np.zeros(possible_shifts.size)
    for i in range(peak_sample_ind.size):
        sample_ind = peak_sample_ind[i]
        chan_ind = peak_chan_ind[i]
        # only templates whose unit lies near this channel are candidates
        possible_clusters = possible_clusters_by_channel[chan_ind]
        if possible_clusters.size > 0:
            #~ s0 = sample_ind - d['nbefore']
            #~ s1 = sample_ind + d['nafter']
            #~ wf = traces[s0:s1, :]
            # cheap first pass: distance on the short window only
            s0 = sample_ind - d['nbefore_short']
            s1 = sample_ind + d['nafter_short']
            wf_short = traces[s0:s1, :]
            ## pure numpy with cluster spasity
            # distances = np.sum(np.sum((templates[possible_clusters, :, :] - wf[None, : , :])**2, axis=1), axis=1)
            ## pure numpy with cluster+channel spasity
            # union_channels, = np.nonzero(np.any(d['template_sparsity'][possible_clusters, :], axis=0))
            # distances = np.sum(np.sum((templates[possible_clusters][:, :, union_channels] - wf[: , union_channels][None, : :])**2, axis=1), axis=1)
            ## numba with cluster+channel spasity
            union_channels = np.any(d['template_sparsity'][possible_clusters, :], axis=0)
            # distances = numba_sparse_dist(wf, templates, union_channels, possible_clusters)
            distances = numba_sparse_dist(wf_short, templates_short, union_channels, possible_clusters)
            # DEBUG
            #~ ind = np.argmin(distances)
            #~ cluster_ind = possible_clusters[ind]
            # try the num_template_try closest templates until one is accepted
            for ind in np.argsort(distances)[:d['num_template_try']]:
                cluster_ind = possible_clusters[ind]
                chan_sparsity = d['template_sparsity'][cluster_ind, :]
                template_sparse = templates[cluster_ind, :, :][:, chan_sparsity]
                # find best shift
                ## pure numpy version
                # for s, shift in enumerate(possible_shifts):
                #     wf_shift = traces[s0 + shift: s1 + shift, chan_sparsity]
                #     distances_shift[s] = np.sum((template_sparse - wf_shift)**2)
                # ind_shift = np.argmin(distances_shift)
                # shift = possible_shifts[ind_shift]
                ## numba version
                numba_best_shift(traces, templates[cluster_ind, :, :], sample_ind, d['nbefore'], possible_shifts, distances_shift, chan_sparsity)
                ind_shift = np.argmin(distances_shift)
                shift = possible_shifts[ind_shift]
                sample_ind = sample_ind + shift
                s0 = sample_ind - d['nbefore']
                s1 = sample_ind + d['nafter']
                wf_sparse = traces[s0:s1, chan_sparsity]
                # accept or not: project the residual onto discriminant vectors
                # against the closest units and against noise
                centered = wf_sparse - template_sparse
                accepted = True
                for other_ind, other_vector in d['closest_units'][cluster_ind]:
                    v = np.sum(centered * other_vector)
                    if np.abs(v) >0.5:
                        accepted = False
                        break
                if accepted:
                    #~ if ind != np.argsort(distances)[0]:
                    #~     print('not first one', np.argsort(distances), ind)
                    break
            if accepted:
                amplitude = 1.
                # remove template (peel it from the traces, in place)
                template = templates[cluster_ind, :, :]
                s0 = sample_ind - d['nbefore']
                s1 = sample_ind + d['nafter']
                traces[s0:s1, :] -= template * amplitude
            else:
                cluster_ind = -1
                amplitude = 0.
        else:
            cluster_ind = -1
            amplitude = 0.
        spikes['cluster_ind'][i] = cluster_ind
        spikes['amplitude'][i] =amplitude
    return spikes
if HAVE_NUMBA:
    @jit(nopython=True)
    def numba_sparse_dist(wf, templates, union_channels, possible_clusters):
        """
        numba implementation that computes the squared distance between a
        waveform and each candidate template, restricted to the channels in
        union_channels (sparsity handled by two separate index vectors).
        """
        total_cluster, width, num_chan = templates.shape
        num_cluster = possible_clusters.shape[0]
        distances = np.zeros((num_cluster,), dtype=np.float32)
        # prange: candidate clusters are independent, loop may parallelize
        for i in prange(num_cluster):
            cluster_ind = possible_clusters[i]
            sum_dist = 0.
            for chan_ind in range(num_chan):
                if union_channels[chan_ind]:
                    for s in range(width):
                        v = wf[s, chan_ind]
                        t = templates[cluster_ind, s, chan_ind]
                        sum_dist += (v - t) ** 2
            distances[i] = sum_dist
        return distances
    @jit(nopython=True)
    def numba_best_shift(traces, template, sample_ind, nbefore, possible_shifts, distances_shift, chan_sparsity):
        """
        numba implementation that computes the template distance at several
        sample shifts before template subtraction; results are written into
        the preallocated distances_shift buffer (also returned).
        """
        width, num_chan = template.shape
        n_shift = possible_shifts.size
        for i in range(n_shift):
            shift = possible_shifts[i]
            sum_dist = 0.
            for chan_ind in range(num_chan):
                if chan_sparsity[chan_ind]:
                    for s in range(width):
                        v = traces[sample_ind - nbefore + s +shift, chan_ind]
                        t = template[s, chan_ind]
                        sum_dist += (v - t) ** 2
            distances_shift[i] = sum_dist
        return distances_shift
#################
# Circus peeler #
#################
# if HAVE_NUMBA:
# @jit(nopython=True)
# def fastconvolution(traces, templates, output):
# nb_time, nb_channels = traces.shape
# nb_templates, nb_samples, nb_channels = templates.shape
# center = nb_samples // 2
# for i in range(center, nb_time - center + 1):
# offset_1 = i - center
# for k in range(nb_templates):
# for jj in range(nb_samples):
# offset_2 = offset_1 + jj
# for j in range(nb_channels):
# output[k, offset_1] += (templates[k, jj, j] * traces[offset_2, j])
# return output
class CircusOMPPeeler(BaseTemplateMatchingEngine):
    """
    Orthogonal Matching Pursuit inspired from Spyking Circus sorter
    https://elifesciences.org/articles/34518
    This is an Orthogonal Template Matching algorithm. For speed and
    memory optimization, templates are automatically sparsified if the
    density of the matrix falls below a given threshold. Signal is
    convolved with the templates, and as long as some scalar products
    are higher than a given threshold, we use a Cholesky decomposition
    to compute the optimal amplitudes needed to reconstruct the signal.
    IMPORTANT NOTE: small chunks are more efficient for such Peeler,
    consider using 100ms chunk
    Parameters
    ----------
    noise_levels: array
        The noise levels, for every channels
    random_chunk_kwargs: dict
        Parameters for computing noise levels, if not provided (sub optimal)
    amplitude: tuple
        (Minimal, Maximal) amplitudes allowed for every template
    omp_min_sps: float
        Stopping criteria of the OMP algorithm, in percentage of the norm
    sparsify_threshold: float
        Templates are sparsified in order to keep only the channels necessary
        to explain a given fraction of the total norm
    use_sparse_matrix_threshold: float
        If density of the templates is below a given threshold, sparse matrix
        are used (memory efficient)
    progress_bar_steps: bool
        In order to display or not steps from the algorithm
    -----
    """
    _default_params = {
        'sparsify_threshold': 0.99,
        'amplitudes' : [0.5, 1.5],
        'use_sparse_matrix_threshold' : 0.25,
        'noise_levels': None,
        'random_chunk_kwargs': {},
        'omp_min_sps' : 0.5,
        'progess_bar_steps' : False,
    }
    @classmethod
    def _sparsify_template(cls, template, sparsify_threshold, noise_levels):
        # zero-out channels whose std is well below the noise floor,
        # then keep only the fewest channels explaining `sparsify_threshold`
        # of the squared norm; mutates `template` in place
        is_silent = template.std(0) < 0.25*noise_levels
        template[:, is_silent] = 0
        channel_norms = np.linalg.norm(template, axis=0)**2
        total_norm = np.linalg.norm(template)**2
        idx = np.argsort(channel_norms)[::-1]
        explained_norms = np.cumsum(channel_norms[idx]/total_norm)
        channel = np.searchsorted(explained_norms, sparsify_threshold)
        active_channels = np.sort(idx[:channel])
        template[:, idx[channel:]] = 0
        return template, active_channels
    @classmethod
    def _prepare_templates(cls, d):
        """Sparsify and L2-normalize templates; store norms, per-template
        active channels ('sparsities') and the flattened template matrix."""
        waveform_extractor = d['waveform_extractor']
        nb_samples = d['nb_samples']
        nb_channels = d['nb_channels']
        nb_templates = d['nb_templates']
        use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']
        d['norms'] = np.zeros(nb_templates, dtype=np.float32)
        all_units = list(d['waveform_extractor'].sorting.unit_ids)
        templates = waveform_extractor.get_all_templates(mode='median').copy()
        d['sparsities'] = {}
        for count, unit_id in enumerate(all_units):
            templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])
            d['sparsities'][count] = active_channels
            d['norms'][count] = np.linalg.norm(templates[count])
            templates[count] /= d['norms'][count]
        templates = templates.reshape(nb_templates, -1)
        # density of the flattened template matrix: switch to sparse storage
        # when it pays off memory-wise
        nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)
        if nnz <= use_sparse_matrix_threshold:
            templates = scipy.sparse.csr_matrix(templates)
            print(f'Templates are automatically sparsified (sparsity level is {nnz})')
            d['is_dense'] = False
        else:
            d['is_dense'] = True
        d['templates'] = templates
        return d
    @classmethod
    def _prepare_overlaps(cls, d):
        """Precompute pairwise template overlaps for every temporal delay
        (2*nb_samples-1 lags), stored per template as stacked sparse rows."""
        templates = d['templates']
        nb_samples = d['nb_samples']
        nb_channels = d['nb_channels']
        nb_templates = d['nb_templates']
        is_dense = d['is_dense']
        if not is_dense:
            dense_templates = templates.toarray()
        else:
            dense_templates = templates
        dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)
        size = 2 * nb_samples - 1
        all_delays = list(range(nb_samples))
        if d['progess_bar_steps']:
            all_delays = tqdm(all_delays, desc='[1] compute overlaps')
        overlaps = {}
        for delay in all_delays:
            source = dense_templates[:, :delay, :].reshape(nb_templates, -1)
            target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)
            if delay > 0:
                overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))
            else:
                overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)
            # negative lags are the transpose of the positive ones
            if delay < nb_samples:
                overlaps[size - delay-1] = overlaps[delay].T.tocsr()
        new_overlaps = []
        for i in range(nb_templates):
            data = [overlaps[j][i, :].T for j in range(size)]
            data = scipy.sparse.hstack(data)
            new_overlaps += [data]
        d['overlaps'] = new_overlaps
        return d
    @classmethod
    def initialize_and_check_kwargs(cls, recording, kwargs):
        d = cls._default_params.copy()
        d.update(kwargs)
        assert isinstance(d['waveform_extractor'], WaveformExtractor)
        for v in ['sparsify_threshold', 'omp_min_sps','use_sparse_matrix_threshold']:
            assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'
        if d['noise_levels'] is None:
            print('CircusOMPPeeler : noise should be computed outside')
            d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
        d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()
        d['nb_samples'] = d['waveform_extractor'].nsamples
        d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)
        d['nbefore'] = d['waveform_extractor'].nbefore
        d['nafter'] = d['waveform_extractor'].nafter
        d = cls._prepare_templates(d)
        d = cls._prepare_overlaps(d)
        return d
    @classmethod
    def serialize_method_kwargs(cls, kwargs):
        kwargs = dict(kwargs)
        # remove waveform_extractor
        kwargs.pop('waveform_extractor')
        return kwargs
    @classmethod
    def unserialize_in_worker(cls, kwargs):
        return kwargs
    @classmethod
    def get_margin(cls, recording, kwargs):
        margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])
        return margin
    @classmethod
    def main_function(cls, traces, d):
        """Run OMP on one traces chunk and return the accepted spikes."""
        templates = d['templates']
        nb_templates = d['nb_templates']
        nb_channels = d['nb_channels']
        overlaps = d['overlaps']
        margin = d['margin']
        norms = d['norms']
        nbefore = d['nbefore']
        nafter = d['nafter']
        omp_tol = np.finfo(np.float32).eps
        omp_min_sps = d['omp_min_sps']
        nb_samples = d['nafter'] + d['nbefore']
        neighbor_window = nb_samples - 1
        min_amplitude, max_amplitude = d['amplitudes']
        sparsities = d['sparsities']
        is_dense = d['is_dense']
        stop_criteria = omp_min_sps * norms[:, np.newaxis]
        nb_peaks = len(traces) - nb_samples + 1
        # scalar products of every template with every possible peak position
        # (time-reversed kernels so fftconvolve performs a correlation)
        if is_dense:
            kernel_filters = templates.reshape(nb_templates, nb_samples, nb_channels)[:, ::-1, :]
            scalar_products = scipy.signal.fftconvolve(kernel_filters, traces[np.newaxis, :, :], axes=(0, 1), mode='valid').sum(2)
        else:
            scalar_products = np.empty((nb_templates, nb_peaks), dtype=np.float32)
            for i in range(nb_templates):
                kernel_filter = templates[i].toarray().reshape(nb_samples, nb_channels)
                kernel_filter = kernel_filter[::-1, sparsities[i]]
                convolution = scipy.signal.fftconvolve(kernel_filter, traces[:, sparsities[i]], axes=0, mode='valid')
                if len(convolution) > 0:
                    scalar_products[i] = convolution.sum(1)
                else:
                    scalar_products[i] = 0
        peak_chan_ind = np.zeros(nb_peaks)
        nb_spikes = 0
        spikes = np.empty(scalar_products.size, dtype=spike_dtype)
        idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)
        # M holds the (growing) Cholesky factor of the selected atoms' Gram matrix
        M = np.zeros((nb_peaks, nb_peaks), dtype=np.float32)
        all_selections = np.empty((2, scalar_products.size), dtype=np.int32)
        res_sps = np.zeros(0, dtype=np.float32)
        final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32)
        nb_selection = 0
        full_sps = scalar_products.copy()
        neighbors = {}
        cached_overlaps = {}
        is_valid = (scalar_products > stop_criteria)
        # greedy OMP loop: pick the best (template, time) atom, update the
        # Cholesky factor, re-solve all amplitudes, subtract overlaps
        while np.any(is_valid):
            best_amplitude_ind = scalar_products[is_valid].argmax()
            best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)
            all_selections[:, nb_selection] = [best_cluster_ind, peak_index]
            nb_selection += 1
            selection = all_selections[:, :nb_selection]
            res_sps = full_sps[selection[0], selection[1]]
            mb_selection = nb_selection - 1
            # fill the new Gram row from overlaps of atoms within the window
            delta_t = selection[1] - peak_index
            idx = np.where(np.abs(delta_t) <= neighbor_window)[0]
            myline = neighbor_window + delta_t[idx]
            if best_cluster_ind not in cached_overlaps.keys():
                cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()
            M[mb_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline]
            # grow the Cholesky workspace when it is about to run out
            if nb_selection >= (M.shape[0] - 1):
                Z = np.zeros((2*M.shape[0], 2*M.shape[1]), dtype=np.float32)
                Z[:nb_selection, :nb_selection] = M[:nb_selection, :nb_selection]
                M = Z
            # rank-1 Cholesky update of the factor
            if mb_selection > 0:
                scipy.linalg.solve_triangular(M[:mb_selection, :mb_selection], M[mb_selection, :mb_selection], trans=0,
                                              lower=1,
                                              overwrite_b=True,
                                              check_finite=False)
                v = nrm2(M[mb_selection, :mb_selection]) ** 2
                if 1 - v <= omp_tol: # selected atoms are dependent
                    break
                M[mb_selection, mb_selection] = np.sqrt(1 - v)
            # solve for all amplitudes given the current factor (LAPACK potrs)
            all_amplitudes, _ = potrs(M[:nb_selection, :nb_selection], res_sps,
                                      lower=True, overwrite_b=False)
            all_amplitudes /= norms[selection[0]]
            diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]])
            modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0]
            final_amplitudes[selection[0], selection[1]] = all_amplitudes
            # subtract the amplitude changes from nearby scalar products
            for i in modified:
                tmp_best, tmp_peak = selection[:, i]
                diff_amp = diff_amplitudes[i]*norms[tmp_best]
                if not tmp_best in cached_overlaps.keys():
                    cached_overlaps[tmp_best] = overlaps[tmp_best].toarray()
                if not tmp_peak in neighbors.keys():
                    idx = [max(0, tmp_peak - neighbor_window), min(nb_peaks, tmp_peak + neighbor_window + 1)]
                    offset = [neighbor_window + idx[0] - tmp_peak, neighbor_window + idx[1] - tmp_peak]
                    neighbors[tmp_peak] = {'idx' : idx, 'tdx' : offset}
                idx = neighbors[tmp_peak]['idx']
                tdx = neighbors[tmp_peak]['tdx']
                to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]]
                scalar_products[:, idx[0]:idx[1]] -= to_add
            # never pick the same atom twice
            scalar_products[best_cluster_ind, peak_index] = -np.inf
            is_valid = (scalar_products > stop_criteria)
        # keep only spikes whose final amplitude is inside the allowed range
        is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude)
        valid_indices = np.where(is_valid)
        nb_spikes = len(valid_indices[0])
        spikes['sample_ind'][:nb_spikes] = valid_indices[1] + d['nbefore']
        spikes['channel_ind'][:nb_spikes] = 0
        spikes['cluster_ind'][:nb_spikes] = valid_indices[0]
        spikes['amplitude'][:nb_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]]
        spikes = spikes[:nb_spikes]
        order = np.argsort(spikes['sample_ind'])
        spikes = spikes[order]
        return spikes
class CircusPeeler(BaseTemplateMatchingEngine):
    """Greedy template-matching peeler ported from the Spyking Circus sorter.

    https://elifesciences.org/articles/34518

    This is a greedy template-matching algorithm. The idea is to detect
    all the peaks (negative, positive or both) above a certain threshold.
    Then, at every peak (plus or minus some jitter) we look if the signal
    can be explained with a scaled template.
    The amplitudes allowed, for every template, are automatically adjusted
    in an optimal manner, to enhance the Matthews correlation coefficient
    between all spikes/templates in the waveform extractor. For speed and
    memory optimization, templates are automatically sparsified if the
    density of the matrix falls below a given threshold.

    Parameters
    ----------
    peak_sign: str
        Sign of the peak (neg, pos, or both)
    n_shifts: int
        The number of samples before/after to classify a peak (should be low)
    jitter: int
        The number of samples considered before/after every peak to search for
        matches
    detect_threshold: int
        The detection threshold
    noise_levels: array
        The noise levels, for every channel
    random_chunk_kwargs: dict
        Parameters for computing noise levels, if not provided (sub optimal)
    max_amplitude: float
        Maximal amplitude allowed for every template
    min_amplitude: float
        Minimal amplitude allowed for every template
    sparsify_threshold: float
        Templates are sparsified in order to keep only the channels necessary
        to explain a given fraction of the total norm
    use_sparse_matrix_threshold: float
        If density of the templates is below a given threshold, sparse matrices
        are used (memory efficient)
    progess_bar_steps: bool
        In order to display or not steps from the algorithm.
        NOTE: the key really is spelled "progess" (sic) in `_default_params`;
        keep the spelling for backward compatibility with existing callers.
    """

    _default_params = {
        'peak_sign': 'neg',
        'n_shifts': 1,
        'jitter' : 1,
        'detect_threshold': 5,
        'noise_levels': None,
        'random_chunk_kwargs': {},
        'sparsify_threshold': 0.99,
        'max_amplitude' : 1.5,
        'min_amplitude' : 0.5,
        'use_sparse_matrix_threshold' : 0.25,
        'progess_bar_steps' : True,
    }

    @classmethod
    def _sparsify_template(cls, template, sparsify_threshold, noise_levels):
        """Zero out channels of `template` that carry little signal.

        Channels whose std is below 0.25 * noise level are silenced, then
        only the channels needed to explain `sparsify_threshold` of the
        total squared norm are kept (ordered by decreasing norm).

        Returns the (modified in place) template and the indices of the
        channels that were kept.
        """
        is_silent = template.std(0) < 0.25*noise_levels
        template[:, is_silent] = 0
        # Squared per-channel norms, sorted by decreasing contribution
        channel_norms = np.linalg.norm(template, axis=0)**2
        total_norm = np.linalg.norm(template)**2
        idx = np.argsort(channel_norms)[::-1]
        explained_norms = np.cumsum(channel_norms[idx]/total_norm)
        # Smallest channel count explaining the requested norm fraction
        channel = np.searchsorted(explained_norms, sparsify_threshold)
        active_channels = np.sort(idx[:channel])
        template[:, idx[channel:]] = 0
        return template, active_channels

    @classmethod
    def _prepare_templates(cls, d):
        """Sparsify, normalize and flatten the templates.

        Fills d['norms'], d['sparsities'] and d['templates'] (flattened to
        (nb_templates, nb_samples * nb_channels)). Templates are stored as a
        scipy CSR matrix when their density is below
        d['use_sparse_matrix_threshold'] (d['is_dense'] records the choice).
        """
        waveform_extractor = d['waveform_extractor']
        nb_samples = d['nb_samples']
        nb_channels = d['nb_channels']
        nb_templates = d['nb_templates']
        use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']
        d['norms'] = np.zeros(nb_templates, dtype=np.float32)
        all_units = list(d['waveform_extractor'].sorting.unit_ids)
        templates = waveform_extractor.get_all_templates(mode='median').copy()
        d['sparsities'] = {}
        for count, unit_id in enumerate(all_units):
            templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])
            d['sparsities'][count] = active_channels
            # Normalize each template so scalar products are comparable
            d['norms'][count] = np.linalg.norm(templates[count])
            templates[count] /= d['norms'][count]
        templates = templates.reshape(nb_templates, -1)
        # Fraction of non-zero entries decides dense vs sparse storage
        nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)
        if nnz <= use_sparse_matrix_threshold:
            templates = scipy.sparse.csr_matrix(templates)
            print(f'Templates are automatically sparsified (sparsity level is {nnz})')
            d['is_dense'] = False
        else:
            d['is_dense'] = True
        d['templates'] = templates
        return d

    @classmethod
    def _prepare_overlaps(cls, d):
        """Precompute pairwise template overlaps for every temporal delay.

        For each delay in [0, 2*nb_samples - 2], the (sparse) matrix of
        scalar products between time-shifted templates is computed; the
        result is stored in d['overlaps'] as one stacked sparse matrix per
        template.
        """
        templates = d['templates']
        nb_samples = d['nb_samples']
        nb_channels = d['nb_channels']
        nb_templates = d['nb_templates']
        is_dense = d['is_dense']
        if not is_dense:
            dense_templates = templates.toarray()
        else:
            dense_templates = templates
        dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)
        size = 2 * nb_samples - 1
        all_delays = list(range(nb_samples))
        if d['progess_bar_steps']:
            all_delays = tqdm(all_delays, desc='[1] compute overlaps')
        overlaps = {}
        for delay in all_delays:
            source = dense_templates[:, :delay, :].reshape(nb_templates, -1)
            target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)
            if delay > 0:
                overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))
            else:
                # Zero-sample overlap: empty sparse matrix
                overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)
            # Negative delays are the transpose of the positive ones
            if delay < nb_samples:
                overlaps[size - delay-1] = overlaps[delay].T.tocsr()
        # Regroup per template: one (size x nb_templates) stack per unit
        new_overlaps = []
        for i in range(nb_templates):
            data = [overlaps[j][i, :].T for j in range(size)]
            data = scipy.sparse.hstack(data)
            new_overlaps += [data]
        d['overlaps'] = new_overlaps
        return d

    @classmethod
    def _mcc_error(cls, bounds, good, bad):
        """Return 1 - Matthews correlation coefficient for given amplitude bounds.

        `good` amplitudes inside [bounds[0], bounds[1]] count as true
        positives, `bad` amplitudes inside as false positives, and so on.
        Returns 1 (worst) when the MCC denominator is zero.
        """
        fn = np.sum((good < bounds[0]) | (good > bounds[1]))
        fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1]))
        tp = np.sum((bounds[0] <= good) & (good <= bounds[1]))
        tn = np.sum((bad < bounds[0]) | (bad > bounds[1]))
        denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)
        if denom > 0:
            mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom)
        else:
            mcc = 1
        return mcc

    @classmethod
    def _cost_function_mcc(cls, bounds, good, bad, delta_amplitude, alpha):
        """Cost mixing classification error and bound tightness.

        We want a minimal MCC error, with the largest bounds that are
        possible; `alpha` weights the trade-off between the two terms.
        """
        cost = alpha*cls._mcc_error(bounds, good, bad) + (1 - alpha)*np.abs((1 - (bounds[1] - bounds[0])/delta_amplitude))
        return cost

    @classmethod
    def _optimize_amplitudes(cls, noise_snippets, d):
        """Fit per-template amplitude bounds via differential evolution.

        For every unit, its own spike amplitudes are the "good" samples,
        while amplitudes of other units and of noise snippets are the
        "bad" ones; the (min, max) bounds minimizing `_cost_function_mcc`
        are stored in d['amplitudes'].
        """
        waveform_extractor = d['waveform_extractor']
        templates = d['templates']
        nb_templates = d['nb_templates']
        max_amplitude = d['max_amplitude']
        min_amplitude = d['min_amplitude']
        alpha = 0.5
        norms = d['norms']
        all_units = list(waveform_extractor.sorting.unit_ids)
        if d['progess_bar_steps']:
            all_units = tqdm(all_units, desc='[2] compute amplitudes')
        d['amplitudes'] = np.zeros((nb_templates, 2), dtype=np.float32)
        # Normalized amplitudes of pure-noise snippets for every template
        noise = templates.dot(noise_snippets)/norms[:, np.newaxis]
        for count, unit_id in enumerate(all_units):
            w = waveform_extractor.get_waveforms(unit_id)
            snippets = w.reshape(w.shape[0], -1).T
            amps = templates.dot(snippets)/norms[:, np.newaxis]
            good = amps[count, :].flatten()
            # Amplitudes of all OTHER templates on this unit's spikes
            sub_amps = amps[np.concatenate((np.arange(count), np.arange(count+1, nb_templates))), :]
            bad = sub_amps[sub_amps >= good]
            bad = np.concatenate((bad, noise[count]))
            cost_kwargs = [good, bad, max_amplitude - min_amplitude, alpha]
            cost_bounds = [(min_amplitude, 1), (1, max_amplitude)]
            res = scipy.optimize.differential_evolution(cls._cost_function_mcc, bounds=cost_bounds, args=cost_kwargs)
            d['amplitudes'][count] = res.x
        return d

    @classmethod
    def initialize_and_check_kwargs(cls, recording, kwargs):
        """Validate kwargs and precompute everything needed by `main_function`.

        Merges user kwargs over `_default_params`, derives geometry from the
        waveform extractor, computes noise levels if absent, then prepares
        templates, overlaps and amplitude bounds. Returns the params dict.
        """
        d = cls._default_params.copy()
        d.update(kwargs)
        assert isinstance(d['waveform_extractor'], WaveformExtractor)
        for v in ['sparsify_threshold', 'use_sparse_matrix_threshold']:
            assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'
        d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()
        d['nb_samples'] = d['waveform_extractor'].nsamples
        d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)
        if d['noise_levels'] is None:
            print('CircusPeeler : noise should be computed outside')
            d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
        # NOTE: 'abs_threholds' key is misspelled (sic) but used consistently
        d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
        d = cls._prepare_templates(d)
        d = cls._prepare_overlaps(d)
        d['nbefore'] = d['waveform_extractor'].nbefore
        d['nafter'] = d['waveform_extractor'].nafter
        d['patch_sizes'] = (d['waveform_extractor'].nsamples, d['nb_channels'])
        d['sym_patch'] = d['nbefore'] == d['nafter']
        # Collect random noise snippets to calibrate amplitude bounds
        nb_segments = recording.get_num_segments()
        if d['waveform_extractor']._params['max_spikes_per_unit'] is None:
            nb_snippets = 1000
        else:
            nb_snippets = 2*d['waveform_extractor']._params['max_spikes_per_unit']
        nb_chunks = nb_snippets // nb_segments
        noise_snippets = get_random_data_chunks(recording, num_chunks_per_segment=nb_chunks, chunk_size=d['nb_samples'], seed=42)
        noise_snippets = noise_snippets.reshape(nb_chunks, d['nb_samples'], d['nb_channels']).reshape(nb_chunks, -1).T
        d = cls._optimize_amplitudes(noise_snippets, d)
        return d

    @classmethod
    def serialize_method_kwargs(cls, kwargs):
        """Return a copy of kwargs that can be sent to workers.

        The waveform extractor is dropped because it is not serializable.
        """
        kwargs = dict(kwargs)
        kwargs.pop('waveform_extractor')
        return kwargs

    @classmethod
    def unserialize_in_worker(cls, kwargs):
        """Rebuild kwargs on the worker side (nothing to do here)."""
        return kwargs

    @classmethod
    def get_margin(cls, recording, kwargs):
        """Margin (in samples) needed around each chunk: one full template."""
        margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])
        return margin

    @classmethod
    def main_function(cls, traces, d):
        """Detect and peel spikes from one chunk of traces.

        Detects threshold crossings, extracts snippets around them, then
        greedily assigns the template/peak pair with the largest scalar
        product, subtracting its overlap contribution from neighboring
        peaks until no product remains within the allowed amplitude bounds.
        Returns a structured array of spikes sorted by sample index.
        """
        peak_sign = d['peak_sign']
        abs_threholds = d['abs_threholds']
        n_shifts = d['n_shifts']
        templates = d['templates']
        nb_templates = d['nb_templates']
        overlaps = d['overlaps']
        margin = d['margin']
        norms = d['norms']
        jitter = d['jitter']
        patch_sizes = d['patch_sizes']
        nb_samples = d['nafter'] + d['nbefore']
        neighbor_window = nb_samples - 1
        amplitudes = d['amplitudes']
        sym_patch = d['sym_patch']
        # Peaks are detected on the chunk stripped of its margins
        peak_traces = traces[margin // 2:-margin // 2, :]
        peak_sample_ind, peak_chan_ind = detect_peaks_by_channel(peak_traces, peak_sign, abs_threholds, n_shifts)
        if jitter > 0:
            # Expand every peak into a window of candidate positions
            jittered_peaks = peak_sample_ind[:, np.newaxis] + np.arange(-jitter, jitter)
            jittered_channels = peak_chan_ind[:, np.newaxis] + np.zeros(2*jitter)
            mask = (jittered_peaks > 0) & (jittered_peaks < len(peak_traces))
            jittered_peaks = jittered_peaks[mask]
            jittered_channels = jittered_channels[mask]
            peak_sample_ind, unique_idx = np.unique(jittered_peaks, return_index=True)
            peak_chan_ind = jittered_channels[unique_idx]
        else:
            peak_sample_ind, unique_idx = np.unique(peak_sample_ind, return_index=True)
            peak_chan_ind = peak_chan_ind[unique_idx]
        nb_peaks = len(peak_sample_ind)
        # Extract one snippet per candidate peak
        if sym_patch:
            snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_ind]
            peak_sample_ind += margin // 2
        else:
            peak_sample_ind += margin // 2
            snippet_window = np.arange(-d['nbefore'], d['nafter'])
            snippets = traces[peak_sample_ind[:, np.newaxis] + snippet_window]
        if nb_peaks > 0:
            snippets = snippets.reshape(nb_peaks, -1)
            scalar_products = templates.dot(snippets.T)
        else:
            scalar_products = np.zeros((nb_templates, 0), dtype=np.float32)
        nb_spikes = 0
        spikes = np.empty(scalar_products.size, dtype=spike_dtype)
        idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)
        # Per-template amplitude bounds expressed as scalar-product bounds
        min_sps = (amplitudes[:, 0] * norms)[:, np.newaxis]
        max_sps = (amplitudes[:, 1] * norms)[:, np.newaxis]
        is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)
        cached_overlaps = {}
        # Greedy peeling: always take the largest remaining scalar product
        while np.any(is_valid):
            best_amplitude_ind = scalar_products[is_valid].argmax()
            best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)
            best_amplitude = scalar_products[best_cluster_ind, peak_index]
            best_peak_sample_ind = peak_sample_ind[peak_index]
            best_peak_chan_ind = peak_chan_ind[peak_index]
            # Find peaks within one template length of the selected peak
            peak_data = peak_sample_ind - peak_sample_ind[peak_index]
            neighbor_bounds = np.searchsorted(peak_data, [-neighbor_window, neighbor_window + 1])
            idx_neighbor = peak_data[neighbor_bounds[0]:neighbor_bounds[1]] + neighbor_window
            if best_cluster_ind not in cached_overlaps:
                cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()
            # Subtract the matched template's overlap from neighboring peaks
            to_add = -best_amplitude * cached_overlaps[best_cluster_ind][:, idx_neighbor]
            scalar_products[:, neighbor_bounds[0]:neighbor_bounds[1]] += to_add
            # Forbid re-selecting the same template on neighboring peaks
            scalar_products[best_cluster_ind, neighbor_bounds[0]:neighbor_bounds[1]] = -np.inf
            spikes['sample_ind'][nb_spikes] = best_peak_sample_ind
            spikes['channel_ind'][nb_spikes] = best_peak_chan_ind
            spikes['cluster_ind'][nb_spikes] = best_cluster_ind
            spikes['amplitude'][nb_spikes] = best_amplitude
            nb_spikes += 1
            is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)
        # Convert scalar products back to normalized amplitudes
        spikes['amplitude'][:nb_spikes] /= norms[spikes['cluster_ind'][:nb_spikes]]
        spikes = spikes[:nb_spikes]
        order = np.argsort(spikes['sample_ind'])
        spikes = spikes[order]
        return spikes
# Registry of the available template-matching engines, keyed by method name.
template_matching_methods = {
    'naive' : NaiveMatching,
    'tridesclous' : TridesclousPeeler,
    'circus' : CircusPeeler,
    'circus-omp' : CircusOMPPeeler
}
| [
"spikeinterface.toolkit.get_channel_distances",
"numpy.sum",
"numpy.abs",
"scipy.linalg.solve_triangular",
"numpy.empty",
"numpy.argmin",
"numpy.argsort",
"numpy.arange",
"numpy.linalg.norm",
"numba.prange",
"scipy.signal.fftconvolve",
"numpy.unique",
"spikeinterface.sortingcomponents.peak_d... | [((891, 950), 'scipy.linalg.get_lapack_funcs', 'scipy.linalg.get_lapack_funcs', (["('potrs',)"], {'dtype': 'np.float32'}), "(('potrs',), dtype=np.float32)\n", (920, 950), False, 'import scipy\n'), ((960, 1016), 'scipy.linalg.get_blas_funcs', 'scipy.linalg.get_blas_funcs', (["('nrm2',)"], {'dtype': 'np.float32'}), "(('nrm2',), dtype=np.float32)\n", (987, 1016), False, 'import scipy\n'), ((2722, 2858), 'spikeinterface.core.job_tools.ChunkRecordingExecutor', 'ChunkRecordingExecutor', (['recording', 'func', 'init_func', 'init_args'], {'handle_returns': '(True)', 'job_name': 'f"""find spikes ({method})"""'}), "(recording, func, init_func, init_args,\n handle_returns=True, job_name=f'find spikes ({method})', **job_kwargs)\n", (2744, 2858), False, 'from spikeinterface.core.job_tools import ChunkRecordingExecutor\n'), ((2937, 2959), 'numpy.concatenate', 'np.concatenate', (['spikes'], {}), '(spikes)\n', (2951, 2959), True, 'import numpy as np\n'), ((4175, 4273), 'spikeinterface.toolkit.get_chunk_with_margin', 'get_chunk_with_margin', (['recording_segment', 'start_frame', 'end_frame', 'None', 'margin'], {'add_zeros': '(True)'}), '(recording_segment, start_frame, end_frame, None,\n margin, add_zeros=True)\n', (4196, 4273), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((16805, 16922), 'spikeinterface.sortingcomponents.peak_detection.detect_peak_locally_exclusive', 'detect_peak_locally_exclusive', (['peak_traces', 'peak_sign', "d['abs_threholds']", "d['peak_shift']", "d['neighbours_mask']"], {}), "(peak_traces, peak_sign, d['abs_threholds'], d\n ['peak_shift'], d['neighbours_mask'])\n", (16834, 16922), False, 'from spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive, detect_peaks_by_channel\n'), ((17228, 17277), 'numpy.zeros', 'np.zeros', 
(['peak_sample_ind.size'], {'dtype': 'spike_dtype'}), '(peak_sample_ind.size, dtype=spike_dtype)\n', (17236, 17277), True, 'import numpy as np\n'), ((17493, 17523), 'numpy.zeros', 'np.zeros', (['possible_shifts.size'], {}), '(possible_shifts.size)\n', (17501, 17523), True, 'import numpy as np\n'), ((21779, 21797), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (21782, 21797), False, 'from numba import jit, prange\n'), ((22655, 22673), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (22658, 22673), False, 'from numba import jit, prange\n'), ((3282, 3307), 'spikeinterface.core.load_extractor', 'load_extractor', (['recording'], {}), '(recording)\n', (3296, 3307), False, 'from spikeinterface.core import load_extractor\n'), ((4344, 4371), 'threadpoolctl.threadpool_limits', 'threadpool_limits', ([], {'limits': '(1)'}), '(limits=1)\n', (4361, 4371), False, 'from threadpoolctl import threadpool_limits\n'), ((7031, 7063), 'spikeinterface.toolkit.get_channel_distances', 'get_channel_distances', (['recording'], {}), '(recording)\n', (7052, 7063), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((8676, 8775), 'spikeinterface.sortingcomponents.peak_detection.detect_peak_locally_exclusive', 'detect_peak_locally_exclusive', (['peak_traces', 'peak_sign', 'abs_threholds', 'n_shifts', 'neighbours_mask'], {}), '(peak_traces, peak_sign, abs_threholds,\n n_shifts, neighbours_mask)\n', (8705, 8775), False, 'from spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive, detect_peaks_by_channel\n'), ((8825, 8874), 'numpy.zeros', 'np.zeros', (['peak_sample_ind.size'], {'dtype': 'spike_dtype'}), '(peak_sample_ind.size, dtype=spike_dtype)\n', (8833, 8874), True, 'import numpy as np\n'), ((11920, 11952), 'spikeinterface.toolkit.get_channel_distances', 
'get_channel_distances', (['recording'], {}), '(recording)\n', (11941, 11952), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((12275, 12409), 'spikeinterface.toolkit.get_template_channel_sparsity', 'get_template_channel_sparsity', (['we'], {'method': '"""threshold"""', 'peak_sign': "d['peak_sign']", 'outputs': '"""index"""', 'threshold': "d['detect_threshold']"}), "(we, method='threshold', peak_sign=d[\n 'peak_sign'], outputs='index', threshold=d['detect_threshold'])\n", (12304, 12409), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((12501, 12558), 'numpy.zeros', 'np.zeros', (['(unit_ids.size, channel_ids.size)'], {'dtype': '"""bool"""'}), "((unit_ids.size, channel_ids.size), dtype='bool')\n", (12509, 12558), True, 'import numpy as np\n'), ((12828, 12904), 'spikeinterface.toolkit.get_template_extremum_channel', 'get_template_extremum_channel', (['we'], {'peak_sign': "d['peak_sign']", 'outputs': '"""index"""'}), "(we, peak_sign=d['peak_sign'], outputs='index')\n", (12857, 12904), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((12958, 13034), 'numpy.array', 'np.array', (['[extremum_channel[unit_id] for unit_id in unit_ids]'], {'dtype': '"""int64"""'}), "([extremum_channel[unit_id] for unit_id in unit_ids], dtype='int64')\n", (12966, 13034), True, 'import numpy as np\n'), ((13368, 13453), 'scipy.spatial.distance.cdist', 'scipy.spatial.distance.cdist', (['unit_locations', 'unit_locations'], {'metric': '"""euclidean"""'}), "(unit_locations, unit_locations, metric='euclidean'\n )\n", (13396, 13453), False, 'import scipy\n'), 
((14552, 14640), 'scipy.spatial.distance.cdist', 'scipy.spatial.distance.cdist', (['channel_locations', 'unit_locations'], {'metric': '"""euclidean"""'}), "(channel_locations, unit_locations, metric=\n 'euclidean')\n", (14580, 14640), False, 'import scipy\n'), ((15105, 15172), 'numpy.arange', 'np.arange', (["(-d['sample_shift'])", "(d['sample_shift'] + 1)"], {'dtype': '"""int64"""'}), "(-d['sample_shift'], d['sample_shift'] + 1, dtype='int64')\n", (15114, 15172), True, 'import numpy as np\n'), ((22144, 22186), 'numpy.zeros', 'np.zeros', (['(num_cluster,)'], {'dtype': 'np.float32'}), '((num_cluster,), dtype=np.float32)\n', (22152, 22186), True, 'import numpy as np\n'), ((22204, 22223), 'numba.prange', 'prange', (['num_cluster'], {}), '(num_cluster)\n', (22210, 22223), False, 'from numba import jit, prange\n'), ((26342, 26384), 'numpy.cumsum', 'np.cumsum', (['(channel_norms[idx] / total_norm)'], {}), '(channel_norms[idx] / total_norm)\n', (26351, 26384), True, 'import numpy as np\n'), ((26401, 26453), 'numpy.searchsorted', 'np.searchsorted', (['explained_norms', 'sparsify_threshold'], {}), '(explained_norms, sparsify_threshold)\n', (26416, 26453), True, 'import numpy as np\n'), ((26480, 26502), 'numpy.sort', 'np.sort', (['idx[:channel]'], {}), '(idx[:channel])\n', (26487, 26502), True, 'import numpy as np\n'), ((26909, 26949), 'numpy.zeros', 'np.zeros', (['nb_templates'], {'dtype': 'np.float32'}), '(nb_templates, dtype=np.float32)\n', (26917, 26949), True, 'import numpy as np\n'), ((32485, 32503), 'numpy.zeros', 'np.zeros', (['nb_peaks'], {}), '(nb_peaks)\n', (32493, 32503), True, 'import numpy as np\n'), ((32544, 32593), 'numpy.empty', 'np.empty', (['scalar_products.size'], {'dtype': 'spike_dtype'}), '(scalar_products.size, dtype=spike_dtype)\n', (32552, 32593), True, 'import numpy as np\n'), ((32686, 32734), 'numpy.zeros', 'np.zeros', (['(nb_peaks, nb_peaks)'], {'dtype': 'np.float32'}), '((nb_peaks, nb_peaks), dtype=np.float32)\n', (32694, 32734), True, 'import 
numpy as np\n'), ((32761, 32812), 'numpy.empty', 'np.empty', (['(2, scalar_products.size)'], {'dtype': 'np.int32'}), '((2, scalar_products.size), dtype=np.int32)\n', (32769, 32812), True, 'import numpy as np\n'), ((32831, 32860), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (32839, 32860), True, 'import numpy as np\n'), ((32888, 32937), 'numpy.zeros', 'np.zeros', (['scalar_products.shape'], {'dtype': 'np.float32'}), '(scalar_products.shape, dtype=np.float32)\n', (32896, 32937), True, 'import numpy as np\n'), ((33128, 33144), 'numpy.any', 'np.any', (['is_valid'], {}), '(is_valid)\n', (33134, 33144), True, 'import numpy as np\n'), ((36291, 36309), 'numpy.where', 'np.where', (['is_valid'], {}), '(is_valid)\n', (36299, 36309), True, 'import numpy as np\n'), ((36691, 36723), 'numpy.argsort', 'np.argsort', (["spikes['sample_ind']"], {}), "(spikes['sample_ind'])\n", (36701, 36723), True, 'import numpy as np\n'), ((39447, 39489), 'numpy.cumsum', 'np.cumsum', (['(channel_norms[idx] / total_norm)'], {}), '(channel_norms[idx] / total_norm)\n', (39456, 39489), True, 'import numpy as np\n'), ((39506, 39558), 'numpy.searchsorted', 'np.searchsorted', (['explained_norms', 'sparsify_threshold'], {}), '(explained_norms, sparsify_threshold)\n', (39521, 39558), True, 'import numpy as np\n'), ((39585, 39607), 'numpy.sort', 'np.sort', (['idx[:channel]'], {}), '(idx[:channel])\n', (39592, 39607), True, 'import numpy as np\n'), ((40100, 40140), 'numpy.zeros', 'np.zeros', (['nb_templates'], {'dtype': 'np.float32'}), '(nb_templates, dtype=np.float32)\n', (40108, 40140), True, 'import numpy as np\n'), ((42746, 42793), 'numpy.sum', 'np.sum', (['((good < bounds[0]) | (good > bounds[1]))'], {}), '((good < bounds[0]) | (good > bounds[1]))\n', (42752, 42793), True, 'import numpy as np\n'), ((42807, 42854), 'numpy.sum', 'np.sum', (['((bounds[0] <= bad) & (bad <= bounds[1]))'], {}), '((bounds[0] <= bad) & (bad <= bounds[1]))\n', (42813, 42854), True, 
'import numpy as np\n'), ((42868, 42917), 'numpy.sum', 'np.sum', (['((bounds[0] <= good) & (good <= bounds[1]))'], {}), '((bounds[0] <= good) & (good <= bounds[1]))\n', (42874, 42917), True, 'import numpy as np\n'), ((42931, 42976), 'numpy.sum', 'np.sum', (['((bad < bounds[0]) | (bad > bounds[1]))'], {}), '((bad < bounds[0]) | (bad > bounds[1]))\n', (42937, 42976), True, 'import numpy as np\n'), ((43996, 44041), 'numpy.zeros', 'np.zeros', (['(nb_templates, 2)'], {'dtype': 'np.float32'}), '((nb_templates, 2), dtype=np.float32)\n', (44004, 44041), True, 'import numpy as np\n'), ((46926, 47034), 'spikeinterface.toolkit.get_random_data_chunks', 'get_random_data_chunks', (['recording'], {'num_chunks_per_segment': 'nb_chunks', 'chunk_size': "d['nb_samples']", 'seed': '(42)'}), "(recording, num_chunks_per_segment=nb_chunks,\n chunk_size=d['nb_samples'], seed=42)\n", (46948, 47034), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((48434, 48506), 'spikeinterface.sortingcomponents.peak_detection.detect_peaks_by_channel', 'detect_peaks_by_channel', (['peak_traces', 'peak_sign', 'abs_threholds', 'n_shifts'], {}), '(peak_traces, peak_sign, abs_threholds, n_shifts)\n', (48457, 48506), False, 'from spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive, detect_peaks_by_channel\n'), ((49843, 49892), 'numpy.empty', 'np.empty', (['scalar_products.size'], {'dtype': 'spike_dtype'}), '(scalar_products.size, dtype=spike_dtype)\n', (49851, 49892), True, 'import numpy as np\n'), ((50216, 50232), 'numpy.any', 'np.any', (['is_valid'], {}), '(is_valid)\n', (50222, 50232), True, 'import numpy as np\n'), ((51768, 51800), 'numpy.argsort', 'np.argsort', (["spikes['sample_ind']"], {}), "(spikes['sample_ind'])\n", (51778, 51800), True, 'import numpy as np\n'), ((6875, 6930), 'spikeinterface.toolkit.get_noise_levels', 
'get_noise_levels', (['recording'], {}), "(recording, **d['random_chunk_kwargs'])\n", (6891, 6930), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((7799, 7837), 'spikeinterface.core.WaveformExtractor.load_from_folder', 'WaveformExtractor.load_from_folder', (['we'], {}), '(we)\n', (7833, 7837), False, 'from spikeinterface.core import WaveformExtractor\n'), ((9361, 9376), 'numpy.argmin', 'np.argmin', (['dist'], {}), '(dist)\n', (9370, 9376), True, 'import numpy as np\n'), ((11788, 11815), 'spikeinterface.toolkit.get_noise_levels', 'get_noise_levels', (['recording'], {}), '(recording)\n', (11804, 11815), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((13624, 13663), 'numpy.argsort', 'np.argsort', (['unit_distances[unit_ind, :]'], {}), '(unit_distances[unit_ind, :])\n', (13634, 13663), True, 'import numpy as np\n'), ((13792, 13830), 'numpy.array', 'np.array', (["closest_u[:d['num_closest']]"], {}), "(closest_u[:d['num_closest']])\n", (13800, 13830), True, 'import numpy as np\n'), ((13903, 13950), 'numpy.nonzero', 'np.nonzero', (["d['template_sparsity'][unit_ind, :]"], {}), "(d['template_sparsity'][unit_ind, :])\n", (13913, 13950), True, 'import numpy as np\n'), ((14864, 14909), 'numpy.nonzero', 'np.nonzero', (['near_cluster_mask[channel_ind, :]'], {}), '(near_cluster_mask[channel_ind, :])\n', (14874, 14909), True, 'import numpy as np\n'), ((16201, 16227), 'numpy.concatenate', 'np.concatenate', (['all_spikes'], {}), '(all_spikes)\n', (16215, 16227), True, 'import numpy as np\n'), ((16248, 16284), 'numpy.argsort', 'np.argsort', (["all_spikes['sample_ind']"], {}), "(all_spikes['sample_ind'])\n", (16258, 16284), True, 'import numpy as np\n'), ((16367, 16397), 'numpy.zeros', 
'np.zeros', (['(0)'], {'dtype': 'spike_dtype'}), '(0, dtype=spike_dtype)\n', (16375, 16397), True, 'import numpy as np\n'), ((17086, 17108), 'numpy.abs', 'np.abs', (['peak_amplitude'], {}), '(peak_amplitude)\n', (17092, 17108), True, 'import numpy as np\n'), ((18689, 18749), 'numpy.any', 'np.any', (["d['template_sparsity'][possible_clusters, :]"], {'axis': '(0)'}), "(d['template_sparsity'][possible_clusters, :], axis=0)\n", (18695, 18749), True, 'import numpy as np\n'), ((26184, 26216), 'numpy.linalg.norm', 'np.linalg.norm', (['template'], {'axis': '(0)'}), '(template, axis=0)\n', (26198, 26216), True, 'import numpy as np\n'), ((26241, 26265), 'numpy.linalg.norm', 'np.linalg.norm', (['template'], {}), '(template)\n', (26255, 26265), True, 'import numpy as np\n'), ((26284, 26309), 'numpy.argsort', 'np.argsort', (['channel_norms'], {}), '(channel_norms)\n', (26294, 26309), True, 'import numpy as np\n'), ((27429, 27461), 'numpy.linalg.norm', 'np.linalg.norm', (['templates[count]'], {}), '(templates[count])\n', (27443, 27461), True, 'import numpy as np\n'), ((27584, 27606), 'numpy.sum', 'np.sum', (['(templates != 0)'], {}), '(templates != 0)\n', (27590, 27606), True, 'import numpy as np\n'), ((27720, 27754), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['templates'], {}), '(templates)\n', (27743, 27754), False, 'import scipy\n'), ((28577, 28622), 'tqdm.tqdm', 'tqdm', (['all_delays'], {'desc': '"""[1] compute overlaps"""'}), "(all_delays, desc='[1] compute overlaps')\n", (28581, 28622), False, 'from tqdm import tqdm\n'), ((29347, 29372), 'scipy.sparse.hstack', 'scipy.sparse.hstack', (['data'], {}), '(data)\n', (29366, 29372), False, 'import scipy\n'), ((30000, 30055), 'spikeinterface.toolkit.get_noise_levels', 'get_noise_levels', (['recording'], {}), "(recording, **d['random_chunk_kwargs'])\n", (30016, 30055), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, 
get_template_extremum_channel, get_random_data_chunks\n'), ((31242, 31262), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (31250, 31262), True, 'import numpy as np\n'), ((31924, 31976), 'numpy.empty', 'np.empty', (['(nb_templates, nb_peaks)'], {'dtype': 'np.float32'}), '((nb_templates, nb_peaks), dtype=np.float32)\n', (31932, 31976), True, 'import numpy as np\n'), ((33258, 33334), 'numpy.unravel_index', 'np.unravel_index', (['idx_lookup[is_valid][best_amplitude_ind]', 'idx_lookup.shape'], {}), '(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)\n', (33274, 33334), True, 'import numpy as np\n'), ((39289, 39321), 'numpy.linalg.norm', 'np.linalg.norm', (['template'], {'axis': '(0)'}), '(template, axis=0)\n', (39303, 39321), True, 'import numpy as np\n'), ((39346, 39370), 'numpy.linalg.norm', 'np.linalg.norm', (['template'], {}), '(template)\n', (39360, 39370), True, 'import numpy as np\n'), ((39389, 39414), 'numpy.argsort', 'np.argsort', (['channel_norms'], {}), '(channel_norms)\n', (39399, 39414), True, 'import numpy as np\n'), ((40628, 40660), 'numpy.linalg.norm', 'np.linalg.norm', (['templates[count]'], {}), '(templates[count])\n', (40642, 40660), True, 'import numpy as np\n'), ((40783, 40805), 'numpy.sum', 'np.sum', (['(templates != 0)'], {}), '(templates != 0)\n', (40789, 40805), True, 'import numpy as np\n'), ((40919, 40953), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['templates'], {}), '(templates)\n', (40942, 40953), False, 'import scipy\n'), ((41776, 41821), 'tqdm.tqdm', 'tqdm', (['all_delays'], {'desc': '"""[1] compute overlaps"""'}), "(all_delays, desc='[1] compute overlaps')\n", (41780, 41821), False, 'from tqdm import tqdm\n'), ((42546, 42571), 'scipy.sparse.hstack', 'scipy.sparse.hstack', (['data'], {}), '(data)\n', (42565, 42571), False, 'import scipy\n'), ((43922, 43968), 'tqdm.tqdm', 'tqdm', (['all_units'], {'desc': '"""[2] compute amplitudes"""'}), "(all_units, desc='[2] compute amplitudes')\n", (43926, 
43968), False, 'from tqdm import tqdm\n'), ((44566, 44601), 'numpy.concatenate', 'np.concatenate', (['(bad, noise[count])'], {}), '((bad, noise[count]))\n', (44580, 44601), True, 'import numpy as np\n'), ((44763, 44867), 'scipy.optimize.differential_evolution', 'scipy.optimize.differential_evolution', (['cls._cost_function_mcc'], {'bounds': 'cost_bounds', 'args': 'cost_kwargs'}), '(cls._cost_function_mcc, bounds=\n cost_bounds, args=cost_kwargs)\n', (44800, 44867), False, 'import scipy\n'), ((46070, 46125), 'spikeinterface.toolkit.get_noise_levels', 'get_noise_levels', (['recording'], {}), "(recording, **d['random_chunk_kwargs'])\n", (46086, 46125), False, 'from spikeinterface.toolkit import get_noise_levels, get_template_channel_sparsity, get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks\n'), ((48928, 48972), 'numpy.unique', 'np.unique', (['jittered_peaks'], {'return_index': '(True)'}), '(jittered_peaks, return_index=True)\n', (48937, 48972), True, 'import numpy as np\n'), ((49087, 49132), 'numpy.unique', 'np.unique', (['peak_sample_ind'], {'return_index': '(True)'}), '(peak_sample_ind, return_index=True)\n', (49096, 49132), True, 'import numpy as np\n'), ((49460, 49497), 'numpy.arange', 'np.arange', (["(-d['nbefore'])", "d['nafter']"], {}), "(-d['nbefore'], d['nafter'])\n", (49469, 49497), True, 'import numpy as np\n'), ((49757, 49802), 'numpy.zeros', 'np.zeros', (['(nb_templates, 0)'], {'dtype': 'np.float32'}), '((nb_templates, 0), dtype=np.float32)\n', (49765, 49802), True, 'import numpy as np\n'), ((50346, 50422), 'numpy.unravel_index', 'np.unravel_index', (['idx_lookup[is_valid][best_amplitude_ind]', 'idx_lookup.shape'], {}), '(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)\n', (50362, 50422), True, 'import numpy as np\n'), ((50715, 50782), 'numpy.searchsorted', 'np.searchsorted', (['peak_data', '[-neighbor_window, neighbor_window + 1]'], {}), '(peak_data, [-neighbor_window, neighbor_window + 
1])\n', (50730, 50782), True, 'import numpy as np\n'), ((9277, 9326), 'numpy.sum', 'np.sum', (['((templates - wf[None, :, :]) ** 2)'], {'axis': '(1)'}), '((templates - wf[None, :, :]) ** 2, axis=1)\n', (9283, 9326), True, 'import numpy as np\n'), ((14204, 14220), 'numpy.sum', 'np.sum', (['(vec ** 2)'], {}), '(vec ** 2)\n', (14210, 14220), True, 'import numpy as np\n'), ((15938, 15950), 'numpy.any', 'np.any', (['keep'], {}), '(keep)\n', (15944, 15950), True, 'import numpy as np\n'), ((19160, 19181), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (19170, 19181), True, 'import numpy as np\n'), ((20121, 20147), 'numpy.argmin', 'np.argmin', (['distances_shift'], {}), '(distances_shift)\n', (20130, 20147), True, 'import numpy as np\n'), ((29012, 29083), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['(nb_templates, nb_templates)'], {'dtype': 'np.float32'}), '((nb_templates, nb_templates), dtype=np.float32)\n', (29035, 29083), False, 'import scipy\n'), ((32206, 32297), 'scipy.signal.fftconvolve', 'scipy.signal.fftconvolve', (['kernel_filter', 'traces[:, sparsities[i]]'], {'axes': '(0)', 'mode': '"""valid"""'}), "(kernel_filter, traces[:, sparsities[i]], axes=0,\n mode='valid')\n", (32230, 32297), False, 'import scipy\n'), ((32615, 32646), 'numpy.arange', 'np.arange', (['scalar_products.size'], {}), '(scalar_products.size)\n', (32624, 32646), True, 'import numpy as np\n'), ((34105, 34165), 'numpy.zeros', 'np.zeros', (['(2 * M.shape[0], 2 * M.shape[1])'], {'dtype': 'np.float32'}), '((2 * M.shape[0], 2 * M.shape[1]), dtype=np.float32)\n', (34113, 34165), True, 'import numpy as np\n'), ((34316, 34475), 'scipy.linalg.solve_triangular', 'scipy.linalg.solve_triangular', (['M[:mb_selection, :mb_selection]', 'M[mb_selection, :mb_selection]'], {'trans': '(0)', 'lower': '(1)', 'overwrite_b': '(True)', 'check_finite': '(False)'}), '(M[:mb_selection, :mb_selection], M[\n mb_selection, :mb_selection], trans=0, lower=1, overwrite_b=True,\n 
check_finite=False)\n', (34345, 34475), False, 'import scipy\n'), ((34724, 34738), 'numpy.sqrt', 'np.sqrt', (['(1 - v)'], {}), '(1 - v)\n', (34731, 34738), True, 'import numpy as np\n'), ((42211, 42282), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['(nb_templates, nb_templates)'], {'dtype': 'np.float32'}), '((nb_templates, nb_templates), dtype=np.float32)\n', (42234, 42282), False, 'import scipy\n'), ((43392, 43445), 'numpy.abs', 'np.abs', (['(1 - (bounds[1] - bounds[0]) / delta_amplitude)'], {}), '(1 - (bounds[1] - bounds[0]) / delta_amplitude)\n', (43398, 43445), True, 'import numpy as np\n'), ((48593, 48619), 'numpy.arange', 'np.arange', (['(-jitter)', 'jitter'], {}), '(-jitter, jitter)\n', (48602, 48619), True, 'import numpy as np\n'), ((48683, 48703), 'numpy.zeros', 'np.zeros', (['(2 * jitter)'], {}), '(2 * jitter)\n', (48691, 48703), True, 'import numpy as np\n'), ((49274, 49313), 'sklearn.feature_extraction.image.extract_patches_2d', 'extract_patches_2d', (['traces', 'patch_sizes'], {}), '(traces, patch_sizes)\n', (49292, 49313), False, 'from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d\n'), ((49914, 49945), 'numpy.arange', 'np.arange', (['scalar_products.size'], {}), '(scalar_products.size)\n', (49923, 49945), True, 'import numpy as np\n'), ((20663, 20694), 'numpy.sum', 'np.sum', (['(centered * other_vector)'], {}), '(centered * other_vector)\n', (20669, 20694), True, 'import numpy as np\n'), ((31779, 31876), 'scipy.signal.fftconvolve', 'scipy.signal.fftconvolve', (['kernel_filters', 'traces[np.newaxis, :, :]'], {'axes': '(0, 1)', 'mode': '"""valid"""'}), "(kernel_filters, traces[np.newaxis, :, :], axes=(0,\n 1), mode='valid')\n", (31803, 31876), False, 'import scipy\n'), ((43085, 43099), 'numpy.sqrt', 'np.sqrt', (['denom'], {}), '(denom)\n', (43092, 43099), True, 'import numpy as np\n'), ((13688, 13712), 'numpy.arange', 'np.arange', (['unit_ids.size'], {}), '(unit_ids.size)\n', (13697, 13712), True, 
'import numpy as np\n'), ((14352, 14380), 'numpy.sum', 'np.sum', (['(template_sparse ** 2)'], {}), '(template_sparse ** 2)\n', (14358, 14380), True, 'import numpy as np\n'), ((20722, 20731), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (20728, 20731), True, 'import numpy as np\n'), ((33694, 33709), 'numpy.abs', 'np.abs', (['delta_t'], {}), '(delta_t)\n', (33700, 33709), True, 'import numpy as np\n'), ((35045, 35068), 'numpy.abs', 'np.abs', (['diff_amplitudes'], {}), '(diff_amplitudes)\n', (35051, 35068), True, 'import numpy as np\n'), ((44446, 44462), 'numpy.arange', 'np.arange', (['count'], {}), '(count)\n', (44455, 44462), True, 'import numpy as np\n'), ((44464, 44498), 'numpy.arange', 'np.arange', (['(count + 1)', 'nb_templates'], {}), '(count + 1, nb_templates)\n', (44473, 44498), True, 'import numpy as np\n')] |
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Preliminary MINC2 support
Use with care; I haven't tested this against a wide range of MINC files.
If you have a file that isn't read correctly, please send an example.
Test reading with something like::
import nibabel as nib
img = nib.load('my_funny.mnc')
data = img.get_data()
print(data.mean())
print(data.max())
print(data.min())
and compare against command line output of::
mincstats my_funny.mnc
"""
import numpy as np
from .optpkg import optional_package
h5py, have_h5py, setup_module = optional_package('h5py')
from .minc1 import Minc1File, Minc1Image, MincError
class Hdf5Bunch(object):
    """Expose the HDF5 attributes of `var` as plain Python attributes."""
    def __init__(self, var):
        # `var.attrs.items()` yields (name, value) pairs; copying them into
        # __dict__ is equivalent to setattr for a plain object subclass.
        self.__dict__.update(var.attrs.items())
class Minc2File(Minc1File):
    ''' Class to wrap MINC2 format file
    Although it has some of the same methods as a ``Header``, we use
    this only when reading a MINC2 file, to pull out useful header
    information, and for the method of reading the data out
    '''
    def __init__(self, mincfile):
        """Wrap an opened MINC2 (HDF5) file-like object.

        Parameters
        ----------
        mincfile : mapping
            Open HDF5 file (h5py.File) following the MINC 2.0 layout.
        """
        self._mincfile = mincfile
        minc_part = mincfile['minc-2.0']
        # The whole image is the first of the entries in 'image'
        image = minc_part['image']['0']
        self._image = image['image']
        self._dim_names = self._get_dimensions(self._image)
        dimensions = minc_part['dimensions']
        self._dims = [Hdf5Bunch(dimensions[s]) for s in self._dim_names]
        # We don't currently support irregular spacing
        # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Dimension_variable_attributes
        for dim in self._dims:
            if dim.spacing != b'regular__':
                raise ValueError('Irregular spacing not supported')
        self._spatial_dims = [name for name in self._dim_names
                              if name.endswith('space')]
        self._image_max = image['image-max']
        self._image_min = image['image-min']

    def _get_dimensions(self, var):
        # Dimensions for a particular variable
        # Differs for MINC1 and MINC2 - see:
        # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Associating_HDF5_dataspaces_with_MINC_dimensions
        return var.attrs['dimorder'].split(',')

    def get_data_dtype(self):
        """Return numpy dtype of the on-disk image data."""
        return self._image.dtype

    def get_data_shape(self):
        """Return shape of the on-disk image data."""
        return self._image.shape

    def _get_valid_range(self):
        ''' Return valid range for image data
        The valid range can come from the image 'valid_range' or
        failing that, from the data type range
        '''
        ddt = self.get_data_dtype()
        # BUGFIX: np.iinfo raises for floating-point dtypes; use np.finfo there.
        info = np.finfo(ddt.type) if ddt.kind == 'f' else np.iinfo(ddt.type)
        try:
            valid_range = self._image.attrs['valid_range']
        except (AttributeError, KeyError):
            # BUGFIX: h5py attribute lookup raises KeyError when the
            # 'valid_range' attribute is absent; the original only caught
            # AttributeError, so missing attributes crashed the load.
            valid_range = [info.min, info.max]
        else:
            if valid_range[0] < info.min or valid_range[1] > info.max:
                raise ValueError('Valid range outside input '
                                 'data type range')
        # BUGFIX: np.float was removed in NumPy 1.24; it was an alias of the
        # builtin float, i.e. float64.
        return np.asarray(valid_range, dtype=np.float64)

    def get_scaled_data(self):
        """Return the image data scaled via the parent's _normalize."""
        data = np.asarray(self._image)
        return self._normalize(data)
class Minc2Image(Minc1Image):
    ''' Class for MINC2 images
    The MINC2 image class uses the default header type, rather than a
    specific MINC header type - and reads the relevant information from
    the MINC file on load.
    '''
    # MINC2 does not use whole-file compression.
    _compressed_exts = ()

    @classmethod
    def from_file_map(cls, file_map):
        """Create an image instance from `file_map`; needs a real filename."""
        image_holder = file_map['image']
        if image_holder.filename is None:
            raise MincError('MINC2 needs filename for load')
        minc_file = Minc2File(h5py.File(image_holder.filename, 'r'))
        affine = minc_file.get_affine()
        if affine.shape != (4, 4):
            raise MincError('Image does not have 3 spatial dimensions')
        header = cls.header_class(minc_file.get_data_dtype(),
                                  minc_file.get_data_shape(),
                                  minc_file.get_zooms())
        data = cls.ImageArrayProxy(minc_file)
        return cls(data, affine, header, extra=None, file_map=file_map)
# Module-level convenience alias: exposes Minc2Image.load as this module's
# `load` entry point.
load = Minc2Image.load
| [
"numpy.asarray",
"numpy.iinfo"
] | [((3122, 3140), 'numpy.iinfo', 'np.iinfo', (['ddt.type'], {}), '(ddt.type)\n', (3130, 3140), True, 'import numpy as np\n'), ((3505, 3544), 'numpy.asarray', 'np.asarray', (['valid_range'], {'dtype': 'np.float'}), '(valid_range, dtype=np.float)\n', (3515, 3544), True, 'import numpy as np\n'), ((3593, 3616), 'numpy.asarray', 'np.asarray', (['self._image'], {}), '(self._image)\n', (3603, 3616), True, 'import numpy as np\n')] |
"""
<NAME>
This file contains util functions
"""
import networkx as nx
import ujson as json
import numpy as np
import torch
import os
import queue
import shutil
import logging
import tqdm
from sklearn.metrics import roc_curve,auc
import torch.utils.data as data
from torch_geometric.data import Data,Batch
from sklearn.model_selection import train_test_split
def reindex_nx_graph(G, ordered_node_list):
    """Relabel the nodes of an nx graph to consecutive integers.

    Args:
        G: nx graph object
        ordered_node_list: node ordering; a node's position in this list
            becomes its new label (the first node becomes 0)
    """
    mapping = {node: new_id for new_id, node in enumerate(ordered_node_list)}
    return nx.relabel_nodes(G, mapping)
def save_json(filename, obj, message=None, ascii=True):
    """Serialize `obj` to `filename` in JSON format.

    Args:
        filename(str): destination path (including file name)
        obj(object): data to serialize
        message(str): optional text echoed to stdout before saving
        ascii(bool): escape non-ASCII characters when True
    """
    if message is not None:
        print(f"Saving {message}...")
    with open(filename, "w") as out_fh:
        out_fh.write(json.dumps(obj, ensure_ascii=ascii))
def nx_to_graph_data(G, x, y):
    """Convert an nx graph to a torch_geometric Data object.

    Args:
        G(networkx): nx graph
        x(numpy.array): node feature matrix indexed by node id
        y: graph-level label
    """
    edge_index = torch.from_numpy(np.array(G.edges).T).to(torch.long)
    edge_types = list(nx.get_edge_attributes(G, "edge_type").values())
    if not edge_types:
        return Data(x=x, edge_index=edge_index, edge_attr=None, y=y)
    type_tensor = torch.from_numpy(np.array(edge_types)).to(torch.long)
    return Data(x=x, edge_index=edge_index, edge_attr=type_tensor, y=y)
def make_ad_dataset(ad_data_path, group, val_ratio, test_ratio, seed=234):
    """Load the AD scRNA dataset and split it into train/val/test subsets.

    Args:
        ad_data_path(str): file path of ad data (.npz archive)
        group(str): which group used for training or testing, 'male' or 'female'
        val_ratio(float): ratio of validation dataset
        test_ratio(float): ratio of test dataset
        seed(int): random seed for both stratified splits
    """
    ad_data = np.load(ad_data_path, allow_pickle=True)
    # Map group name -> (positive key, negative key) inside the archive.
    group_keys = {"female": ("ex_early_female", "ex_normal_female"),
                  "male": ("ex_early_male", "ex_normal_male")}
    if group not in group_keys:
        raise ValueError("invaild scRNA data group")
    pos_key, neg_key = group_keys[group]
    positive_data = ad_data[pos_key]
    negative_data = ad_data[neg_key]
    expression_data = np.concatenate([positive_data, negative_data], axis=0)
    y = np.concatenate([np.ones([positive_data.shape[0]]),
                        np.zeros([negative_data.shape[0]])], axis=0)
    holdout = val_ratio + test_ratio
    train_expression, remain_expression, train_y, remain_y = train_test_split(
        expression_data, y, test_size=holdout, stratify=y, random_state=seed)
    val_expression, test_expression, val_y, test_y = train_test_split(
        remain_expression, remain_y, test_size=test_ratio / holdout,
        stratify=remain_y, random_state=seed)
    A = ad_data["A"].tolist()
    gene_list = ad_data["gene_list"]
    gene_feature = ad_data["gene_feature"]
    return (train_expression, train_y, val_expression, val_y,
            test_expression, test_y, A, gene_feature, gene_list)
class LoadTrainDataset(data.Dataset):
    """Dataset of per-sample gene graphs for AD training.

    Each sample's expression values are appended as an extra feature column
    to the shared gene feature matrix, and each sample becomes one graph over
    the shared gene-gene adjacency.

    Args:
        expression(numpy.array): scRNA expression data
        y(numpy.array): prediction label
        A: adjacency matrix (scipy sparse)
        gene_feature(numpy.array): gene feature matrix
        gene_list(list): gene name list
    """
    def __init__(self, expression, y, A, gene_feature, gene_list):
        super(LoadTrainDataset, self).__init__()
        self.gene_list = gene_list
        expr = torch.from_numpy(expression).float()
        self.length = expr.size(0)
        self.y = torch.from_numpy(y).long()
        gene_graph = nx.from_scipy_sparse_matrix(A)
        feats = torch.from_numpy(gene_feature).float()
        graphs = []
        for sample_idx in range(self.length):
            # Concatenate this sample's expression as one more feature column.
            node_x = torch.cat([feats, expr[sample_idx].unsqueeze(-1)], dim=-1)
            graphs.append(nx_to_graph_data(gene_graph, node_x,
                                           self.y[sample_idx].view(-1)))
        self.data_list = Batch.from_data_list(graphs)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return self.data_list[idx]
def collate_fn(examples):
    """DataLoader collate hook: merge a list of Data objects into one Batch."""
    return Batch.from_data_list(examples)
class AverageMeter:
    """Track a running, sample-weighted average of a scalar quantity.
    Adapted from:
    > https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """
    def __init__(self):
        self.count = 0
        self.sum = 0
        self.avg = 0

    def reset(self):
        """Clear all accumulated statistics."""
        self.__init__()

    def update(self, val, num_samples=1):
        """Fold in `val`, which is the average over `num_samples` samples.

        Args:
            val (float): Average value to merge into the running mean.
            num_samples (int): Number of samples `val` was averaged over.
        """
        self.sum += val * num_samples
        self.count += num_samples
        self.avg = self.sum / self.count
class EMA:
    """Exponential moving average of model parameters.

    Args:
        model (torch.nn.Module): Model with parameters whose EMA will be kept.
        decay (float): Decay rate for exponential moving average.
    """
    def __init__(self, model, decay):
        self.decay = decay
        self.original = {}
        # Seed the shadow copies from the current trainable parameters.
        self.shadow = {name: param.data.clone()
                       for name, param in model.named_parameters()
                       if param.requires_grad}

    def __call__(self, model, num_updates):
        # Warm-up: effective decay is smaller early in training.
        decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            updated = (1.0 - decay) * param.data + decay * self.shadow[name]
            self.shadow[name] = updated.clone()

    def assign(self, model):
        """Swap model parameters for their EMA values, stashing originals.

        Args:
            model (torch.nn.Module): Model to assign parameter values.
        """
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            self.original[name] = param.data.clone()
            param.data = self.shadow[name]

    def resume(self, model):
        """Restore the parameters stashed by the last `assign` call.

        Args:
            model (torch.nn.Module): Model to assign parameter values.
        """
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            param.data = self.original[name]
class CheckpointSaver:
    """Class to save and load model checkpoints.
    Save the best checkpoints as measured by a metric value passed into the
    `save` method. Overwrite checkpoints with better checkpoints once
    `max_checkpoints` have been saved.
    Args:
        save_dir (str): Directory to save checkpoints.
        max_checkpoints (int): Maximum number of checkpoints to keep before
            overwriting old ones.
        metric_name (str): Name of metric used to determine best model.
        maximize_metric (bool): If true, best checkpoint is that which maximizes
            the metric value passed in via `save`. Otherwise, best checkpoint
            minimizes the metric.
        log (logging.Logger): Optional logger for printing information.
    """
    def __init__(self, save_dir, max_checkpoints, metric_name,
                 maximize_metric=False, log=None):
        super(CheckpointSaver, self).__init__()
        self.save_dir = save_dir
        self.max_checkpoints = max_checkpoints
        self.metric_name = metric_name
        self.maximize_metric = maximize_metric
        # Best metric value seen so far; None until the first save.
        self.best_val = None
        # Priority queue of (priority, path); get() returns the WORST
        # checkpoint because priorities are negated when minimizing.
        self.ckpt_paths = queue.PriorityQueue()
        self.log = log
        self._print(f"Saver will {'max' if maximize_metric else 'min'}imize {metric_name}...")
    def is_best(self, metric_val):
        """Check whether `metric_val` is the best seen so far.
        Args:
            metric_val (float): Metric value to compare to prior checkpoints.
        """
        if metric_val is None:
            # No metric reported
            return False
        if self.best_val is None:
            # No checkpoint saved yet
            return True
        return ((self.maximize_metric and self.best_val < metric_val)
                or (not self.maximize_metric and self.best_val > metric_val))
    def _print(self, message):
        """Print a message if logging is enabled."""
        if self.log is not None:
            self.log.info(message)
    def save(self, step, model_dict, metric_val, device):
        """Save model parameters to disk.
        Args:
            step (int): Total number of examples seen during training so far.
            model_dict (dict): Mapping from a name suffix to the model to
                save under that name.
            metric_val (float): Determines whether checkpoint is best so far.
            device (torch.device): Device where model resides.
        """
        # Common filename prefix for this step; each model is written to
        # '<prefix><name>.pth.tar' (note: name is appended directly, with no
        # separator, to the 'step_<step>' prefix).
        checkpoint_path = os.path.join(self.save_dir,f'step_{step}')
        for name,model in model_dict.items():
            ckpt_dict = {
                'model_name': model.__class__.__name__,
                'model_state': model.cpu().state_dict(),
                'step': step
            }
            # model.cpu() above moved parameters to CPU for serialization;
            # move them back to the training device.
            model.to(device)
            torch.save(ckpt_dict, f"{checkpoint_path}{name}.pth.tar")
        self._print(f'Saved checkpoint: {checkpoint_path}')
        if self.is_best(metric_val):
            # Save the best model
            self.best_val = metric_val
            # 'best' is also a filename prefix, like checkpoint_path above.
            best_path = os.path.join(self.save_dir, 'best')
            for name in model_dict.keys():
                shutil.copy(f"{checkpoint_path}{name}.pth.tar", f"{best_path}{name}.pth.tar")
            self._print(f'New best checkpoint at step {step}...')
        # Add checkpoint path to priority queue (lowest priority removed first)
        if self.maximize_metric:
            priority_order = metric_val
        else:
            priority_order = -metric_val
        self.ckpt_paths.put((priority_order, checkpoint_path))
        # Remove a checkpoint if more than max_checkpoints have been saved
        if self.ckpt_paths.qsize() > self.max_checkpoints:
            _, worst_ckpt = self.ckpt_paths.get()
            try:
                for name in model_dict.keys():
                    os.remove(f"{worst_ckpt}{name}.pth.tar")
                self._print(f'Removed checkpoint: {worst_ckpt}')
            except OSError:
                # Avoid crashing if checkpoint has been removed or protected
                pass
def load_model(model, checkpoint_path, gpu_ids, return_step=True):
    """Load model parameters from disk.
    Args:
        model (torch.nn.DataParallel): Load parameters into this model.
        checkpoint_path (str): Path to checkpoint to load.
        gpu_ids (list): GPU IDs for DataParallel.
        return_step (bool): Also return the step at which checkpoint was saved.
    Returns:
        model (torch.nn.DataParallel): Model loaded from checkpoint.
        step (int): Step at which checkpoint was saved. Only if `return_step`.
    """
    # Map tensors onto the first requested GPU, or onto the CPU if none.
    map_loc = f"cuda:{gpu_ids[0]}" if gpu_ids else 'cpu'
    ckpt = torch.load(checkpoint_path, map_location=map_loc)
    model.load_state_dict(ckpt['model_state'])
    if return_step:
        return model, ckpt['step']
    return model
def get_available_devices() -> object:
    """Get IDs of all available GPUs.
    Returns:
        device (torch.device): Main device (GPU 0 or CPU).
        gpu_ids (list): List of IDs of all GPUs that are available.
    """
    if torch.cuda.is_available():
        gpu_ids = list(range(torch.cuda.device_count()))
        device = torch.device(f'cuda:{gpu_ids[0]}')
        # Make the first GPU the default for subsequent allocations.
        torch.cuda.set_device(device)
    else:
        gpu_ids = []
        device = torch.device('cpu')
    return device, gpu_ids
def get_save_dir(base_dir, name, type, id_max=100):
    """Get a unique save directory by appending the smallest positive integer
    `id < id_max` that is not already taken (i.e., no dir exists with that id).
    Args:
        base_dir (str): Base directory in which to make save directories.
        name (str): Name to identify this training run. Need not be unique.
        type (str): Subdirectory name under `base_dir` (run category).
        id_max (int): Maximum ID number before raising an exception.
    Returns:
        save_dir (str): Path to a newly created directory with a unique name.
    """
    for uid in range(1, id_max):
        save_dir = os.path.join(base_dir, type, f'{name}-{uid:02d}')
        try:
            # Create atomically. The original exists()-then-makedirs()
            # sequence was racy when several runs start concurrently.
            os.makedirs(save_dir)
        except FileExistsError:
            continue
        return save_dir
    raise RuntimeError('Too many save directories created with the same name. \
                       Delete old save directories or use another name.')
def get_logger(log_dir, name):
    """Get a `logging.Logger` instance that prints to the console
    and an auxiliary file.
    Args:
        log_dir (str): Directory in which to create the log file.
        name (str): Name to identify the logs.
    Returns:
        logger (logging.Logger): Logger instance for logging events.
    """
    class StreamHandlerWithTQDM(logging.Handler):
        """Let `logging` print without breaking `tqdm` progress bars.
        See Also:
            > https://stackoverflow.com/questions/38543506
        """
        def emit(self, record):
            try:
                msg = self.format(record)
                # NOTE(review): the module-level import is
                # `from tqdm import tqdm_notebook as tqdm`, so `tqdm.tqdm`
                # may not resolve; any AttributeError lands in handleError
                # below. Confirm a plain `import tqdm` was intended.
                tqdm.tqdm.write(msg)
                self.flush()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)
    # Create logger
    # NOTE(review): calling this twice with the same `name` appends duplicate
    # handlers to the same logger, doubling every log line — confirm callers
    # only invoke it once per name.
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Log everything (i.e., DEBUG level and above) to a file
    log_path = os.path.join(log_dir, 'log.txt')
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.DEBUG)
    # Log everything except DEBUG level (i.e., INFO level and above) to console
    console_handler = StreamHandlerWithTQDM()
    console_handler.setLevel(logging.INFO)
    # Create format for the logs
    file_formatter = logging.Formatter('[%(asctime)s] %(message)s',
                                      datefmt='%m.%d.%y %H:%M:%S')
    file_handler.setFormatter(file_formatter)
    console_formatter = logging.Formatter('[%(asctime)s] %(message)s',
                                         datefmt='%m.%d.%y %H:%M:%S')
    console_handler.setFormatter(console_formatter)
    # add the handlers to the logger
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
def torch_from_json(path, dtype=torch.float32):
    """Load a PyTorch Tensor from a JSON file.
    Args:
        path (str): Path to the JSON file to load.
        dtype (torch.dtype): Data type of loaded array.
    Returns:
        tensor (torch.Tensor): Tensor loaded from JSON file.
    """
    with open(path, 'r') as json_fh:
        raw = json.load(json_fh)
    return torch.from_numpy(np.array(raw)).type(dtype)
class MetricsMeter:
    """Keep track of binary-classification performance.

    Accumulates confusion-matrix counts (TP/FP/TN/FN) plus the raw predicted
    probabilities and labels, so that threshold-free metrics (ROC curve, AUC)
    can be computed in `return_metrics`.

    Args:
        threshold (float): probability cutoff above which a prediction is
            counted as positive.
    """
    def __init__(self, threshold=0.5):
        self.TP = 0
        self.FP = 0
        self.TN = 0
        self.FN = 0
        # BUGFIX: the original assigned the literal 0.5 here, silently
        # ignoring the `threshold` argument.
        self.threshold = threshold
        # Sentinel first element; stripped with [1:] in return_metrics().
        self.prediction = np.array([1])
        self.label = np.array([1])
    def reset(self):
        """Reset meter, preserving the configured threshold."""
        self.__init__(self.threshold)
    def update(self, input, target):
        """Update meter with new result
        Args:
            input (torch.tensor, Batch_size*1): predicted probability tensor.
            target (torch.tensor, Batch_size*1): ground true, 1 represent positive
        """
        predict = (input > self.threshold).int()
        self.TP += (target[torch.where(predict == 1)] == 1).sum().item()
        self.FP += (target[torch.where(predict == 1)] == 0).sum().item()
        self.TN += (target[torch.where(predict == 0)] == 0).sum().item()
        self.FN += (target[torch.where(predict == 0)] == 1).sum().item()
        input = input.view(-1).numpy()
        target = target.view(-1).numpy()
        self.prediction = np.concatenate([self.prediction, input], axis=-1)
        self.label = np.concatenate([self.label, target], axis=-1)
    def return_metrics(self):
        """Compute summary metrics from the accumulated counts.

        Returns:
            dict: Accuracy/Recall/Precision/Specificity/F1/AUC plus the
            fpr/tpr/thresholds arrays of the ROC curve.
        """
        # 1e-30 guards against division by zero when a count class is empty.
        recall = self.TP / (self.TP + self.FN + 1e-30)
        precision = self.TP / (self.TP + self.FP + 1e-30)
        specificity = self.TN / (self.TN + self.FP + 1e-30)
        accuracy = (self.TP + self.TN) / (self.TP + self.FP + self.TN + self.FN + 1e-30)
        F1 = self.TP / (self.TP + 0.5 * (self.FP + self.FN) + 1e-30)
        # [1:] drops the sentinel element added in __init__.
        fpr, tpr, thresholds = roc_curve(self.label[1:], self.prediction[1:])
        AUC = auc(fpr, tpr)
        metrics_result = {'Accuracy': accuracy,
                          "Recall": recall,
                          "Precision": precision,
                          "Specificity": specificity,
                          "F1": F1,
                          "AUC": AUC,
                          "fpr": fpr,
                          "tpr": tpr,
                          "thresholds": thresholds
                          }
        return metrics_result
| [
"networkx.from_scipy_sparse_matrix",
"numpy.load",
"os.remove",
"ujson.dump",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"torch.cat",
"torch.cuda.device_count",
"logging.Formatter",
"torch_geometric.data.Data",
"torch.device",
"os.path.join",
"shutil.copy",
"logging.FileHand... | [((672, 710), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['G', 'ordered_node_dict'], {}), '(G, ordered_node_dict)\n', (688, 710), True, 'import networkx as nx\n'), ((2106, 2146), 'numpy.load', 'np.load', (['ad_data_path'], {'allow_pickle': '(True)'}), '(ad_data_path, allow_pickle=True)\n', (2113, 2146), True, 'import numpy as np\n'), ((2473, 2527), 'numpy.concatenate', 'np.concatenate', (['[positive_data, negative_data]'], {'axis': '(0)'}), '([positive_data, negative_data], axis=0)\n', (2487, 2527), True, 'import numpy as np\n'), ((2541, 2574), 'numpy.ones', 'np.ones', (['[positive_data.shape[0]]'], {}), '([positive_data.shape[0]])\n', (2548, 2574), True, 'import numpy as np\n'), ((2590, 2624), 'numpy.zeros', 'np.zeros', (['[negative_data.shape[0]]'], {}), '([negative_data.shape[0]])\n', (2598, 2624), True, 'import numpy as np\n'), ((2631, 2679), 'numpy.concatenate', 'np.concatenate', (['[positive_y, negative_y]'], {'axis': '(0)'}), '([positive_y, negative_y], axis=0)\n', (2645, 2679), True, 'import numpy as np\n'), ((2740, 2845), 'sklearn.model_selection.train_test_split', 'train_test_split', (['expression_data', 'y'], {'test_size': '(val_ratio + test_ratio)', 'stratify': 'y', 'random_state': 'seed'}), '(expression_data, y, test_size=val_ratio + test_ratio,\n stratify=y, random_state=seed)\n', (2756, 2845), False, 'from sklearn.model_selection import train_test_split\n'), ((2893, 3030), 'sklearn.model_selection.train_test_split', 'train_test_split', (['remain_expression', 'remain_y'], {'test_size': '(test_ratio / (val_ratio + test_ratio))', 'stratify': 'remain_y', 'random_state': 'seed'}), '(remain_expression, remain_y, test_size=test_ratio / (\n val_ratio + test_ratio), stratify=remain_y, random_state=seed)\n', (2909, 3030), False, 'from sklearn.model_selection import train_test_split\n'), ((4499, 4529), 'torch_geometric.data.Batch.from_data_list', 'Batch.from_data_list', (['examples'], {}), '(examples)\n', (4519, 4529), False, 'from 
torch_geometric.data import Data, Batch\n'), ((11811, 11859), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': 'device'}), '(checkpoint_path, map_location=device)\n', (11821, 11859), False, 'import torch\n'), ((12299, 12324), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12322, 12324), False, 'import torch\n'), ((14437, 14460), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (14454, 14460), False, 'import logging\n'), ((14573, 14605), 'os.path.join', 'os.path.join', (['log_dir', '"""log.txt"""'], {}), "(log_dir, 'log.txt')\n", (14585, 14605), False, 'import os\n'), ((14625, 14654), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (14644, 14654), False, 'import logging\n'), ((14921, 14996), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(message)s"""'], {'datefmt': '"""%m.%d.%y %H:%M:%S"""'}), "('[%(asctime)s] %(message)s', datefmt='%m.%d.%y %H:%M:%S')\n", (14938, 14996), False, 'import logging\n'), ((15106, 15181), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(message)s"""'], {'datefmt': '"""%m.%d.%y %H:%M:%S"""'}), "('[%(asctime)s] %(message)s', datefmt='%m.%d.%y %H:%M:%S')\n", (15123, 15181), False, 'import logging\n'), ((1083, 1121), 'ujson.dump', 'json.dump', (['obj', 'fh'], {'ensure_ascii': 'ascii'}), '(obj, fh, ensure_ascii=ascii)\n', (1092, 1121), True, 'import ujson as json\n'), ((1583, 1641), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index', 'edge_attr': 'edge_attr', 'y': 'y'}), '(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n', (1587, 1641), False, 'from torch_geometric.data import Data, Batch\n'), ((1664, 1717), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index', 'edge_attr': 'None', 'y': 'y'}), '(x=x, edge_index=edge_index, edge_attr=None, y=y)\n', (1668, 1717), False, 'from torch_geometric.data import Data, Batch\n'), ((3931, 3961), 
'networkx.from_scipy_sparse_matrix', 'nx.from_scipy_sparse_matrix', (['A'], {}), '(A)\n', (3958, 3961), True, 'import networkx as nx\n'), ((4307, 4338), 'torch_geometric.data.Batch.from_data_list', 'Batch.from_data_list', (['data_list'], {}), '(data_list)\n', (4327, 4338), False, 'from torch_geometric.data import Data, Batch\n'), ((8344, 8365), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (8363, 8365), False, 'import queue\n'), ((9608, 9651), 'os.path.join', 'os.path.join', (['self.save_dir', 'f"""step_{step}"""'], {}), "(self.save_dir, f'step_{step}')\n", (9620, 9651), False, 'import os\n'), ((12418, 12452), 'torch.device', 'torch.device', (['f"""cuda:{gpu_ids[0]}"""'], {}), "(f'cuda:{gpu_ids[0]}')\n", (12430, 12452), False, 'import torch\n'), ((12461, 12490), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (12482, 12490), False, 'import torch\n'), ((12518, 12537), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (12530, 12537), False, 'import torch\n'), ((13250, 13301), 'os.path.join', 'os.path.join', (['base_dir', 'subdir', 'f"""{name}-{uid:02d}"""'], {}), "(base_dir, subdir, f'{name}-{uid:02d}')\n", (13262, 13301), False, 'import os\n'), ((16086, 16099), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (16094, 16099), True, 'import numpy as np\n'), ((16119, 16132), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (16127, 16132), True, 'import numpy as np\n'), ((16892, 16941), 'numpy.concatenate', 'np.concatenate', (['[self.prediction, input]'], {'axis': '(-1)'}), '([self.prediction, input], axis=-1)\n', (16906, 16941), True, 'import numpy as np\n'), ((16959, 17004), 'numpy.concatenate', 'np.concatenate', (['[self.label, target]'], {'axis': '(-1)'}), '([self.label, target], axis=-1)\n', (16973, 17004), True, 'import numpy as np\n'), ((17343, 17389), 'sklearn.metrics.roc_curve', 'roc_curve', (['self.label[1:]', 'self.prediction[1:]'], {}), '(self.label[1:], self.prediction[1:])\n', 
(17352, 17389), False, 'from sklearn.metrics import roc_curve, auc\n'), ((17401, 17414), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (17404, 17414), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4161, 4211), 'torch.cat', 'torch.cat', (['[gene_feature, expression_data]'], {'dim': '(-1)'}), '([gene_feature, expression_data], dim=-1)\n', (4170, 4211), False, 'import torch\n'), ((9921, 9978), 'torch.save', 'torch.save', (['ckpt_dict', 'f"""{checkpoint_path}{name}.pth.tar"""'], {}), "(ckpt_dict, f'{checkpoint_path}{name}.pth.tar')\n", (9931, 9978), False, 'import torch\n'), ((10174, 10209), 'os.path.join', 'os.path.join', (['self.save_dir', '"""best"""'], {}), "(self.save_dir, 'best')\n", (10186, 10209), False, 'import os\n'), ((13317, 13341), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (13331, 13341), False, 'import os\n'), ((13355, 13376), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (13366, 13376), False, 'import os\n'), ((15763, 15776), 'ujson.load', 'json.load', (['fh'], {}), '(fh)\n', (15772, 15776), True, 'import ujson as json\n'), ((15792, 15815), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (15808, 15815), False, 'import torch\n'), ((1422, 1460), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['G', '"""edge_type"""'], {}), "(G, 'edge_type')\n", (1444, 1460), True, 'import networkx as nx\n'), ((3803, 3831), 'torch.from_numpy', 'torch.from_numpy', (['expression'], {}), '(expression)\n', (3819, 3831), False, 'import torch\n'), ((3894, 3913), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (3910, 3913), False, 'import torch\n'), ((3983, 4013), 'torch.from_numpy', 'torch.from_numpy', (['gene_feature'], {}), '(gene_feature)\n', (3999, 4013), False, 'import torch\n'), ((10269, 10346), 'shutil.copy', 'shutil.copy', (['f"""{checkpoint_path}{name}.pth.tar"""', 'f"""{best_path}{name}.pth.tar"""'], {}), 
"(f'{checkpoint_path}{name}.pth.tar', f'{best_path}{name}.pth.tar')\n", (10280, 10346), False, 'import shutil\n'), ((14218, 14238), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['msg'], {}), '(msg)\n', (14233, 14238), False, 'import tqdm\n'), ((1364, 1383), 'numpy.array', 'np.array', (['edge_list'], {}), '(edge_list)\n', (1372, 1383), True, 'import numpy as np\n'), ((1532, 1551), 'numpy.array', 'np.array', (['edge_attr'], {}), '(edge_attr)\n', (1540, 1551), True, 'import numpy as np\n'), ((10956, 10996), 'os.remove', 'os.remove', (['f"""{worst_ckpt}{name}.pth.tar"""'], {}), "(f'{worst_ckpt}{name}.pth.tar')\n", (10965, 10996), False, 'import os\n'), ((12373, 12398), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (12396, 12398), False, 'import torch\n'), ((16543, 16568), 'torch.where', 'torch.where', (['(predict == 1)'], {}), '(predict == 1)\n', (16554, 16568), False, 'import torch\n'), ((16612, 16637), 'torch.where', 'torch.where', (['(predict == 1)'], {}), '(predict == 1)\n', (16623, 16637), False, 'import torch\n'), ((16681, 16706), 'torch.where', 'torch.where', (['(predict == 0)'], {}), '(predict == 0)\n', (16692, 16706), False, 'import torch\n'), ((16750, 16775), 'torch.where', 'torch.where', (['(predict == 0)'], {}), '(predict == 0)\n', (16761, 16775), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import FactorAnalysis, PCA
import tensorly as tl
from tensorly import unfold as tl_unfold
from tensorly.decomposition import parafac,non_negative_parafac
from sklearn.preprocessing import scale
from sklearn.preprocessing import normalize
import copy
import os
import pickle
from sklearn.feature_selection import mutual_info_classif
from tqdm import tqdm_notebook as tqdm
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold
import keras
from IPython.display import clear_output
import pydot
import tensorflow as tf
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical, np_utils,plot_model
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Embedding, Dense, TimeDistributed, LSTM, Activation, Flatten
from keras.layers import Dropout, Lambda, RepeatVector,Masking,Input,Bidirectional
from keras.optimizers import SGD, RMSprop, Adam
from keras import objectives
from models import create_lstm_ae, create_binned_lstm_vae, create_lstm_vae
from align_maze import align_maze
from data_generator import DataGenerator
#============================================
# evaluate models by reconstruction mse loss
#============================================
def ae_eval(bin_training_data,bin_validation_data,validation,latent_dim,latent_fac,epochs,batch_size):
    """Train an LSTM autoencoder on binned data and return reconstruction MSEs.

    Parameters
    ----------
    bin_training_data : ndarray, shape (n_trial, n_bin, n_neuron)
        Binned neural data the autoencoder is trained to reconstruct.
    bin_validation_data : ndarray
        Held-out data with the same bin/neuron dimensions.
    validation : bool
        If truthy, pass the validation set to ``fit`` so Keras tracks
        validation loss during training.
    latent_dim, latent_fac : int
        Size parameters forwarded to ``create_lstm_ae``.
    epochs : int
        Number of training epochs.  (Original note said "int or
        str(early_stop)" -- a string value is not handled here; confirm.)
    batch_size : int
        Training batch size.

    Returns
    -------
    (mse_val, mse_train) : tuple of float
        Reconstruction mean-squared error on the validation and training
        sets, in that order.
    """
    _, n_bin, n_neuron = bin_training_data.shape
    ae, _, _ = create_lstm_ae(input_dim=n_neuron, timesteps=n_bin, latent_dim=latent_dim,
                              latent_fac=latent_fac)
    # Fit on the training set; optionally let Keras monitor validation loss.
    if validation:
        ae.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0,
               validation_data=(bin_validation_data, bin_validation_data))
    else:
        ae.fit(bin_training_data, bin_training_data, epochs=epochs, batch_size=batch_size, verbose=0)
    # One loss object suffices for both evaluations (was instantiated twice).
    mse = tf.keras.losses.MeanSquaredError()
    val_reconstruction = ae.predict(bin_validation_data, verbose=0)
    mse_val = mse(bin_validation_data, val_reconstruction).numpy()
    train_reconstruction = ae.predict(bin_training_data, verbose=0)
    mse_train = mse(bin_training_data, train_reconstruction).numpy()
    return mse_val,mse_train
def vae_binned_eval(bin_training_data,bin_validation_data,validation,latent_dim,latent_fac,epochs,batch_size):
    """Fit a binned LSTM-VAE and return (validation MSE, training MSE).

    Trains the VAE to reconstruct ``bin_training_data``; when ``validation``
    is True the validation pair is additionally monitored during ``fit``.
    """
    n_bin, n_neuron = bin_training_data.shape[1], bin_training_data.shape[2]
    vae_binned, _, _ = create_binned_lstm_vae(input_dim=n_neuron, timesteps=n_bin,
                                              batch_size=batch_size,
                                              intermediate_dim=latent_dim,
                                              latent_dim=latent_dim,
                                              latent_fac=latent_fac, epsilon_std=1.)
    fit_kwargs = dict(epochs=epochs, batch_size=batch_size, verbose=0)
    if validation == True:
        fit_kwargs["validation_data"] = (bin_validation_data, bin_validation_data)
    vae_binned.fit(bin_training_data, bin_training_data, **fit_kwargs)

    def _reconstruction_mse(data):
        # Reconstruct ``data`` and score it against itself.
        reconstruction = vae_binned.predict(data, batch_size=batch_size, verbose=0)
        return tf.keras.losses.MeanSquaredError()(data, reconstruction).numpy()

    mse_val = _reconstruction_mse(bin_validation_data)
    mse_train = _reconstruction_mse(bin_training_data)
    return mse_val, mse_train
def vae_eval(train_indexes, val_indexes,frame_trial, maze_position,
             nobin_training_data,nobin_validation_data,validation,latent_dim,latent_fac,epochs,batch_size=1):
    """Train a variable-length LSTM-VAE and score it on maze-aligned data.

    Unlike the binned variants, trials here keep their native lengths
    (``timesteps=None``), so training goes through ``DataGenerator`` and
    prediction is done one trial at a time.  Reconstructions are aligned to
    maze position via ``align_maze`` before the MSE is computed, so errors
    are compared in maze coordinates rather than raw time.

    Returns ``(mse_val, mse_train)``: reconstruction MSE on the aligned
    validation and training sets, in that order.
    """
    # Number of recorded neurons, taken from the last axis of the first trial.
    n_neuron=nobin_training_data[0].shape[-1]
    training_generator = DataGenerator(nobin_training_data, nobin_training_data, batch_size=batch_size)
    validation_generator = DataGenerator(nobin_validation_data, nobin_validation_data, batch_size=batch_size)
    # timesteps=None lets the VAE accept trials of varying length.
    vae, _, _ = create_lstm_vae(input_dim=n_neuron, timesteps=None, batch_size=batch_size,
                                intermediate_dim=latent_dim, latent_dim=latent_dim,
                                latent_fac=latent_fac, epsilon_std=1.)
    if validation == True:
        vae.fit_generator(generator=training_generator,
                          validation_data=validation_generator,
                          epochs=epochs, verbose=0)
    else:
        vae.fit_generator(generator=training_generator,
                          epochs=epochs, verbose=0)
    # Predict trial-by-trial because each trial has its own length; each
    # trial is reshaped to the (1, time, neuron) batch the model expects.
    reconstruct_train = []
    for i in range(len(nobin_training_data)):
        shape1, shape2 = nobin_training_data[i].shape
        reconstruct_train.append(vae.predict(nobin_training_data[i].reshape(1, shape1, shape2), verbose=0))
    reconstruct_val = []
    for i in range(len(nobin_validation_data)):
        shape1, shape2 = nobin_validation_data[i].shape
        reconstruct_val.append(vae.predict(nobin_validation_data[i].reshape(1, shape1, shape2), verbose=0))
    # Align raw data and reconstructions to maze position; NaNs mark maze
    # bins a trial never visited, so zero them before computing the MSE.
    aligned_train_data = align_maze(train_indexes, nobin_training_data,
                                   frame_trial, maze_position)
    aligned_train_reconstruct = align_maze(train_indexes,reconstruct_train,
                                          frame_trial, maze_position, reshape=True)
    aligned_train_data[np.isnan(aligned_train_data)] = 0
    aligned_train_reconstruct[np.isnan(aligned_train_reconstruct)] = 0
    mse = tf.keras.losses.MeanSquaredError()
    mse_train = mse(aligned_train_data, aligned_train_reconstruct).numpy()
    aligned_val_data = align_maze(val_indexes, nobin_validation_data,
                                 frame_trial, maze_position)
    aligned_val_reconstruct = align_maze(val_indexes,reconstruct_val,
                                        frame_trial, maze_position, reshape=True)
    aligned_val_data[np.isnan(aligned_val_data)] = 0
    aligned_val_reconstruct[np.isnan(aligned_val_reconstruct)] = 0
    mse = tf.keras.losses.MeanSquaredError()
    mse_val = mse(aligned_val_data, aligned_val_reconstruct).numpy()
    return mse_val, mse_train
| [
"align_maze.align_maze",
"tensorflow.keras.losses.MeanSquaredError",
"data_generator.DataGenerator",
"numpy.isnan",
"models.create_lstm_ae",
"models.create_lstm_vae",
"models.create_binned_lstm_vae"
] | [((1621, 1722), 'models.create_lstm_ae', 'create_lstm_ae', ([], {'input_dim': 'n_neuron', 'timesteps': 'n_bin', 'latent_dim': 'latent_dim', 'latent_fac': 'latent_fac'}), '(input_dim=n_neuron, timesteps=n_bin, latent_dim=latent_dim,\n latent_fac=latent_fac)\n', (1635, 1722), False, 'from models import create_lstm_ae, create_binned_lstm_vae, create_lstm_vae\n'), ((2144, 2178), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (2176, 2178), True, 'import tensorflow as tf\n'), ((2325, 2359), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (2357, 2359), True, 'import tensorflow as tf\n'), ((2644, 2827), 'models.create_binned_lstm_vae', 'create_binned_lstm_vae', ([], {'input_dim': 'n_neuron', 'timesteps': 'n_bin', 'batch_size': 'batch_size', 'intermediate_dim': 'latent_dim', 'latent_dim': 'latent_dim', 'latent_fac': 'latent_fac', 'epsilon_std': '(1.0)'}), '(input_dim=n_neuron, timesteps=n_bin, batch_size=\n batch_size, intermediate_dim=latent_dim, latent_dim=latent_dim,\n latent_fac=latent_fac, epsilon_std=1.0)\n', (2666, 2827), False, 'from models import create_lstm_ae, create_binned_lstm_vae, create_lstm_vae\n'), ((3359, 3393), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (3391, 3393), True, 'import tensorflow as tf\n'), ((3571, 3605), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (3603, 3605), True, 'import tensorflow as tf\n'), ((3957, 4035), 'data_generator.DataGenerator', 'DataGenerator', (['nobin_training_data', 'nobin_training_data'], {'batch_size': 'batch_size'}), '(nobin_training_data, nobin_training_data, batch_size=batch_size)\n', (3970, 4035), False, 'from data_generator import DataGenerator\n'), ((4063, 4150), 'data_generator.DataGenerator', 'DataGenerator', (['nobin_validation_data', 'nobin_validation_data'], {'batch_size': 'batch_size'}), 
'(nobin_validation_data, nobin_validation_data, batch_size=\n batch_size)\n', (4076, 4150), False, 'from data_generator import DataGenerator\n'), ((4162, 4337), 'models.create_lstm_vae', 'create_lstm_vae', ([], {'input_dim': 'n_neuron', 'timesteps': 'None', 'batch_size': 'batch_size', 'intermediate_dim': 'latent_dim', 'latent_dim': 'latent_dim', 'latent_fac': 'latent_fac', 'epsilon_std': '(1.0)'}), '(input_dim=n_neuron, timesteps=None, batch_size=batch_size,\n intermediate_dim=latent_dim, latent_dim=latent_dim, latent_fac=\n latent_fac, epsilon_std=1.0)\n', (4177, 4337), False, 'from models import create_lstm_ae, create_binned_lstm_vae, create_lstm_vae\n'), ((5208, 5282), 'align_maze.align_maze', 'align_maze', (['train_indexes', 'nobin_training_data', 'frame_trial', 'maze_position'], {}), '(train_indexes, nobin_training_data, frame_trial, maze_position)\n', (5218, 5282), False, 'from align_maze import align_maze\n'), ((5351, 5441), 'align_maze.align_maze', 'align_maze', (['train_indexes', 'reconstruct_train', 'frame_trial', 'maze_position'], {'reshape': '(True)'}), '(train_indexes, reconstruct_train, frame_trial, maze_position,\n reshape=True)\n', (5361, 5441), False, 'from align_maze import align_maze\n'), ((5618, 5652), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (5650, 5652), True, 'import tensorflow as tf\n'), ((5752, 5826), 'align_maze.align_maze', 'align_maze', (['val_indexes', 'nobin_validation_data', 'frame_trial', 'maze_position'], {}), '(val_indexes, nobin_validation_data, frame_trial, maze_position)\n', (5762, 5826), False, 'from align_maze import align_maze\n'), ((5891, 5977), 'align_maze.align_maze', 'align_maze', (['val_indexes', 'reconstruct_val', 'frame_trial', 'maze_position'], {'reshape': '(True)'}), '(val_indexes, reconstruct_val, frame_trial, maze_position,\n reshape=True)\n', (5901, 5977), False, 'from align_maze import align_maze\n'), ((6144, 6178), 
'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (6176, 6178), True, 'import tensorflow as tf\n'), ((5503, 5531), 'numpy.isnan', 'np.isnan', (['aligned_train_data'], {}), '(aligned_train_data)\n', (5511, 5531), True, 'import numpy as np\n'), ((5567, 5602), 'numpy.isnan', 'np.isnan', (['aligned_train_reconstruct'], {}), '(aligned_train_reconstruct)\n', (5575, 5602), True, 'import numpy as np\n'), ((6035, 6061), 'numpy.isnan', 'np.isnan', (['aligned_val_data'], {}), '(aligned_val_data)\n', (6043, 6061), True, 'import numpy as np\n'), ((6095, 6128), 'numpy.isnan', 'np.isnan', (['aligned_val_reconstruct'], {}), '(aligned_val_reconstruct)\n', (6103, 6128), True, 'import numpy as np\n')] |
import matplotlib
# Load the project's shared matplotlib style before pyplot is imported.
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
import numpy as np
import sys
def basins_1d():
    """Colour each seed on [-1.5, 1.5] by where Newton's method sends it.

    For f(x) = x**2 - 1, seven Newton iterations are applied to 40 seeds;
    the converged values colour square markers along the x-axis, with f
    drawn on top.  Saves the figure to ``basins1d.pdf``.
    """
    f = lambda x: x ** 2 - 1
    Df = lambda x: 2 * x
    seeds = np.linspace(-1.5, 1.5, 40)
    x = seeds
    for _ in range(7):
        x = x - f(x) / Df(x)
    plt.scatter(seeds, np.zeros_like(seeds), marker='s', c=x,
                edgecolor='None', cmap='bwr')
    plt.plot(seeds, f(seeds), c='black', linewidth=3)
    plt.savefig('basins1d.pdf', bbox_inches='tight')
    plt.close()
def fractal_1d():
    """Visualise the fractal Newton basins of f(x) = x**3 - x on a strip.

    51 Newton iterations are applied to 500 seeds on [-1.5, 1.5]; the
    converged values colour a thin horizontal band rendered with
    ``pcolormesh``, with f drawn on top.  Saves to ``fractal1d.pdf``.
    """
    f = lambda x: x ** 3 - x
    Df = lambda x: 3 * x ** 2 - 1
    seeds = np.linspace(-1.5, 1.5, 500)
    x = seeds
    for _ in range(51):
        x = x - f(x) / Df(x)
    band = np.array([-.1, .1])
    X, Y = np.meshgrid(seeds, band)
    # Duplicate the converged row so the mesh has one cell of vertical extent.
    plt.pcolormesh(X, Y, np.atleast_2d(x).repeat(2, axis=0), cmap='brg')
    plt.plot(seeds, f(seeds), c='black', linewidth=3)
    plt.savefig('fractal1d.pdf', bbox_inches='tight')
    plt.close()
def plot_basins(f, Df, roots, xmin, xmax, ymin, ymax, numpoints=100, iters=15, colormap='brg', name='name.png', dpinum=150):
    """Plot Newton-basin colouring of ``f`` over a complex-plane window.

    Runs ``iters`` Newton iterations simultaneously on a ``numpoints`` x
    ``numpoints`` grid spanning [xmin, xmax] x [ymin, ymax]i, then colours
    each grid point by the index of the nearest root in ``roots``.

    Parameters
    ----------
    f, Df : callable
        The function and its derivative; must accept complex ndarrays.
    roots : array_like of complex
        The known roots of ``f`` used for basin classification.
    xmin, xmax, ymin, ymax : float
        Real and imaginary extents of the plotted window.
    numpoints, iters : int
        Grid resolution per axis and number of Newton iterations.
    colormap, name, dpinum
        Styling and output options passed to matplotlib.
    """
    xreal = np.linspace(xmin, xmax, numpoints)
    ximag = np.linspace(ymin, ymax, numpoints)
    Xreal, Ximag = np.meshgrid(xreal, ximag)
    # Bugfix: the original used Python-2-only xrange(), a NameError on
    # Python 3; it also left Xnew unbound when iters == 0.
    Xnew = Xreal + 1j * Ximag
    for _ in range(iters):
        Xnew = Xnew - f(Xnew) / Df(Xnew)
    m, n = Xnew.shape
    # Replace each converged value by the index of its nearest root.
    for i in range(m):
        for j in range(n):
            Xnew[i, j] = np.argmin(np.abs(Xnew[i, j] - roots))
    plt.pcolormesh(Xreal, Ximag, Xnew, cmap=colormap)
    plt.savefig(name, bbox_inches='tight', dpi=dpinum)
if __name__ == "__main__":
    # Generate both demonstration figures when run as a script.
    basins_1d()
    fractal_1d()
| [
"numpy.meshgrid",
"numpy.zeros_like",
"numpy.abs",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.pcolormesh",
"matplotlib.rc_params_from_file",
"matplotlib.pyplot.savefig",
"numpy.atleast_2d"
] | [((40, 92), 'matplotlib.rc_params_from_file', 'matplotlib.rc_params_from_file', (['"""../../matplotlibrc"""'], {}), "('../../matplotlibrc')\n", (70, 92), False, 'import matplotlib\n'), ((229, 255), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(1.5)', '(40)'], {}), '(-1.5, 1.5, 40)\n', (240, 255), True, 'import numpy as np\n'), ((522, 570), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""basins1d.pdf"""'], {'bbox_inches': '"""tight"""'}), "('basins1d.pdf', bbox_inches='tight')\n", (533, 570), True, 'import matplotlib.pyplot as plt\n'), ((575, 586), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (584, 586), True, 'import matplotlib.pyplot as plt\n'), ((670, 697), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(1.5)', '(500)'], {}), '(-1.5, 1.5, 500)\n', (681, 697), True, 'import numpy as np\n'), ((831, 852), 'numpy.array', 'np.array', (['[-0.1, 0.1]'], {}), '([-0.1, 0.1])\n', (839, 852), True, 'import numpy as np\n'), ((862, 880), 'numpy.meshgrid', 'np.meshgrid', (['x0', 'y'], {}), '(x0, y)\n', (873, 880), True, 'import numpy as np\n'), ((1009, 1058), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fractal1d.pdf"""'], {'bbox_inches': '"""tight"""'}), "('fractal1d.pdf', bbox_inches='tight')\n", (1020, 1058), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1074), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1072, 1074), True, 'import matplotlib.pyplot as plt\n'), ((1220, 1254), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'numpoints'], {}), '(xmin, xmax, numpoints)\n', (1231, 1254), True, 'import numpy as np\n'), ((1267, 1301), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'numpoints'], {}), '(ymin, ymax, numpoints)\n', (1278, 1301), True, 'import numpy as np\n'), ((1321, 1346), 'numpy.meshgrid', 'np.meshgrid', (['xreal', 'ximag'], {}), '(xreal, ximag)\n', (1332, 1346), True, 'import numpy as np\n'), ((1604, 1653), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['Xreal', 'Ximag', 'Xnew'], {'cmap': 'colormap'}), 
'(Xreal, Ximag, Xnew, cmap=colormap)\n', (1618, 1653), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1708), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {'bbox_inches': '"""tight"""', 'dpi': 'dpinum'}), "(name, bbox_inches='tight', dpi=dpinum)\n", (1669, 1708), True, 'import matplotlib.pyplot as plt\n'), ((400, 417), 'numpy.zeros_like', 'np.zeros_like', (['x0'], {}), '(x0)\n', (413, 417), True, 'import numpy as np\n'), ((906, 925), 'numpy.atleast_2d', 'np.atleast_2d', (['xnew'], {}), '(xnew)\n', (919, 925), True, 'import numpy as np\n'), ((1571, 1597), 'numpy.abs', 'np.abs', (['(Xnew[i, j] - roots)'], {}), '(Xnew[i, j] - roots)\n', (1577, 1597), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pandapower as pp
import pytest
import pandas as pd
# Widen pandas' console display so full result tables print untruncated
# when debugging these tests interactively.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def test_convenience_create_functions():
    """Exercise pandapower's convenience create_* wrappers end-to-end.

    Builds a three-bus network and checks, after power flow, that
    ``create_load_from_cosphi``, ``create_shunt_as_capacitor``,
    ``create_sgen_from_cosphi`` and ``create_series_reactor_as_impedance``
    produce the expected element parameters and bus voltages, and that the
    transformer derating factor ``df`` scales loading as documented
    (``df=0`` must raise).  The numeric targets are regression values from
    previous known-good runs.
    """
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, 110.)
    b2 = pp.create_bus(net, 110.)
    b3 = pp.create_bus(net, 20)
    pp.create_ext_grid(net, b1)
    pp.create_line_from_parameters(net, b1, b2, length_km=20., r_ohm_per_km=0.0487,
                                   x_ohm_per_km=0.1382301, c_nf_per_km=160., max_i_ka=0.664)
    # Load from cos(phi): P = S * cos(phi), Q inductive (positive).
    l0 = pp.create_load_from_cosphi(net, b2, 10, 0.95, "ind", name="load")
    pp.runpp(net, init="flat")
    assert net.load.p_mw.at[l0] == 9.5
    assert net.load.q_mvar.at[l0] > 0
    assert np.sqrt(net.load.p_mw.at[l0] ** 2 + net.load.q_mvar.at[l0] ** 2) == 10
    assert np.isclose(net.res_bus.vm_pu.at[b2], 0.99990833838)
    assert net.load.name.at[l0] == "load"
    # Capacitor shunt: injects ~10 Mvar with 1 % losses, raising the voltage.
    sh0 = pp.create_shunt_as_capacitor(net, b2, 10, loss_factor=0.01, name="shunt")
    pp.runpp(net, init="flat")
    assert np.isclose(net.res_shunt.q_mvar.at[sh0], -10.043934174)
    assert np.isclose(net.res_shunt.p_mw.at[sh0], 0.10043933665)
    assert np.isclose(net.res_bus.vm_pu.at[b2], 1.0021942964)
    assert net.shunt.name.at[sh0] == "shunt"
    # Static generator from cos(phi), capacitive.
    sg0 = pp.create_sgen_from_cosphi(net, b2, 5, 0.95, "cap", name="sgen")
    pp.runpp(net, init="flat")
    assert np.sqrt(net.sgen.p_mw.at[sg0] ** 2 + net.sgen.q_mvar.at[sg0] ** 2) == 5
    assert net.sgen.p_mw.at[sg0] == 4.75
    assert net.sgen.q_mvar.at[sg0] > 0
    assert np.isclose(net.res_bus.vm_pu.at[b2], 1.0029376578)
    assert net.sgen.name.at[sg0] == "sgen"
    tol = 1e-6
    # Series reactor modelled as an impedance element in per unit.
    sind = pp.create_series_reactor_as_impedance(net, b1, b2, r_ohm=100, x_ohm=200, sn_mva=100)
    assert net.impedance.at[sind, 'rft_pu'] - 8.264463e-04 < tol
    assert net.impedance.at[sind, 'xft_pu'] - 0.001653 < tol
    tid = pp.create_transformer_from_parameters(net, hv_bus=b2, lv_bus=b3, sn_mva=0.1, vn_hv_kv=110,
                                                vn_lv_kv=20, vkr_percent=5, vk_percent=20,
                                                pfe_kw=1, i0_percent=1)
    pp.create_load(net, b3, 0.1)
    # Doubling the derating factor df halves the reported loading percent.
    assert net.trafo.at[tid, 'df'] == 1
    pp.runpp(net)
    tr_l = net.res_trafo.at[tid, 'loading_percent']
    net.trafo.at[tid, 'df'] = 2
    pp.runpp(net)
    tr_l_2 = net.res_trafo.at[tid, 'loading_percent']
    assert tr_l == tr_l_2 * 2
    # df = 0 makes the rating meaningless; runpp must refuse.
    net.trafo.at[tid, 'df'] = 0
    with pytest.raises(UserWarning):
        pp.runpp(net)
def test_nonexistent_bus():
    """Element creation must fail on missing buses and on duplicate indices.

    Builds one ``partial`` per element-creating function, each bound to a
    fixed index and to bus numbers that do not yet exist.  Every call must
    raise while the buses are absent; after the buses are created each call
    succeeds exactly once, and a second call with the same explicit index
    must raise again.
    """
    from functools import partial
    net = pp.create_empty_network()
    # Every partial targets buses 0/1/2 (not yet created) with index=0 or 1.
    create_functions = [partial(pp.create_load, net=net, p_mw=0, q_mvar=0, bus=0, index=0),
                        partial(pp.create_sgen, net=net, p_mw=0, q_mvar=0, bus=0, index=0),
                        partial(pp.create_dcline, net, from_bus=0, to_bus=1, p_mw=0.1,
                                loss_percent=0, loss_mw=0.01, vm_from_pu=1., vm_to_pu=1., index=0),
                        partial(pp.create_gen, net=net, p_mw=0, bus=0, index=0),
                        partial(pp.create_ward, net, 0, 0, 0, 0, 0, index=0),
                        partial(pp.create_xward, net, 0, 0, 0, 0, 0, 1, 1, 1, index=0),
                        partial(pp.create_shunt, net=net, q_mvar=0, bus=0, index=0),
                        partial(pp.create_ext_grid, net=net, bus=1, index=0),
                        partial(pp.create_line, net=net, from_bus=0, to_bus=1, length_km=1.,
                                std_type="NAYY 4x50 SE", index=0),
                        partial(pp.create_line_from_parameters, net=net, from_bus=0, to_bus=1,
                                length_km=1., r_ohm_per_km=0.1, x_ohm_per_km=0.1, max_i_ka=0.4,
                                c_nf_per_km=10, index=1),
                        partial(pp.create_transformer, net=net, hv_bus=0, lv_bus=1,
                                std_type="63 MVA 110/20 kV", index=0),
                        partial(pp.create_transformer3w, net=net, hv_bus=0, lv_bus=1, mv_bus=2,
                                std_type="63/25/38 MVA 110/20/10 kV", index=0),
                        partial(pp.create_transformer3w_from_parameters, net=net, hv_bus=0,
                                lv_bus=1, mv_bus=2, i0_percent=0.89, pfe_kw=3.5,
                                vn_hv_kv=110, vn_lv_kv=10, vn_mv_kv=20, sn_hv_mva=63,
                                sn_lv_mva=38, sn_mv_mva=25, vk_hv_percent=10.4,
                                vk_lv_percent=10.4, vk_mv_percent=10.4, vkr_hv_percent=0.28,
                                vkr_lv_percent=0.35, vkr_mv_percent=0.32, index=1),
                        partial(pp.create_transformer_from_parameters, net=net, hv_bus=0, lv_bus=1,
                                sn_mva=60, vn_hv_kv=20., vn_lv_kv=0.4, vk_percent=10,
                                vkr_percent=0.1, pfe_kw=0, i0_percent=0, index=1),
                        partial(pp.create_impedance, net=net, from_bus=0, to_bus=1,
                                rft_pu=0.1, xft_pu=0.1, sn_mva=0.6, index=0),
                        partial(pp.create_switch, net, bus=0, element=1, et="b", index=0)]
    for func in create_functions:
        with pytest.raises(Exception):  # exception has to be raised since bus doesn't exist
            func()
    pp.create_bus(net, 0.4)
    pp.create_bus(net, 0.4)
    pp.create_bus(net, 0.4)
    for func in create_functions:
        func()  # buses exist, element can be created
        with pytest.raises(Exception):  # exception is raised because index already exists
            func()
def test_tap_phase_shifter_default():
    """tap_phase_shifter defaults to False for std types lacking the key.

    Registers a copy of the "25 MVA 110/20 kV" trafo std type with the
    ``tap_phase_shifter`` entry stripped, then creates transformers both
    from raw parameters and from that std type; every resulting trafo row
    must carry ``tap_phase_shifter == False``.
    """
    expected_default = False
    net = pp.create_empty_network()
    pp.create_bus(net, 110)
    pp.create_bus(net, 20)
    std_params = pp.load_std_type(net, "25 MVA 110/20 kV", "trafo")
    # Strip the key so create_transformer has to fall back to the default.
    std_params.pop("tap_phase_shifter", None)
    pp.create_std_type(net, std_params, "without_tap_shifter_info", "trafo")
    pp.create_transformer_from_parameters(net, 0, 1, 25e3, 110, 20, 0.4, 12, 20, 0.07)
    pp.create_transformer(net, 0, 1, "without_tap_shifter_info")
    assert (net.trafo.tap_phase_shifter == expected_default).all()
def test_create_line_conductance():
    """A custom std type's ``g_us_per_km`` is carried onto the created line."""
    net = pp.create_empty_network()
    bus_a = pp.create_bus(net, 20)
    bus_b = pp.create_bus(net, 20)
    conductance_type = {'c_nf_per_km': 210, 'max_i_ka': 0.142, 'q_mm2': 50,
                        'r_ohm_per_km': 0.642, 'type': 'cs',
                        'x_ohm_per_km': 0.083, "g_us_per_km": 1}
    pp.create_std_type(net, conductance_type, "test_conductance")
    line_idx = pp.create_line(net, bus_a, bus_b, 1., "test_conductance")
    assert net.line.g_us_per_km.at[line_idx] == 1
def test_create_buses():
    """create_buses accepts no geodata, one shared tuple, or one row per bus."""
    net = pp.create_empty_network()
    # no geodata at all
    plain = pp.create_buses(net, 3, 110)
    # a single (x, y) tuple applied to all three buses
    shared_xy = pp.create_buses(net, 3, 110, geodata=(10, 20))
    # one coordinate row per bus
    coords = np.array([[10, 20], [20, 30], [30, 40]])
    per_bus = pp.create_buses(net, 3, 110, geodata=coords)

    assert len(net.bus) == 9
    assert len(net.bus_geodata) == 6
    for idx in shared_xy:
        assert net.bus_geodata.at[idx, 'x'] == 10
        assert net.bus_geodata.at[idx, 'y'] == 20
    assert (net.bus_geodata.loc[per_bus, ['x', 'y']].values == coords).all()
    # a geodata array whose length differs from the bus count is rejected
    with pytest.raises(ValueError):
        pp.create_buses(net, 2, 110, geodata=coords)
def test_create_lines():
    """Vectorised line creation: scalar broadcast and per-line parameter arrays.

    Covers three modes of ``pp.create_lines``:

    1. std-type only -- both lines share the std type's parameters and no
       geodata is written;
    2. per-line lengths plus per-line coordinate lists;
    3. scalar keyword values (length, df, in_service, parallel, geodata,
       max_loading_percent, name) broadcast to every created line, and the
       same keywords given as per-line arrays.
    """
    # standard
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, 10)
    b2 = pp.create_bus(net, 10)
    l = pp.create_lines(net, [b1, b1], [b2, b2], 4, std_type="48-AL1/8-ST1A 10.0")
    assert len(net.line) == 2
    assert len(net.line_geodata) == 0
    assert sum(net.line.std_type == "48-AL1/8-ST1A 10.0") == 2
    # both lines take r_ohm_per_km from the shared std type
    assert len(set(net.line.r_ohm_per_km)) == 1
    # with geodata
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, 10)
    b2 = pp.create_bus(net, 10)
    l = pp.create_lines(net, [b1, b1], [b2, b2], [1.5, 3], std_type="48-AL1/8-ST1A 10.0",
                        geodata=[[(1,1),(2,2),(3,3)], [(1,1),(1,2)]])
    assert len(net.line) == 2
    assert len(net.line_geodata) == 2
    assert net.line_geodata.at[l[0], "coords"] == [(1,1),(2,2),(3,3)]
    assert net.line_geodata.at[l[1], "coords"] == [(1,1),(1,2)]
    # setting params as single value
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, 10)
    b2 = pp.create_bus(net, 10)
    l = pp.create_lines(net, [b1, b1], [b2, b2], length_km=5, df=0.8, in_service=False,
                        geodata=[(10, 10), (20, 20)], parallel=1, max_loading_percent=90,
                        name="test", std_type="48-AL1/8-ST1A 10.0")
    assert len(net.line) == 2
    assert len(net.line_geodata) == 2
    assert net.line.length_km.at[l[0]] == 5
    assert net.line.length_km.at[l[1]] == 5
    assert net.line.at[l[0], "in_service"] == False  # is actually <class 'numpy.bool_'>
    assert net.line.at[l[1], "in_service"] == False  # is actually <class 'numpy.bool_'>
    assert net.line_geodata.at[l[0], "coords"] == [(10,10), (20,20)]
    assert net.line_geodata.at[l[1], "coords"] == [(10,10), (20,20)]
    assert net.line.at[l[0], "name"] == "test"
    assert net.line.at[l[1], "name"] == "test"
    assert net.line.at[l[0], "max_loading_percent"] == 90
    assert net.line.at[l[1], "max_loading_percent"] == 90
    assert net.line.at[l[0], "parallel"] == 1
    assert net.line.at[l[1], "parallel"] == 1
    # setting params as array
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, 10)
    b2 = pp.create_bus(net, 10)
    l = pp.create_lines(net, [b1, b1], [b2, b2], length_km=[1, 5], df=[0.8, 0.7],
                        in_service=[True, False],
                        geodata=[[(10, 10), (20, 20)], [(100, 10), (200, 20)]], parallel=[2, 1],
                        max_loading_percent=[80, 90], name=["test1", "test2"],
                        std_type="48-AL1/8-ST1A 10.0")
    assert len(net.line) == 2
    assert len(net.line_geodata) == 2
    assert net.line.at[l[0], "length_km"] == 1
    assert net.line.at[l[1], "length_km"] == 5
    assert net.line.at[l[0], "in_service"] == True  # is actually <class 'numpy.bool_'>
    assert net.line.at[l[1], "in_service"] == False  # is actually <class 'numpy.bool_'>
    assert net.line_geodata.at[l[0], "coords"] == [(10,10), (20,20)]
    assert net.line_geodata.at[l[1], "coords"] == [(100,10), (200,20)]
    assert net.line.at[l[0], "name"] == "test1"
    assert net.line.at[l[1], "name"] == "test2"
    assert net.line.at[l[0], "max_loading_percent"] == 80
    assert net.line.at[l[1], "max_loading_percent"] == 90
    assert net.line.at[l[0], "parallel"] == 2
    assert net.line.at[l[1], "parallel"] == 1
def test_create_line_alpha_temperature():
    """``alpha``/``temperature_degree_celsius`` columns are set per create call.

    Creates five lines -- std-type and parameter-based, with and without
    the ``alpha`` keyword -- and checks which rows carry the temperature
    coefficient and temperature, and which stay NaN.
    """
    net = pp.create_empty_network()
    buses = pp.create_buses(net, 5, 110)
    plain_std = pp.create_line(net, 0, 1, 10, "48-AL1/8-ST1A 10.0")
    alpha_std = pp.create_line(net, 1, 2, 10, "48-AL1/8-ST1A 10.0", alpha=4.03e-3,
                               temperature_degree_celsius=80)
    after_std = pp.create_line(net, 2, 3, 10, "48-AL1/8-ST1A 10.0")
    plain_param = pp.create_line_from_parameters(net, 3, 4, 10, 1, 1, 1, 100)
    alpha_param = pp.create_line_from_parameters(net, 3, 4, 10, 1, 1, 1, 100, alpha=4.03e-3)
    assert 'alpha' in net.line.columns
    assert all(net.line.loc[[alpha_std, after_std, alpha_param], 'alpha'] == 4.03e-3)
    assert all(net.line.loc[[plain_std, plain_param], 'alpha'].isnull())
    assert net.line.loc[alpha_std, 'temperature_degree_celsius'] == 80
    assert all(net.line.loc[[plain_std, after_std, plain_param, alpha_param],
                            'temperature_degree_celsius'].isnull())
if __name__ == '__main__':
    # Run only the line-creation test when executed directly; the full
    # module is normally collected by pytest.
    test_create_lines()
#    pytest.main(["test_create.py"])
| [
"pandapower.create_transformer_from_parameters",
"pandapower.create_ext_grid",
"pandapower.load_std_type",
"pandapower.create_line",
"pandapower.create_bus",
"numpy.isclose",
"pandas.set_option",
"pandapower.create_lines",
"pandapower.create_series_reactor_as_impedance",
"pandapower.create_empty_n... | [((268, 306), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (281, 306), True, 'import pandas as pd\n'), ((307, 348), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (320, 348), True, 'import pandas as pd\n'), ((349, 385), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (362, 385), True, 'import pandas as pd\n'), ((438, 463), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (461, 463), True, 'import pandapower as pp\n'), ((473, 498), 'pandapower.create_bus', 'pp.create_bus', (['net', '(110.0)'], {}), '(net, 110.0)\n', (486, 498), True, 'import pandapower as pp\n'), ((507, 532), 'pandapower.create_bus', 'pp.create_bus', (['net', '(110.0)'], {}), '(net, 110.0)\n', (520, 532), True, 'import pandapower as pp\n'), ((541, 563), 'pandapower.create_bus', 'pp.create_bus', (['net', '(20)'], {}), '(net, 20)\n', (554, 563), True, 'import pandapower as pp\n'), ((568, 595), 'pandapower.create_ext_grid', 'pp.create_ext_grid', (['net', 'b1'], {}), '(net, b1)\n', (586, 595), True, 'import pandapower as pp\n'), ((600, 744), 'pandapower.create_line_from_parameters', 'pp.create_line_from_parameters', (['net', 'b1', 'b2'], {'length_km': '(20.0)', 'r_ohm_per_km': '(0.0487)', 'x_ohm_per_km': '(0.1382301)', 'c_nf_per_km': '(160.0)', 'max_i_ka': '(0.664)'}), '(net, b1, b2, length_km=20.0, r_ohm_per_km=\n 0.0487, x_ohm_per_km=0.1382301, c_nf_per_km=160.0, max_i_ka=0.664)\n', (630, 744), True, 'import pandapower as pp\n'), ((783, 848), 'pandapower.create_load_from_cosphi', 'pp.create_load_from_cosphi', (['net', 'b2', '(10)', '(0.95)', '"""ind"""'], {'name': '"""load"""'}), "(net, b2, 10, 0.95, 'ind', name='load')\n", (809, 848), True, 'import pandapower as pp\n'), ((853, 879), 'pandapower.runpp', 'pp.runpp', (['net'], {'init': 
'"""flat"""'}), "(net, init='flat')\n", (861, 879), True, 'import pandapower as pp\n'), ((1050, 1101), 'numpy.isclose', 'np.isclose', (['net.res_bus.vm_pu.at[b2]', '(0.99990833838)'], {}), '(net.res_bus.vm_pu.at[b2], 0.99990833838)\n', (1060, 1101), True, 'import numpy as np\n'), ((1155, 1228), 'pandapower.create_shunt_as_capacitor', 'pp.create_shunt_as_capacitor', (['net', 'b2', '(10)'], {'loss_factor': '(0.01)', 'name': '"""shunt"""'}), "(net, b2, 10, loss_factor=0.01, name='shunt')\n", (1183, 1228), True, 'import pandapower as pp\n'), ((1233, 1259), 'pandapower.runpp', 'pp.runpp', (['net'], {'init': '"""flat"""'}), "(net, init='flat')\n", (1241, 1259), True, 'import pandapower as pp\n'), ((1271, 1326), 'numpy.isclose', 'np.isclose', (['net.res_shunt.q_mvar.at[sh0]', '(-10.043934174)'], {}), '(net.res_shunt.q_mvar.at[sh0], -10.043934174)\n', (1281, 1326), True, 'import numpy as np\n'), ((1338, 1391), 'numpy.isclose', 'np.isclose', (['net.res_shunt.p_mw.at[sh0]', '(0.10043933665)'], {}), '(net.res_shunt.p_mw.at[sh0], 0.10043933665)\n', (1348, 1391), True, 'import numpy as np\n'), ((1403, 1453), 'numpy.isclose', 'np.isclose', (['net.res_bus.vm_pu.at[b2]', '(1.0021942964)'], {}), '(net.res_bus.vm_pu.at[b2], 1.0021942964)\n', (1413, 1453), True, 'import numpy as np\n'), ((1510, 1574), 'pandapower.create_sgen_from_cosphi', 'pp.create_sgen_from_cosphi', (['net', 'b2', '(5)', '(0.95)', '"""cap"""'], {'name': '"""sgen"""'}), "(net, b2, 5, 0.95, 'cap', name='sgen')\n", (1536, 1574), True, 'import pandapower as pp\n'), ((1579, 1605), 'pandapower.runpp', 'pp.runpp', (['net'], {'init': '"""flat"""'}), "(net, init='flat')\n", (1587, 1605), True, 'import pandapower as pp\n'), ((1780, 1830), 'numpy.isclose', 'np.isclose', (['net.res_bus.vm_pu.at[b2]', '(1.0029376578)'], {}), '(net.res_bus.vm_pu.at[b2], 1.0029376578)\n', (1790, 1830), True, 'import numpy as np\n'), ((1901, 1989), 'pandapower.create_series_reactor_as_impedance', 'pp.create_series_reactor_as_impedance', (['net', 
'b1', 'b2'], {'r_ohm': '(100)', 'x_ohm': '(200)', 'sn_mva': '(100)'}), '(net, b1, b2, r_ohm=100, x_ohm=200,\n sn_mva=100)\n', (1938, 1989), True, 'import pandapower as pp\n'), ((2123, 2288), 'pandapower.create_transformer_from_parameters', 'pp.create_transformer_from_parameters', (['net'], {'hv_bus': 'b2', 'lv_bus': 'b3', 'sn_mva': '(0.1)', 'vn_hv_kv': '(110)', 'vn_lv_kv': '(20)', 'vkr_percent': '(5)', 'vk_percent': '(20)', 'pfe_kw': '(1)', 'i0_percent': '(1)'}), '(net, hv_bus=b2, lv_bus=b3, sn_mva=0.1,\n vn_hv_kv=110, vn_lv_kv=20, vkr_percent=5, vk_percent=20, pfe_kw=1,\n i0_percent=1)\n', (2160, 2288), True, 'import pandapower as pp\n'), ((2381, 2409), 'pandapower.create_load', 'pp.create_load', (['net', 'b3', '(0.1)'], {}), '(net, b3, 0.1)\n', (2395, 2409), True, 'import pandapower as pp\n'), ((2454, 2467), 'pandapower.runpp', 'pp.runpp', (['net'], {}), '(net)\n', (2462, 2467), True, 'import pandapower as pp\n'), ((2556, 2569), 'pandapower.runpp', 'pp.runpp', (['net'], {}), '(net)\n', (2564, 2569), True, 'import pandapower as pp\n'), ((2820, 2845), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (2843, 2845), True, 'import pandapower as pp\n'), ((5555, 5578), 'pandapower.create_bus', 'pp.create_bus', (['net', '(0.4)'], {}), '(net, 0.4)\n', (5568, 5578), True, 'import pandapower as pp\n'), ((5583, 5606), 'pandapower.create_bus', 'pp.create_bus', (['net', '(0.4)'], {}), '(net, 0.4)\n', (5596, 5606), True, 'import pandapower as pp\n'), ((5611, 5634), 'pandapower.create_bus', 'pp.create_bus', (['net', '(0.4)'], {}), '(net, 0.4)\n', (5624, 5634), True, 'import pandapower as pp\n'), ((5912, 5937), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (5935, 5937), True, 'import pandapower as pp\n'), ((5942, 5965), 'pandapower.create_bus', 'pp.create_bus', (['net', '(110)'], {}), '(net, 110)\n', (5955, 5965), True, 'import pandapower as pp\n'), ((5970, 5992), 'pandapower.create_bus', 'pp.create_bus', (['net', 
'(20)'], {}), '(net, 20)\n', (5983, 5992), True, 'import pandapower as pp\n'), ((6004, 6054), 'pandapower.load_std_type', 'pp.load_std_type', (['net', '"""25 MVA 110/20 kV"""', '"""trafo"""'], {}), "(net, '25 MVA 110/20 kV', 'trafo')\n", (6020, 6054), True, 'import pandapower as pp\n'), ((6133, 6199), 'pandapower.create_std_type', 'pp.create_std_type', (['net', 'data', '"""without_tap_shifter_info"""', '"""trafo"""'], {}), "(net, data, 'without_tap_shifter_info', 'trafo')\n", (6151, 6199), True, 'import pandapower as pp\n'), ((6204, 6293), 'pandapower.create_transformer_from_parameters', 'pp.create_transformer_from_parameters', (['net', '(0)', '(1)', '(25000.0)', '(110)', '(20)', '(0.4)', '(12)', '(20)', '(0.07)'], {}), '(net, 0, 1, 25000.0, 110, 20, 0.4, 12,\n 20, 0.07)\n', (6241, 6293), True, 'import pandapower as pp\n'), ((6291, 6351), 'pandapower.create_transformer', 'pp.create_transformer', (['net', '(0)', '(1)', '"""without_tap_shifter_info"""'], {}), "(net, 0, 1, 'without_tap_shifter_info')\n", (6312, 6351), True, 'import pandapower as pp\n'), ((6467, 6492), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (6490, 6492), True, 'import pandapower as pp\n'), ((6497, 6519), 'pandapower.create_bus', 'pp.create_bus', (['net', '(20)'], {}), '(net, 20)\n', (6510, 6519), True, 'import pandapower as pp\n'), ((6524, 6546), 'pandapower.create_bus', 'pp.create_bus', (['net', '(20)'], {}), '(net, 20)\n', (6537, 6546), True, 'import pandapower as pp\n'), ((6551, 6734), 'pandapower.create_std_type', 'pp.create_std_type', (['net', "{'c_nf_per_km': 210, 'max_i_ka': 0.142, 'q_mm2': 50, 'r_ohm_per_km': 0.642,\n 'type': 'cs', 'x_ohm_per_km': 0.083, 'g_us_per_km': 1}", '"""test_conductance"""'], {}), "(net, {'c_nf_per_km': 210, 'max_i_ka': 0.142, 'q_mm2': 50,\n 'r_ohm_per_km': 0.642, 'type': 'cs', 'x_ohm_per_km': 0.083,\n 'g_us_per_km': 1}, 'test_conductance')\n", (6569, 6734), True, 'import pandapower as pp\n'), ((6794, 6844), 
'pandapower.create_line', 'pp.create_line', (['net', '(0)', '(1)', '(1.0)', '"""test_conductance"""'], {}), "(net, 0, 1, 1.0, 'test_conductance')\n", (6808, 6844), True, 'import pandapower as pp\n'), ((6924, 6949), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (6947, 6949), True, 'import pandapower as pp\n'), ((6974, 7002), 'pandapower.create_buses', 'pp.create_buses', (['net', '(3)', '(110)'], {}), '(net, 3, 110)\n', (6989, 7002), True, 'import pandapower as pp\n'), ((7031, 7077), 'pandapower.create_buses', 'pp.create_buses', (['net', '(3)', '(110)'], {'geodata': '(10, 20)'}), '(net, 3, 110, geodata=(10, 20))\n', (7046, 7077), True, 'import pandapower as pp\n'), ((7120, 7160), 'numpy.array', 'np.array', (['[[10, 20], [20, 30], [30, 40]]'], {}), '([[10, 20], [20, 30], [30, 40]])\n', (7128, 7160), True, 'import numpy as np\n'), ((7170, 7215), 'pandapower.create_buses', 'pp.create_buses', (['net', '(3)', '(110)'], {'geodata': 'geodata'}), '(net, 3, 110, geodata=geodata)\n', (7185, 7215), True, 'import pandapower as pp\n'), ((7669, 7694), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (7692, 7694), True, 'import pandapower as pp\n'), ((7704, 7726), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 10)\n', (7717, 7726), True, 'import pandapower as pp\n'), ((7736, 7758), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 10)\n', (7749, 7758), True, 'import pandapower as pp\n'), ((7767, 7841), 'pandapower.create_lines', 'pp.create_lines', (['net', '[b1, b1]', '[b2, b2]', '(4)'], {'std_type': '"""48-AL1/8-ST1A 10.0"""'}), "(net, [b1, b1], [b2, b2], 4, std_type='48-AL1/8-ST1A 10.0')\n", (7782, 7841), True, 'import pandapower as pp\n'), ((8051, 8076), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (8074, 8076), True, 'import pandapower as pp\n'), ((8086, 8108), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 
10)\n', (8099, 8108), True, 'import pandapower as pp\n'), ((8118, 8140), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 10)\n', (8131, 8140), True, 'import pandapower as pp\n'), ((8149, 8289), 'pandapower.create_lines', 'pp.create_lines', (['net', '[b1, b1]', '[b2, b2]', '[1.5, 3]'], {'std_type': '"""48-AL1/8-ST1A 10.0"""', 'geodata': '[[(1, 1), (2, 2), (3, 3)], [(1, 1), (1, 2)]]'}), "(net, [b1, b1], [b2, b2], [1.5, 3], std_type=\n '48-AL1/8-ST1A 10.0', geodata=[[(1, 1), (2, 2), (3, 3)], [(1, 1), (1, 2)]])\n", (8164, 8289), True, 'import pandapower as pp\n'), ((8553, 8578), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (8576, 8578), True, 'import pandapower as pp\n'), ((8588, 8610), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 10)\n', (8601, 8610), True, 'import pandapower as pp\n'), ((8620, 8642), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 10)\n', (8633, 8642), True, 'import pandapower as pp\n'), ((8651, 8849), 'pandapower.create_lines', 'pp.create_lines', (['net', '[b1, b1]', '[b2, b2]'], {'length_km': '(5)', 'df': '(0.8)', 'in_service': '(False)', 'geodata': '[(10, 10), (20, 20)]', 'parallel': '(1)', 'max_loading_percent': '(90)', 'name': '"""test"""', 'std_type': '"""48-AL1/8-ST1A 10.0"""'}), "(net, [b1, b1], [b2, b2], length_km=5, df=0.8, in_service=\n False, geodata=[(10, 10), (20, 20)], parallel=1, max_loading_percent=90,\n name='test', std_type='48-AL1/8-ST1A 10.0')\n", (8666, 8849), True, 'import pandapower as pp\n'), ((9705, 9730), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (9728, 9730), True, 'import pandapower as pp\n'), ((9740, 9762), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 10)\n', (9753, 9762), True, 'import pandapower as pp\n'), ((9772, 9794), 'pandapower.create_bus', 'pp.create_bus', (['net', '(10)'], {}), '(net, 10)\n', (9785, 9794), True, 'import pandapower as 
pp\n'), ((9803, 10075), 'pandapower.create_lines', 'pp.create_lines', (['net', '[b1, b1]', '[b2, b2]'], {'length_km': '[1, 5]', 'df': '[0.8, 0.7]', 'in_service': '[True, False]', 'geodata': '[[(10, 10), (20, 20)], [(100, 10), (200, 20)]]', 'parallel': '[2, 1]', 'max_loading_percent': '[80, 90]', 'name': "['test1', 'test2']", 'std_type': '"""48-AL1/8-ST1A 10.0"""'}), "(net, [b1, b1], [b2, b2], length_km=[1, 5], df=[0.8, 0.7],\n in_service=[True, False], geodata=[[(10, 10), (20, 20)], [(100, 10), (\n 200, 20)]], parallel=[2, 1], max_loading_percent=[80, 90], name=[\n 'test1', 'test2'], std_type='48-AL1/8-ST1A 10.0')\n", (9818, 10075), True, 'import pandapower as pp\n'), ((10994, 11019), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (11017, 11019), True, 'import pandapower as pp\n'), ((11028, 11056), 'pandapower.create_buses', 'pp.create_buses', (['net', '(5)', '(110)'], {}), '(net, 5, 110)\n', (11043, 11056), True, 'import pandapower as pp\n'), ((11065, 11116), 'pandapower.create_line', 'pp.create_line', (['net', '(0)', '(1)', '(10)', '"""48-AL1/8-ST1A 10.0"""'], {}), "(net, 0, 1, 10, '48-AL1/8-ST1A 10.0')\n", (11079, 11116), True, 'import pandapower as pp\n'), ((11122, 11223), 'pandapower.create_line', 'pp.create_line', (['net', '(1)', '(2)', '(10)', '"""48-AL1/8-ST1A 10.0"""'], {'alpha': '(0.00403)', 'temperature_degree_celsius': '(80)'}), "(net, 1, 2, 10, '48-AL1/8-ST1A 10.0', alpha=0.00403,\n temperature_degree_celsius=80)\n", (11136, 11223), True, 'import pandapower as pp\n'), ((11225, 11276), 'pandapower.create_line', 'pp.create_line', (['net', '(2)', '(3)', '(10)', '"""48-AL1/8-ST1A 10.0"""'], {}), "(net, 2, 3, 10, '48-AL1/8-ST1A 10.0')\n", (11239, 11276), True, 'import pandapower as pp\n'), ((11282, 11341), 'pandapower.create_line_from_parameters', 'pp.create_line_from_parameters', (['net', '(3)', '(4)', '(10)', '(1)', '(1)', '(1)', '(100)'], {}), '(net, 3, 4, 10, 1, 1, 1, 100)\n', (11312, 11341), True, 'import pandapower as 
pp\n'), ((11344, 11418), 'pandapower.create_line_from_parameters', 'pp.create_line_from_parameters', (['net', '(3)', '(4)', '(10)', '(1)', '(1)', '(1)', '(100)'], {'alpha': '(0.00403)'}), '(net, 3, 4, 10, 1, 1, 1, 100, alpha=0.00403)\n', (11374, 11418), True, 'import pandapower as pp\n'), ((968, 1032), 'numpy.sqrt', 'np.sqrt', (['(net.load.p_mw.at[l0] ** 2 + net.load.q_mvar.at[l0] ** 2)'], {}), '(net.load.p_mw.at[l0] ** 2 + net.load.q_mvar.at[l0] ** 2)\n', (975, 1032), True, 'import numpy as np\n'), ((1617, 1683), 'numpy.sqrt', 'np.sqrt', (['(net.sgen.p_mw.at[sg0] ** 2 + net.sgen.q_mvar.at[sg0] ** 2)'], {}), '(net.sgen.p_mw.at[sg0] ** 2 + net.sgen.q_mvar.at[sg0] ** 2)\n', (1624, 1683), True, 'import numpy as np\n'), ((2695, 2721), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (2708, 2721), False, 'import pytest\n'), ((2731, 2744), 'pandapower.runpp', 'pp.runpp', (['net'], {}), '(net)\n', (2739, 2744), True, 'import pandapower as pp\n'), ((2870, 2936), 'functools.partial', 'partial', (['pp.create_load'], {'net': 'net', 'p_mw': '(0)', 'q_mvar': '(0)', 'bus': '(0)', 'index': '(0)'}), '(pp.create_load, net=net, p_mw=0, q_mvar=0, bus=0, index=0)\n', (2877, 2936), False, 'from functools import partial\n'), ((2962, 3028), 'functools.partial', 'partial', (['pp.create_sgen'], {'net': 'net', 'p_mw': '(0)', 'q_mvar': '(0)', 'bus': '(0)', 'index': '(0)'}), '(pp.create_sgen, net=net, p_mw=0, q_mvar=0, bus=0, index=0)\n', (2969, 3028), False, 'from functools import partial\n'), ((3054, 3190), 'functools.partial', 'partial', (['pp.create_dcline', 'net'], {'from_bus': '(0)', 'to_bus': '(1)', 'p_mw': '(0.1)', 'loss_percent': '(0)', 'loss_mw': '(0.01)', 'vm_from_pu': '(1.0)', 'vm_to_pu': '(1.0)', 'index': '(0)'}), '(pp.create_dcline, net, from_bus=0, to_bus=1, p_mw=0.1, loss_percent\n =0, loss_mw=0.01, vm_from_pu=1.0, vm_to_pu=1.0, index=0)\n', (3061, 3190), False, 'from functools import partial\n'), ((3241, 3296), 'functools.partial', 'partial', 
(['pp.create_gen'], {'net': 'net', 'p_mw': '(0)', 'bus': '(0)', 'index': '(0)'}), '(pp.create_gen, net=net, p_mw=0, bus=0, index=0)\n', (3248, 3296), False, 'from functools import partial\n'), ((3322, 3374), 'functools.partial', 'partial', (['pp.create_ward', 'net', '(0)', '(0)', '(0)', '(0)', '(0)'], {'index': '(0)'}), '(pp.create_ward, net, 0, 0, 0, 0, 0, index=0)\n', (3329, 3374), False, 'from functools import partial\n'), ((3400, 3462), 'functools.partial', 'partial', (['pp.create_xward', 'net', '(0)', '(0)', '(0)', '(0)', '(0)', '(1)', '(1)', '(1)'], {'index': '(0)'}), '(pp.create_xward, net, 0, 0, 0, 0, 0, 1, 1, 1, index=0)\n', (3407, 3462), False, 'from functools import partial\n'), ((3488, 3547), 'functools.partial', 'partial', (['pp.create_shunt'], {'net': 'net', 'q_mvar': '(0)', 'bus': '(0)', 'index': '(0)'}), '(pp.create_shunt, net=net, q_mvar=0, bus=0, index=0)\n', (3495, 3547), False, 'from functools import partial\n'), ((3573, 3625), 'functools.partial', 'partial', (['pp.create_ext_grid'], {'net': 'net', 'bus': '(1)', 'index': '(0)'}), '(pp.create_ext_grid, net=net, bus=1, index=0)\n', (3580, 3625), False, 'from functools import partial\n'), ((3651, 3758), 'functools.partial', 'partial', (['pp.create_line'], {'net': 'net', 'from_bus': '(0)', 'to_bus': '(1)', 'length_km': '(1.0)', 'std_type': '"""NAYY 4x50 SE"""', 'index': '(0)'}), "(pp.create_line, net=net, from_bus=0, to_bus=1, length_km=1.0,\n std_type='NAYY 4x50 SE', index=0)\n", (3658, 3758), False, 'from functools import partial\n'), ((3811, 3979), 'functools.partial', 'partial', (['pp.create_line_from_parameters'], {'net': 'net', 'from_bus': '(0)', 'to_bus': '(1)', 'length_km': '(1.0)', 'r_ohm_per_km': '(0.1)', 'x_ohm_per_km': '(0.1)', 'max_i_ka': '(0.4)', 'c_nf_per_km': '(10)', 'index': '(1)'}), '(pp.create_line_from_parameters, net=net, from_bus=0, to_bus=1,\n length_km=1.0, r_ohm_per_km=0.1, x_ohm_per_km=0.1, max_i_ka=0.4,\n c_nf_per_km=10, index=1)\n', (3818, 3979), False, 'from functools 
import partial\n'), ((4060, 4162), 'functools.partial', 'partial', (['pp.create_transformer'], {'net': 'net', 'hv_bus': '(0)', 'lv_bus': '(1)', 'std_type': '"""63 MVA 110/20 kV"""', 'index': '(0)'}), "(pp.create_transformer, net=net, hv_bus=0, lv_bus=1, std_type=\n '63 MVA 110/20 kV', index=0)\n", (4067, 4162), False, 'from functools import partial\n'), ((4215, 4337), 'functools.partial', 'partial', (['pp.create_transformer3w'], {'net': 'net', 'hv_bus': '(0)', 'lv_bus': '(1)', 'mv_bus': '(2)', 'std_type': '"""63/25/38 MVA 110/20/10 kV"""', 'index': '(0)'}), "(pp.create_transformer3w, net=net, hv_bus=0, lv_bus=1, mv_bus=2,\n std_type='63/25/38 MVA 110/20/10 kV', index=0)\n", (4222, 4337), False, 'from functools import partial\n'), ((4391, 4739), 'functools.partial', 'partial', (['pp.create_transformer3w_from_parameters'], {'net': 'net', 'hv_bus': '(0)', 'lv_bus': '(1)', 'mv_bus': '(2)', 'i0_percent': '(0.89)', 'pfe_kw': '(3.5)', 'vn_hv_kv': '(110)', 'vn_lv_kv': '(10)', 'vn_mv_kv': '(20)', 'sn_hv_mva': '(63)', 'sn_lv_mva': '(38)', 'sn_mv_mva': '(25)', 'vk_hv_percent': '(10.4)', 'vk_lv_percent': '(10.4)', 'vk_mv_percent': '(10.4)', 'vkr_hv_percent': '(0.28)', 'vkr_lv_percent': '(0.35)', 'vkr_mv_percent': '(0.32)', 'index': '(1)'}), '(pp.create_transformer3w_from_parameters, net=net, hv_bus=0, lv_bus=\n 1, mv_bus=2, i0_percent=0.89, pfe_kw=3.5, vn_hv_kv=110, vn_lv_kv=10,\n vn_mv_kv=20, sn_hv_mva=63, sn_lv_mva=38, sn_mv_mva=25, vk_hv_percent=\n 10.4, vk_lv_percent=10.4, vk_mv_percent=10.4, vkr_hv_percent=0.28,\n vkr_lv_percent=0.35, vkr_mv_percent=0.32, index=1)\n', (4398, 4739), False, 'from functools import partial\n'), ((4907, 5095), 'functools.partial', 'partial', (['pp.create_transformer_from_parameters'], {'net': 'net', 'hv_bus': '(0)', 'lv_bus': '(1)', 'sn_mva': '(60)', 'vn_hv_kv': '(20.0)', 'vn_lv_kv': '(0.4)', 'vk_percent': '(10)', 'vkr_percent': '(0.1)', 'pfe_kw': '(0)', 'i0_percent': '(0)', 'index': '(1)'}), '(pp.create_transformer_from_parameters, net=net, 
hv_bus=0, lv_bus=1,\n sn_mva=60, vn_hv_kv=20.0, vn_lv_kv=0.4, vk_percent=10, vkr_percent=0.1,\n pfe_kw=0, i0_percent=0, index=1)\n', (4914, 5095), False, 'from functools import partial\n'), ((5176, 5284), 'functools.partial', 'partial', (['pp.create_impedance'], {'net': 'net', 'from_bus': '(0)', 'to_bus': '(1)', 'rft_pu': '(0.1)', 'xft_pu': '(0.1)', 'sn_mva': '(0.6)', 'index': '(0)'}), '(pp.create_impedance, net=net, from_bus=0, to_bus=1, rft_pu=0.1,\n xft_pu=0.1, sn_mva=0.6, index=0)\n', (5183, 5284), False, 'from functools import partial\n'), ((5338, 5403), 'functools.partial', 'partial', (['pp.create_switch', 'net'], {'bus': '(0)', 'element': '(1)', 'et': '"""b"""', 'index': '(0)'}), "(pp.create_switch, net, bus=0, element=1, et='b', index=0)\n", (5345, 5403), False, 'from functools import partial\n'), ((7536, 7561), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7549, 7561), False, 'import pytest\n'), ((7571, 7616), 'pandapower.create_buses', 'pp.create_buses', (['net', '(2)', '(110)'], {'geodata': 'geodata'}), '(net, 2, 110, geodata=geodata)\n', (7586, 7616), True, 'import pandapower as pp\n'), ((5452, 5476), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5465, 5476), False, 'import pytest\n'), ((5736, 5760), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5749, 5760), False, 'import pytest\n')] |
import numpy as np
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
from sklearn import preprocessing
import os
import sys
def get_query_weight(datafile, weightfile):
    """Build query vectors from a feature file and a saved two-layer network.

    The first-layer affine map (W1, b1) of the stored network is applied to
    the dense feature matrix; the second-layer weights (already transposed
    by ``read_weight``) are returned alongside so the caller can score the
    queries against them.

    Args:
        datafile: path to the svmlight-format feature file (with header).
        weightfile: path to the ``.npz`` weight archive.

    Returns:
        (query, w1): projected feature matrix and second-layer weights.
    """
    feature_matrix = read_data(datafile, header=True).toarray()
    w0, b0, w1, b1 = read_weight(weightfile)
    query = np.matmul(feature_matrix, w0) + b0
    return query, w1
def read_data(filename, header=False, dtype='float32', zero_based=True):
    """Load a sparse feature matrix in (extended) svmlight format.

    When ``header`` is True the first line is expected to hold
    ``num_samples num_features num_labels``; the feature count is then
    forwarded to the parser, otherwise dimensions are inferred from the
    data itself. Only the sparse feature matrix is returned; labels are
    parsed but discarded.

    NOTE(review): the ``dtype`` and ``zero_based`` parameters are accepted
    but currently unused — kept for interface compatibility.
    """
    with open(filename, 'rb') as handle:
        num_feat = None
        if header:
            header_line = handle.readline().decode('utf-8').rstrip("\n")
            tokens = header_line.split(" ")
            num_samples, num_feat, num_labels = (int(tok) for tok in tokens[:3])
            _l_shape = (num_samples, num_labels)
        features, _labels = load_svmlight_file(handle, n_features=num_feat,
                                               multilabel=True)
    return features
def read_weight(filename):
    """Read a two-layer network checkpoint from an ``.npz`` archive.

    Returns ``(W1, b1, W2^T, b2)``: the second-layer weight matrix is
    transposed so it can be right-multiplied against the projected queries.
    """
    archive = np.load(filename)
    first_w, first_b = archive['W1'], archive['b1']
    second_w, second_b = archive['W2'], archive['b2']
    return first_w, first_b, np.transpose(second_w), second_b
| [
"numpy.load",
"numpy.transpose",
"sklearn.datasets.load_svmlight_file",
"numpy.matmul"
] | [((1450, 1467), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1457, 1467), True, 'import numpy as np\n'), ((1557, 1573), 'numpy.transpose', 'np.transpose', (['w1'], {}), '(w1)\n', (1569, 1573), True, 'import numpy as np\n'), ((428, 451), 'numpy.matmul', 'np.matmul', (['features', 'w0'], {}), '(features, w0)\n', (437, 451), True, 'import numpy as np\n'), ((1266, 1325), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['f'], {'n_features': 'num_feat', 'multilabel': '(True)'}), '(f, n_features=num_feat, multilabel=True)\n', (1284, 1325), False, 'from sklearn.datasets import load_svmlight_file, dump_svmlight_file\n')] |
import numpy
from fqe.fci_graph import Spinmap, FciGraph
from fqe.fqe_data import FqeData
from fqe.fqe_data_set import FqeDataSet
from fqe.wavefunction import Wavefunction
def compare_Spinmap(A: Spinmap, B: Spinmap) -> bool:
    """Assert that two Spinmaps have identical keys and identical arrays.

    Args:
        A: First spinmap

        B: Second spinmap

    Returns:
        (bool) - A == B
    """
    assert A.keys() == B.keys()
    for key, value in A.items():
        assert numpy.array_equal(value, B[key])
    return True
def compareFciGraph(graph1: 'FciGraph', graph2: 'FciGraph'):
    """Assert that two FciGraph objects are equal.

    Args:
        graph1: The FciGraph object to compare to.

        graph2: The FciGraph object to compare to.
    """
    # Scalar bookkeeping must match exactly.
    for attr in ('_norb', '_nalpha', '_nbeta', '_lena', '_lenb'):
        assert getattr(graph1, attr) == getattr(graph2, attr)
    # String and index arrays.
    for attr in ('_astr', '_bstr', '_aind', '_bind'):
        assert numpy.array_equal(getattr(graph1, attr), getattr(graph2, attr))
    # Alpha/beta excitation maps.
    assert compare_Spinmap(graph1._alpha_map, graph2._alpha_map)
    assert compare_Spinmap(graph1._beta_map, graph2._beta_map)
    # Diagonal excitation arrays.
    for attr in ('_dexca', '_dexcb'):
        assert numpy.array_equal(getattr(graph1, attr), getattr(graph2, attr))
    return True
def FqeData_isclose(data1: 'FqeData', data2: 'FqeData', **kwargs) -> bool:
    """Assert two FqeData objects match, with coefficients close in tolerance.

    Args:
        data1 (FqeData) - The set to compare to.

        data2 (FqeData) - The set to compare to.

    Kwargs:
        rtol (float) - The relative tolerance parameter (as in numpy.isclose).

        atol (float) - The absolute tolerance parameter (as in numpy.isclose).

    Returns:
        (bool) - if closeness is satisfied
    """
    # Particle number, spin projection and dtype must match exactly.
    for attr in ('_nele', '_m_s', '_dtype'):
        assert getattr(data1, attr) == getattr(data2, attr)
    assert compareFciGraph(data1._core, data2._core)
    # Coefficients only need to agree up to the supplied tolerances.
    assert numpy.allclose(data1.coeff, data2.coeff, **kwargs)
    return True
def FqeDataSet_isclose(dataset1: 'FqeDataSet', dataset2: 'FqeDataSet',
                       **kwargs) -> bool:
    """Assert two FqeDataSets match, sector by sector, within tolerance.

    Args:
        dataset1 (FqeDataSet) - The set to compare to.

        dataset2 (FqeDataSet) - The set to compare to.

    Kwargs:
        rtol (float) - The relative tolerance parameter (as in numpy.isclose).

        atol (float) - The absolute tolerance parameter (as in numpy.isclose).

    Returns:
        (bool) - if closeness is satisfied
    """
    assert dataset1._nele == dataset2._nele
    assert dataset1._norb == dataset2._norb
    assert dataset1._data.keys() == dataset2._data.keys()
    # Compare each sector pairwise with the same tolerances.
    for key, sector in dataset1._data.items():
        assert FqeData_isclose(sector, dataset2._data[key], **kwargs)
    return True
def Wavefunction_isclose(wfn1: 'Wavefunction', wfn2: 'Wavefunction',
                         **kwargs) -> bool:
    """Assert two Wavefunctions match, with civectors close in tolerance.

    Args:
        wfn1 (Wavefunction) - The set to compare to.

        wfn2 (Wavefunction) - The set to compare to.

    Kwargs:
        rtol (float) - The relative tolerance parameter (as in numpy.isclose).

        atol (float) - The absolute tolerance parameter (as in numpy.isclose).

    Returns:
        (bool) - if closeness is satisfied
    """
    # Symmetry/conservation bookkeeping must match exactly.
    for attr in ('_symmetry_map', '_conserved', '_conserve_spin',
                 '_conserve_number', '_norb'):
        assert getattr(wfn1, attr) == getattr(wfn2, attr)
    assert wfn1._civec.keys() == wfn2._civec.keys()
    # Each CI sector is compared with the same tolerances.
    for key, sector in wfn1._civec.items():
        assert FqeData_isclose(sector, wfn2._civec[key], **kwargs)
    return True
| [
"numpy.array_equal",
"numpy.allclose"
] | [((940, 985), 'numpy.array_equal', 'numpy.array_equal', (['graph1._astr', 'graph2._astr'], {}), '(graph1._astr, graph2._astr)\n', (957, 985), False, 'import numpy\n'), ((997, 1042), 'numpy.array_equal', 'numpy.array_equal', (['graph1._bstr', 'graph2._bstr'], {}), '(graph1._bstr, graph2._bstr)\n', (1014, 1042), False, 'import numpy\n'), ((1054, 1099), 'numpy.array_equal', 'numpy.array_equal', (['graph1._aind', 'graph2._aind'], {}), '(graph1._aind, graph2._aind)\n', (1071, 1099), False, 'import numpy\n'), ((1111, 1156), 'numpy.array_equal', 'numpy.array_equal', (['graph1._bind', 'graph2._bind'], {}), '(graph1._bind, graph2._bind)\n', (1128, 1156), False, 'import numpy\n'), ((1296, 1343), 'numpy.array_equal', 'numpy.array_equal', (['graph1._dexca', 'graph2._dexca'], {}), '(graph1._dexca, graph2._dexca)\n', (1313, 1343), False, 'import numpy\n'), ((1355, 1402), 'numpy.array_equal', 'numpy.array_equal', (['graph1._dexcb', 'graph2._dexcb'], {}), '(graph1._dexcb, graph2._dexcb)\n', (1372, 1402), False, 'import numpy\n'), ((2078, 2128), 'numpy.allclose', 'numpy.allclose', (['data1.coeff', 'data2.coeff'], {}), '(data1.coeff, data2.coeff, **kwargs)\n', (2092, 2128), False, 'import numpy\n'), ((434, 463), 'numpy.array_equal', 'numpy.array_equal', (['A[k]', 'B[k]'], {}), '(A[k], B[k])\n', (451, 463), False, 'import numpy\n')] |
import numpy as np
import tensorflow as tf
from keras import keras_parameterized, layers
from keras.utils.generic_utils import register_keras_serializable
from tfswin.winatt import WindowAttention
from testing_utils import layer_multi_io_test
@register_keras_serializable('TFSwin')
class WindowAttentionSqueeze(WindowAttention):
    """WindowAttention variant whose trailing input is a rank-1 bool flag.

    The last input spec is replaced with a 1-D boolean spec, and the flag
    tensor is squeezed to a scalar before delegating to the parent layer.
    """

    @staticmethod
    def _flag_spec():
        # Spec for the trailing with-mask flag: a boolean vector.
        return layers.InputSpec(ndim=1, dtype='bool')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input_spec = self.input_spec[:-1] + [self._flag_spec()]

    def build(self, input_shape):
        super().build(input_shape)
        self.input_spec = self.input_spec[:-1] + [self._flag_spec()]

    def call(self, inputs, **kwargs):
        features, mask, with_mask = inputs
        scalar_flag = tf.squeeze(with_mask, axis=0)
        return super().call([features, mask, scalar_flag], **kwargs)
@keras_parameterized.run_all_keras_modes
class TestWindowAttention(keras_parameterized.TestCase):
    def test_layer(self):
        """Exercise the layer with the with-mask flag both off and on."""
        features = 10 * np.random.random((1, 49, 96)) - 0.5
        masks = 10 * np.random.random((1, 1, 1, 49, 49)) - 0.5

        for flag in (False, True):
            layer_multi_io_test(
                WindowAttentionSqueeze,
                kwargs={'window_size': 7, 'num_heads': 3, 'qkv_bias': True,
                        'qk_scale': None, 'attn_drop': 0., 'proj_drop': 0.},
                input_datas=[features, masks, np.array([flag])],
                input_dtypes=['float32', 'float32', 'bool'],
                expected_output_shapes=[(None, 49, 96)],
                expected_output_dtypes=['float32']
            )
if __name__ == '__main__':
    # Delegate to the TensorFlow test runner when executed directly.
    tf.test.main()
| [
"tensorflow.test.main",
"numpy.random.random",
"numpy.array",
"tensorflow.squeeze",
"keras.utils.generic_utils.register_keras_serializable",
"keras.layers.InputSpec"
] | [((246, 283), 'keras.utils.generic_utils.register_keras_serializable', 'register_keras_serializable', (['"""TFSwin"""'], {}), "('TFSwin')\n", (273, 283), False, 'from keras.utils.generic_utils import register_keras_serializable\n'), ((1984, 1998), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (1996, 1998), True, 'import tensorflow as tf\n'), ((464, 502), 'keras.layers.InputSpec', 'layers.InputSpec', ([], {'ndim': '(1)', 'dtype': '"""bool"""'}), "(ndim=1, dtype='bool')\n", (480, 502), False, 'from keras import keras_parameterized, layers\n'), ((624, 662), 'keras.layers.InputSpec', 'layers.InputSpec', ([], {'ndim': '(1)', 'dtype': '"""bool"""'}), "(ndim=1, dtype='bool')\n", (640, 662), False, 'from keras import keras_parameterized, layers\n'), ((788, 817), 'tensorflow.squeeze', 'tf.squeeze', (['with_mask'], {'axis': '(0)'}), '(with_mask, axis=0)\n', (798, 817), True, 'import tensorflow as tf\n'), ((978, 1007), 'numpy.random.random', 'np.random.random', (['(1, 49, 96)'], {}), '((1, 49, 96))\n', (994, 1007), True, 'import numpy as np\n'), ((1035, 1070), 'numpy.random.random', 'np.random.random', (['(1, 1, 1, 49, 49)'], {}), '((1, 1, 1, 49, 49))\n', (1051, 1070), True, 'import numpy as np\n'), ((1328, 1345), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (1336, 1345), True, 'import numpy as np\n'), ((1765, 1781), 'numpy.array', 'np.array', (['[True]'], {}), '([True])\n', (1773, 1781), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
#
# In this script we solve the Laplace problem on a unit square that has the
# bottom-right quadrant removed (a.k.a. an L-shaped domain) with Dirichlet
# boundary conditions matching the harmonic function
#
# .. math:: \sqrt[3]{x^2 + y^2} \cos\left(\tfrac23 \arctan\frac{y+x}{y-x}\right),
#
# shifted by 0.5 such that the origin coincides with the middle of the unit
# square. This variation of a well known benchmark problem is known to converge
# suboptimally under uniform refinement due to a singular gradient in the
# reentrant corner. This script demonstrates that optimal convergence can be
# restored by using adaptive refinement.
import nutils, numpy
# The main function defines the parameter space for the script. Configurable
# parameters are the element type (square, triangle, or mixed), type of basis
# function (std or spline, with availability depending on element type),
# polynomial degree, and the number of refinement steps to perform before
# quitting (by default the script will run forever).
def main(etype: 'type of elements (square/triangle/mixed)' = 'square',
         btype: 'type of basis function (h/th-std/spline)' = 'h-std',
         degree: 'polynomial degree' = 2,
         nrefine: 'number of refinement steps (-1 for unlimited)' = -1):
  """Adaptively solve the Laplace problem on an L-shaped domain.

  Returns the final ``(ndofs, error, rate, lhs)``: the number of degrees of
  freedom, the (L2, H1) error pair against the exact solution, the fitted
  convergence rates, and the solution vector.
  """
  # Unit square mesh; the reentrant corner is produced by trimming below.
  domain, geom = nutils.mesh.unitsquare(2, etype)
  x, y = geom - .5
  # Exact harmonic solution (see module header) with a singular gradient
  # at the center of the square.
  exact = (x**2 + y**2)**(1/3) * nutils.function.cos(nutils.function.arctan2(y+x, y-x) * (2/3))
  # Trim where the exact solution (shifted by a tiny tolerance) is
  # negative, leaving the L-shaped domain.
  domain = domain.trim(exact-1e-15, maxrefine=0)
  # Regressor to estimate convergence rates of error vs dof count.
  linreg = nutils.util.linear_regressor()
  for irefine in nutils.log.count('level'):
    ns = nutils.function.Namespace()
    ns.x = geom
    ns.basis = domain.basis(btype, degree=degree)
    ns.u = 'basis_n ?lhs_n'
    ns.du = ns.u - exact
    # Dirichlet data on the trimmed edge: minimize u^2 there (the exact
    # solution vanishes along the trim curve — NOTE(review): confirm).
    sqr = domain.boundary['trimmed'].integral('u^2 d:x' @ ns, degree=degree*2)
    cons = nutils.solver.optimize('lhs', sqr, droptol=1e-15)
    # Dirichlet data on the remaining boundary: match the exact solution.
    sqr = domain.boundary.integral('du^2 d:x' @ ns, degree=7)
    cons = nutils.solver.optimize('lhs', sqr, droptol=1e-15, constrain=cons)
    # Galerkin residual of the Laplace operator, solved linearly.
    res = domain.integral('basis_n,k u_,k d:x' @ ns, degree=degree*2)
    lhs = nutils.solver.solve_linear('lhs', res, constrain=cons)
    ndofs = len(ns.basis)
    # Two-component error: L2 norm of du and norm of its gradient.
    error = domain.integral('<du^2, du_,k du_,k>_i d:x' @ ns, degree=7).eval(lhs=lhs)**.5
    # Update the log-log fit of error against dof count.
    rate, offset = linreg.add(numpy.log(len(ns.basis)), numpy.log(error))
    nutils.log.user('ndofs: {ndofs}, L2 error: {error[0]:.2e} ({rate[0]:.2f}), H1 error: {error[1]:.2e} ({rate[1]:.2f})'.format(ndofs=len(ns.basis), error=error, rate=rate))
    # Export solution and pointwise error plots for this level.
    bezier = domain.sample('bezier', 9)
    x, u, du = bezier.eval(['x_i', 'u', 'du'] @ ns, lhs=lhs)
    nutils.export.triplot('sol.jpg', x, u, tri=bezier.tri, hull=bezier.hull)
    nutils.export.triplot('err.jpg', x, du, tri=bezier.tri, hull=bezier.hull)
    if irefine == nrefine:
      break
    # Residual-based error indicator, evaluated on the uniformly refined
    # mesh: interior residual minus the boundary flux contribution.
    refdom = domain.refined
    ns.refbasis = refdom.basis(btype, degree=degree)
    indicator = refdom.integral('refbasis_n,k u_,k d:x' @ ns, degree=degree*2).eval(lhs=lhs)
    indicator -= refdom.boundary.integral('refbasis_n u_,k n_k d:x' @ ns, degree=degree*2).eval(lhs=lhs)
    # Refine elements whose squared indicator exceeds the mean.
    mask = indicator**2 > numpy.mean(indicator**2)
    domain = domain.refined_by(elem.transform[:-1] for elem in domain.refined.supp(ns.refbasis, mask))
  return ndofs, error, rate, lhs
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to perform four refinement steps with quadratic basis functions
# starting from a triangle mesh run :sh:`python3 adaptivity.py etype=triangle
# degree=2 nrefine=4`.
if __name__ == '__main__':
  # Parse command-line arguments and dispatch to main via the nutils CLI.
  nutils.cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategicly chosen return values for routine regression testing. Here we use
# the standard :mod:`unittest` framework, with
# :func:`nutils.numeric.assert_allclose64` facilitating the embedding of
# desired results as compressed base64 data.
import unittest
class test(unittest.TestCase):
  """Regression tests pinning dof counts, errors, rates and solution data.

  The expected solution vectors are embedded as compressed base64 blobs
  and checked with :func:`nutils.numeric.assert_allclose64`.
  """

  def test_square_quadratic(self):
    ndofs, error, rate, lhs = main(nrefine=2, etype='square', degree=2)
    self.assertEqual(ndofs, 149)
    numpy.testing.assert_almost_equal(error, [0.00065, 0.03461], decimal=5)
    numpy.testing.assert_almost_equal(rate, [-1.066, -0.478], decimal=3)
    nutils.numeric.assert_allclose64(lhs, 'eNo1j6FrQmEUxT8RBi4KllVfMsl3z/nK4zEmLC'
      '6bhsKCw2gSw5IPFsymGbZiWnr+By8Ii7Yhsk3BMtC4Z9sJ223ncs85vzvmM9+Yhix8hDIjtnkd'
      'HqQSdDDDj1Qajr5qPXN/07MZ2vI4V7UOIvmdO/oEZY45xYDnoR7ikLHAHVpcs2A1TLhChDO+MO'
      'eWt5xjYzm6fOQrGxxiZPeoMGaf37hCyU72hB0u6PglPcQcKxRI/KUd7AYLvMPpsqGkCTPumzWf'
      '+qV92kKevjK36ozDP/FSnh1iteWiqWuf+oMaKuyKaC1i52rKPokiF2WLA/20bya+ZCPbWKRPpv'
      'gFaedebw==')

  def test_triangle_quadratic(self):
    ndofs, error, rate, lhs = main(nrefine=2, etype='triangle', degree=2)
    self.assertEqual(ndofs, 98)
    numpy.testing.assert_almost_equal(error, [0.00138, 0.05324], decimal=5)
    numpy.testing.assert_almost_equal(rate, [-1.111, -0.548], decimal=3)
    nutils.numeric.assert_allclose64(lhs, 'eNprMV1oesqU2VTO1Nbko6myWbhpq+kckwST90'
      'avjRgYzptYm+YYMwBBk3GQWavZb1NXs2+mm83um1WYbQbyXYEiQWbKZjNM7wJVzjBlYICoPW8C'
      'MiXH+LXRR9NwoPkg82xN5IB2MZu2mGabSBnnAbGscYEJj3GVYQAQg/TVGfaA7RI0BsErRjeNeo'
      'wDgDQPmF9gkmciaJxtArGjzrAKCGWNpYAQAL0kOBE=')

  def test_mixed_linear(self):
    ndofs, error, rate, lhs = main(nrefine=2, etype='mixed', degree=1)
    self.assertEqual(ndofs, 34)
    numpy.testing.assert_almost_equal(error, [0.00450, 0.11683], decimal=5)
    numpy.testing.assert_almost_equal(rate, [-1.143, -0.545], decimal=3)
    nutils.numeric.assert_allclose64(lhs, 'eNprMT1u6mQyxUTRzMCUAQhazL6b3jNrMYPxp5'
      'iA5FtMD+lcMgDxHa4aXzS+6HDV+fKO85cMnC8zMBzSAQDBThbY')
| [
"nutils.numeric.assert_allclose64",
"nutils.log.count",
"numpy.log",
"nutils.cli.run",
"numpy.testing.assert_almost_equal",
"nutils.function.arctan2",
"nutils.mesh.unitsquare",
"nutils.export.triplot",
"numpy.mean",
"nutils.solver.solve_linear",
"nutils.function.Namespace",
"nutils.solver.opti... | [((1317, 1349), 'nutils.mesh.unitsquare', 'nutils.mesh.unitsquare', (['(2)', 'etype'], {}), '(2, etype)\n', (1339, 1349), False, 'import nutils, numpy\n'), ((1526, 1556), 'nutils.util.linear_regressor', 'nutils.util.linear_regressor', ([], {}), '()\n', (1554, 1556), False, 'import nutils, numpy\n'), ((1575, 1600), 'nutils.log.count', 'nutils.log.count', (['"""level"""'], {}), "('level')\n", (1591, 1600), False, 'import nutils, numpy\n'), ((3668, 3688), 'nutils.cli.run', 'nutils.cli.run', (['main'], {}), '(main)\n', (3682, 3688), False, 'import nutils, numpy\n'), ((1612, 1639), 'nutils.function.Namespace', 'nutils.function.Namespace', ([], {}), '()\n', (1637, 1639), False, 'import nutils, numpy\n'), ((1850, 1899), 'nutils.solver.optimize', 'nutils.solver.optimize', (['"""lhs"""', 'sqr'], {'droptol': '(1e-15)'}), "('lhs', sqr, droptol=1e-15)\n", (1872, 1899), False, 'import nutils, numpy\n'), ((1974, 2039), 'nutils.solver.optimize', 'nutils.solver.optimize', (['"""lhs"""', 'sqr'], {'droptol': '(1e-15)', 'constrain': 'cons'}), "('lhs', sqr, droptol=1e-15, constrain=cons)\n", (1996, 2039), False, 'import nutils, numpy\n'), ((2121, 2175), 'nutils.solver.solve_linear', 'nutils.solver.solve_linear', (['"""lhs"""', 'res'], {'constrain': 'cons'}), "('lhs', res, constrain=cons)\n", (2147, 2175), False, 'import nutils, numpy\n'), ((2647, 2719), 'nutils.export.triplot', 'nutils.export.triplot', (['"""sol.jpg"""', 'x', 'u'], {'tri': 'bezier.tri', 'hull': 'bezier.hull'}), "('sol.jpg', x, u, tri=bezier.tri, hull=bezier.hull)\n", (2668, 2719), False, 'import nutils, numpy\n'), ((2724, 2797), 'nutils.export.triplot', 'nutils.export.triplot', (['"""err.jpg"""', 'x', 'du'], {'tri': 'bezier.tri', 'hull': 'bezier.hull'}), "('err.jpg', x, du, tri=bezier.tri, hull=bezier.hull)\n", (2745, 2797), False, 'import nutils, numpy\n'), ((4207, 4278), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['error', '[0.00065, 0.03461]'], {'decimal': 
'(5)'}), '(error, [0.00065, 0.03461], decimal=5)\n', (4240, 4278), False, 'import nutils, numpy\n'), ((4283, 4351), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['rate', '[-1.066, -0.478]'], {'decimal': '(3)'}), '(rate, [-1.066, -0.478], decimal=3)\n', (4316, 4351), False, 'import nutils, numpy\n'), ((4356, 4750), 'nutils.numeric.assert_allclose64', 'nutils.numeric.assert_allclose64', (['lhs', '"""eNo1j6FrQmEUxT8RBi4KllVfMsl3z/nK4zEmLC6bhsKCw2gSw5IPFsymGbZiWnr+By8Ii7Yhsk3BMtC4Z9sJ223ncs85vzvmM9+Yhix8hDIjtnkdHqQSdDDDj1Qajr5qPXN/07MZ2vI4V7UOIvmdO/oEZY45xYDnoR7ikLHAHVpcs2A1TLhChDO+MOeWt5xjYzm6fOQrGxxiZPeoMGaf37hCyU72hB0u6PglPcQcKxRI/KUd7AYLvMPpsqGkCTPumzWf+qV92kKevjK36ozDP/FSnh1iteWiqWuf+oMaKuyKaC1i52rKPokiF2WLA/20bya+ZCPbWKRPpvgFaedebw=="""'], {}), "(lhs,\n 'eNo1j6FrQmEUxT8RBi4KllVfMsl3z/nK4zEmLC6bhsKCw2gSw5IPFsymGbZiWnr+By8Ii7Yhsk3BMtC4Z9sJ223ncs85vzvmM9+Yhix8hDIjtnkdHqQSdDDDj1Qajr5qPXN/07MZ2vI4V7UOIvmdO/oEZY45xYDnoR7ikLHAHVpcs2A1TLhChDO+MOeWt5xjYzm6fOQrGxxiZPeoMGaf37hCyU72hB0u6PglPcQcKxRI/KUd7AYLvMPpsqGkCTPumzWf+qV92kKevjK36ozDP/FSnh1iteWiqWuf+oMaKuyKaC1i52rKPokiF2WLA/20bya+ZCPbWKRPpvgFaedebw=='\n )\n", (4388, 4750), False, 'import nutils, numpy\n'), ((4935, 5006), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['error', '[0.00138, 0.05324]'], {'decimal': '(5)'}), '(error, [0.00138, 0.05324], decimal=5)\n', (4968, 5006), False, 'import nutils, numpy\n'), ((5011, 5079), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['rate', '[-1.111, -0.548]'], {'decimal': '(3)'}), '(rate, [-1.111, -0.548], decimal=3)\n', (5044, 5079), False, 'import nutils, numpy\n'), ((5084, 5362), 'nutils.numeric.assert_allclose64', 'nutils.numeric.assert_allclose64', (['lhs', '"""eNprMV1oesqU2VTO1Nbko6myWbhpq+kckwST90avjRgYzptYm+YYMwBBk3GQWavZb1NXs2+mm83um1WYbQbyXYEiQWbKZjNM7wJVzjBlYICoPW8CMiXH+LXRR9NwoPkg82xN5IB2MZu2mGabSBnnAbGscYEJj3GVYQAQg/TVGfaA7RI0BsErRjeNeowDgDQPmF9gkmciaJxtArGjzrAKCGWNpYAQAL0kOBE="""'], 
{}), "(lhs,\n 'eNprMV1oesqU2VTO1Nbko6myWbhpq+kckwST90avjRgYzptYm+YYMwBBk3GQWavZb1NXs2+mm83um1WYbQbyXYEiQWbKZjNM7wJVzjBlYICoPW8CMiXH+LXRR9NwoPkg82xN5IB2MZu2mGabSBnnAbGscYEJj3GVYQAQg/TVGfaA7RI0BsErRjeNeowDgDQPmF9gkmciaJxtArGjzrAKCGWNpYAQAL0kOBE='\n )\n", (5116, 5362), False, 'import nutils, numpy\n'), ((5520, 5590), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['error', '[0.0045, 0.11683]'], {'decimal': '(5)'}), '(error, [0.0045, 0.11683], decimal=5)\n', (5553, 5590), False, 'import nutils, numpy\n'), ((5596, 5664), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['rate', '[-1.143, -0.545]'], {'decimal': '(3)'}), '(rate, [-1.143, -0.545], decimal=3)\n', (5629, 5664), False, 'import nutils, numpy\n'), ((5669, 5807), 'nutils.numeric.assert_allclose64', 'nutils.numeric.assert_allclose64', (['lhs', '"""eNprMT1u6mQyxUTRzMCUAQhazL6b3jNrMYPxp5iA5FtMD+lcMgDxHa4aXzS+6HDV+fKO85cMnC8zMBzSAQDBThbY"""'], {}), "(lhs,\n 'eNprMT1u6mQyxUTRzMCUAQhazL6b3jNrMYPxp5iA5FtMD+lcMgDxHa4aXzS+6HDV+fKO85cMnC8zMBzSAQDBThbY'\n )\n", (5701, 5807), False, 'import nutils, numpy\n'), ((2349, 2365), 'numpy.log', 'numpy.log', (['error'], {}), '(error)\n', (2358, 2365), False, 'import nutils, numpy\n'), ((3144, 3170), 'numpy.mean', 'numpy.mean', (['(indicator ** 2)'], {}), '(indicator ** 2)\n', (3154, 3170), False, 'import nutils, numpy\n'), ((1423, 1460), 'nutils.function.arctan2', 'nutils.function.arctan2', (['(y + x)', '(y - x)'], {}), '(y + x, y - x)\n', (1446, 1460), False, 'import nutils, numpy\n')] |
"""NTM Read/Write Head."""
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
def _split_cols(mat, lengths):
"""Split a 2D matrix to variable length columns."""
assert mat.size()[1] == sum(lengths), "Lengths must be summed to num columns"
l = np.cumsum([0] + lengths)
results = []
for s, e in zip(l[:-1], l[1:]):
results += [mat[:, s:e]]
return results
class NTMHeadBase(nn.Module):
    """An NTM Read/Write Head.

    Base class holding the memory handle and the shared addressing logic;
    subclasses implement the read/write specific behavior.
    """

    def __init__(self, memory, controller_size):
        """Initilize the read/write head.

        :param memory: The :class:`NTMMemory` to be addressed by the head.
        :param controller_size: The size of the internal representation.
        """
        super(NTMHeadBase, self).__init__()

        self.memory = memory
        self.N, self.M = memory.size()
        self.controller_size = controller_size

    def create_new_state(self, batch_size):
        raise NotImplementedError

    def init_weights(self):
        raise NotImplementedError

    def is_read_head(self):
        # Bug fix: the original *returned* the NotImplementedError class
        # (a truthy value) instead of raising it.
        raise NotImplementedError

    def _address_memory(self, k, β, g, s, γ, w_prev):
        # Activations
        k = F.relu(k)
        β = F.relu(β)
        g = torch.sigmoid(g)  # torch.sigmoid replaces the deprecated F.sigmoid
        # s comes from a fc-layer split, i.e. shape (batch, 3); dim=1 pins the
        # softmax axis that the deprecated implicit-dim behavior would pick.
        s = F.softmax(F.relu(s), dim=1)
        γ = 1 + F.relu(γ)

        w = self.memory.address(k, β, g, s, γ, w_prev)

        return w
class NTMReadHead(NTMHeadBase):
    """A read head: emits k, β, g, s, γ from the controller state and reads memory."""

    def __init__(self, memory, controller_size):
        super(NTMReadHead, self).__init__(memory, controller_size)

        # Corresponding to k, β, g, s, γ sizes from the paper
        self.read_lengths = [self.M, 1, 1, 3, 1]
        self.fc_read = nn.Linear(controller_size, sum(self.read_lengths))
        self.reset_parameters()

    def create_new_state(self, batch_size):
        # The state holds the previous time step address weightings.
        # (The `Variable` wrapper is a no-op since PyTorch 0.4 and was dropped.)
        return torch.zeros(batch_size, self.N)

    def reset_parameters(self):
        # Initialize the linear layers using the in-place init variants;
        # the non-underscore forms are deprecated.
        nn.init.xavier_uniform_(self.fc_read.weight, gain=1.4)
        nn.init.normal_(self.fc_read.bias, std=0.01)

    def is_read_head(self):
        return True

    def forward(self, embeddings, w_prev):
        """NTMReadHead forward function.

        :param embeddings: input representation of the controller.
        :param w_prev: previous step state
        """
        o = self.fc_read(embeddings)
        k, β, g, s, γ = _split_cols(o, self.read_lengths)

        # Read from memory
        w = self._address_memory(k, β, g, s, γ, w_prev)
        r = self.memory.read(w)

        return r, w
class NTMWriteHead(NTMHeadBase):
    """A write head: emits k, β, g, s, γ, e, a from the controller state and writes memory."""

    def __init__(self, memory, controller_size):
        super(NTMWriteHead, self).__init__(memory, controller_size)

        # Corresponding to k, β, g, s, γ, e, a sizes from the paper
        self.write_lengths = [self.M, 1, 1, 3, 1, self.M, self.M]
        self.fc_write = nn.Linear(controller_size, sum(self.write_lengths))
        self.reset_parameters()

    def create_new_state(self, batch_size):
        # Previous time step address weightings.
        # (The `Variable` wrapper is a no-op since PyTorch 0.4 and was dropped.)
        return torch.zeros(batch_size, self.N)

    def reset_parameters(self):
        # Initialize the linear layers using the in-place init variants;
        # the non-underscore forms are deprecated.
        nn.init.xavier_uniform_(self.fc_write.weight, gain=1.4)
        nn.init.normal_(self.fc_write.bias, std=0.01)

    def is_read_head(self):
        return False

    def forward(self, embeddings, w_prev):
        """NTMWriteHead forward function.

        :param embeddings: input representation of the controller.
        :param w_prev: previous step state
        """
        o = self.fc_write(embeddings)
        k, β, g, s, γ, e, a = _split_cols(o, self.write_lengths)

        # Handle activations
        e = F.relu(e)
        a = F.relu(a)

        # Write to memory
        w = self._address_memory(k, β, g, s, γ, w_prev)
        self.memory.write(w, e, a)

        return w
| [
"torch.nn.init.xavier_uniform",
"numpy.cumsum",
"torch.nn.init.normal",
"torch.nn.functional.sigmoid",
"torch.nn.functional.relu",
"torch.zeros"
] | [((327, 351), 'numpy.cumsum', 'np.cumsum', (['([0] + lengths)'], {}), '([0] + lengths)\n', (336, 351), True, 'import numpy as np\n'), ((1231, 1240), 'torch.nn.functional.relu', 'F.relu', (['k'], {}), '(k)\n', (1237, 1240), True, 'import torch.nn.functional as F\n'), ((1254, 1263), 'torch.nn.functional.relu', 'F.relu', (['β'], {}), '(β)\n', (1260, 1263), True, 'import torch.nn.functional as F\n'), ((1275, 1287), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['g'], {}), '(g)\n', (1284, 1287), True, 'import torch.nn.functional as F\n'), ((2039, 2092), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['self.fc_read.weight'], {'gain': '(1.4)'}), '(self.fc_read.weight, gain=1.4)\n', (2061, 2092), False, 'from torch import nn\n'), ((2101, 2144), 'torch.nn.init.normal', 'nn.init.normal', (['self.fc_read.bias'], {'std': '(0.01)'}), '(self.fc_read.bias, std=0.01)\n', (2115, 2144), False, 'from torch import nn\n'), ((3211, 3265), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['self.fc_write.weight'], {'gain': '(1.4)'}), '(self.fc_write.weight, gain=1.4)\n', (3233, 3265), False, 'from torch import nn\n'), ((3274, 3318), 'torch.nn.init.normal', 'nn.init.normal', (['self.fc_write.bias'], {'std': '(0.01)'}), '(self.fc_write.bias, std=0.01)\n', (3288, 3318), False, 'from torch import nn\n'), ((3723, 3732), 'torch.nn.functional.relu', 'F.relu', (['e'], {}), '(e)\n', (3729, 3732), True, 'import torch.nn.functional as F\n'), ((3745, 3754), 'torch.nn.functional.relu', 'F.relu', (['a'], {}), '(a)\n', (3751, 3754), True, 'import torch.nn.functional as F\n'), ((1310, 1319), 'torch.nn.functional.relu', 'F.relu', (['s'], {}), '(s)\n', (1316, 1319), True, 'import torch.nn.functional as F\n'), ((1338, 1347), 'torch.nn.functional.relu', 'F.relu', (['γ'], {}), '(γ)\n', (1344, 1347), True, 'import torch.nn.functional as F\n'), ((1926, 1957), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.N'], {}), '(batch_size, self.N)\n', (1937, 1957), False, 'import torch\n'), 
((3098, 3129), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.N'], {}), '(batch_size, self.N)\n', (3109, 3129), False, 'import torch\n')] |
import pandas as pd
import numpy as np
from impliedVolatility.ImpliedVolatility import ImpliedVolatility
modes = ['impvol', 'histvol']

for mode in modes:
    # Pick the input dataset matching the current volatility mode.
    if mode == 'impvol':
        source_path = 'data-source/test_df.csv'
    else:
        source_path = 'data-source/test_df_histvol.csv'
    test_df = pd.read_csv(source_path)

    # Ground-truth option prices from the test set.
    y_values = test_df[['result']].values
    bs_obj = ImpliedVolatility(np.log(1.00501), 1, 1)

    # Unpack the pricing inputs column by column.
    underlying = test_df['3'].values
    call_put = test_df['0'].values
    strike = test_df['1'].values
    tenor = test_df['4'].values
    sigma = test_df['5'].values

    results = bs_obj.calc_opt_price(underlying, call_put, strike, tenor, sigma)

    final_df = pd.DataFrame(
        data={
            'results': results.reshape(-1,),
            'expected': y_values.reshape(-1,),
        }
    )

    # Error metrics: absolute difference, mean squared error, relative error,
    # bias / AAPE / MAPE in percent, and the share of rows within 5/10/20%.
    final_df['diff'] = final_df['expected'] - final_df['results']
    final_df['mse'] = np.mean(np.square(final_df['diff']))
    final_df['rel'] = final_df['diff'] / final_df['expected']
    final_df['bias'] = 100 * np.median(final_df['rel'])
    abs_rel = np.abs(final_df['rel'])
    n_rows = len(final_df['rel'])
    final_df['aape'] = 100 * np.mean(abs_rel)
    final_df['mape'] = 100 * np.median(abs_rel)
    for threshold, column in ((0.05, 'pe5'), (0.10, 'pe10'), (0.20, 'pe20')):
        final_df[column] = 100 * sum(abs_rel < threshold) / n_rows

    final_df.to_csv(f'data-source/bs-results-{mode}.csv', index=False)

    statistics = {
        'max': np.max(final_df['diff']),
        'mean': np.mean(final_df['diff']),
        'median': np.median(final_df['diff']),
        'min': np.min(final_df['diff']),
        'rmse': np.sqrt(np.mean(np.power(final_df['diff'], 2))),
        'sse': np.sum(np.power(final_df['diff'], 2)),
        'std': np.std(final_df['diff']),
        'mse': final_df['mse'].mean(),
        'aape': final_df['aape'].mean(),
        'mape': final_df['mape'].mean(),
        'pe5': final_df['pe5'].mean(),
        'pe10': final_df['pe10'].mean(),
        'pe20': final_df['pe20'].mean(),
    }

    # write response to a .txt file
    with open(f'data-source/bs-statistics-{mode}.txt', 'w') as f:
        for key, value in statistics.items():
            f.write(f'{key}: {value} \n\n')
    print()
| [
"pandas.DataFrame",
"numpy.abs",
"numpy.log",
"pandas.read_csv",
"numpy.median",
"numpy.std",
"numpy.square",
"numpy.power",
"numpy.max",
"numpy.mean",
"numpy.min"
] | [((803, 826), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (815, 826), True, 'import pandas as pd\n'), ((199, 237), 'pandas.read_csv', 'pd.read_csv', (['"""data-source/test_df.csv"""'], {}), "('data-source/test_df.csv')\n", (210, 237), True, 'import pandas as pd\n'), ((266, 312), 'pandas.read_csv', 'pd.read_csv', (['"""data-source/test_df_histvol.csv"""'], {}), "('data-source/test_df_histvol.csv')\n", (277, 312), True, 'import pandas as pd\n'), ((421, 436), 'numpy.log', 'np.log', (['(1.00501)'], {}), '(1.00501)\n', (427, 436), True, 'import numpy as np\n'), ((923, 950), 'numpy.square', 'np.square', (["final_df['diff']"], {}), "(final_df['diff'])\n", (932, 950), True, 'import numpy as np\n'), ((1043, 1069), 'numpy.median', 'np.median', (["final_df['rel']"], {}), "(final_df['rel'])\n", (1052, 1069), True, 'import numpy as np\n'), ((1566, 1590), 'numpy.max', 'np.max', (["final_df['diff']"], {}), "(final_df['diff'])\n", (1572, 1590), True, 'import numpy as np\n'), ((1608, 1633), 'numpy.mean', 'np.mean', (["final_df['diff']"], {}), "(final_df['diff'])\n", (1615, 1633), True, 'import numpy as np\n'), ((1653, 1680), 'numpy.median', 'np.median', (["final_df['diff']"], {}), "(final_df['diff'])\n", (1662, 1680), True, 'import numpy as np\n'), ((1697, 1721), 'numpy.min', 'np.min', (["final_df['diff']"], {}), "(final_df['diff'])\n", (1703, 1721), True, 'import numpy as np\n'), ((1857, 1881), 'numpy.std', 'np.std', (["final_df['diff']"], {}), "(final_df['diff'])\n", (1863, 1881), True, 'import numpy as np\n'), ((1107, 1130), 'numpy.abs', 'np.abs', (["final_df['rel']"], {}), "(final_df['rel'])\n", (1113, 1130), True, 'import numpy as np\n'), ((1171, 1194), 'numpy.abs', 'np.abs', (["final_df['rel']"], {}), "(final_df['rel'])\n", (1177, 1194), True, 'import numpy as np\n'), ((1810, 1839), 'numpy.power', 'np.power', (["final_df['diff']", '(2)'], {}), "(final_df['diff'], 2)\n", (1818, 1839), True, 'import numpy as np\n'), ((1755, 1784), 
'numpy.power', 'np.power', (["final_df['diff']", '(2)'], {}), "(final_df['diff'], 2)\n", (1763, 1784), True, 'import numpy as np\n'), ((1228, 1251), 'numpy.abs', 'np.abs', (["final_df['rel']"], {}), "(final_df['rel'])\n", (1234, 1251), True, 'import numpy as np\n'), ((1316, 1339), 'numpy.abs', 'np.abs', (["final_df['rel']"], {}), "(final_df['rel'])\n", (1322, 1339), True, 'import numpy as np\n'), ((1404, 1427), 'numpy.abs', 'np.abs', (["final_df['rel']"], {}), "(final_df['rel'])\n", (1410, 1427), True, 'import numpy as np\n')] |
from configuration import config
from qm.qua import *
from qm.QuantumMachinesManager import QuantumMachinesManager
import numpy as np
from scipy.optimize import minimize
Navg = 40000  # number of averaging repetitions for the AllXY program
thresh = 0.001  # threshold on the Q value to determine the state e/g
res_time = 10000  # wait duration passed to wait(); presumably in clock cycles — TODO confirm


def reset_qubit():
    """Actively reset the qubit toward the ground state.

    Waits for the resonator, measures the qubit state via the resonator's
    demodulated Q value, and keeps playing pi pulses (re-measuring after each)
    while Q stays above a tight threshold.
    """
    # QUA variable holding the demodulated Q quadrature of the readout.
    qv = declare(fixed)
    align("res", "qubit")
    wait(res_time, "res")  # a little bit of buffer
    align("res", "qubit")
    # Initial state readout: demodulate with the sine integration weights.
    measure("meas_op_res", "res", None, demod.full("integ_w_s", qv))
    with while_(qv > -0.005):  # tight threshold to get high fidelity of reset
        # Flip the qubit and measure again until it reads as ground state.
        play("pi_op_qubit", "qubit")
        measure("meas_op_res", "res", None, demod.full("integ_w_s", qv))
# Rotation label -> (frame angle in units of 2*pi, pulse amplitude scale).
# Upper case (X/Y) denotes a pi pulse (amplitude 1.0), lower case (x/y) a
# pi/2 pulse (0.5); Y rotations are realized by a 0.25-turn frame rotation.
rot_dict = {
    "X": (0.0, 1.0),
    "x": (0.0, 0.5),
    "Y": (0.25, 1.0),
    "y": (0.25, 0.5),
    "id": (0.0, 0.0),
}

sequence = [  # based on https://rsl.yale.edu/sites/default/files/physreva.82.pdf-optimized_driving_0.pdf
    ("id", "id"),
    ("X", "X"),
    ("Y", "Y"),
    ("X", "Y"),
    ("Y", "X"),
    ("x", "id"),
    ("y", "id"),
    ("x", "y"),
    ("y", "x"),
    ("x", "Y"),
    ("y", "X"),
    ("X", "y"),
    ("Y", "x"),
    ("x", "X"),
    ("X", "x"),
    ("y", "Y"),
    ("Y", "y"),
    ("X", "id"),
    ("Y", "id"),
    ("x", "x"),
    ("y", "y"),
]

##############################################
# Decompose each two-pulse AllXY step into the per-pulse frame angles and
# amplitudes consumed by the QUA for_each_ loop:
# X - pi pulse amp - 1, x - pi/2 pulse amp - 0.5, rotation angle - 0
# Y - pi pulse amp - 1, y - pi/2 pulse amp - 0.5, rotation angle - pi/2
##############################################
angle_array1 = [rot_dict[first][0] for first, _ in sequence]
angle_array2 = [rot_dict[second][0] for _, second in sequence]
amp_array1 = [rot_dict[first][1] for first, _ in sequence]
amp_array2 = [rot_dict[second][1] for _, second in sequence]
def all_xy(amplitude):
    """Build the QUA AllXY program.

    For each of the 21 (pulse, pulse) pairs in ``sequence`` the program resets
    the qubit, plays the two frame-rotated pulses scaled by ``amplitude``,
    measures I/Q, thresholds Q into a +/-1 state estimate, and streams the
    averaged sigma-z expectation per sequence element.

    :param amplitude: Global scale applied to both pulse amplitudes.
    :return: The QUA program object.
    """
    with program() as prog:
        n = declare(int)
        I = declare(fixed)
        Q = declare(fixed)
        angle1 = declare(fixed)
        angle2 = declare(fixed)
        amp1 = declare(fixed)
        amp2 = declare(fixed)
        state_estimate = declare(int)
        sigma_z = declare_stream()
        with for_(n, 0, n < Navg, n + 1):
            with for_each_(
                (angle1, angle2, amp1, amp2),
                (angle_array1, angle_array2, amp_array1, amp_array2),
            ):
                reset_qubit()
                align("qubit", "res")
                frame_rotation_2pi(
                    angle1, "qubit"
                )  # rotate by pi/2 (relative to X) to achieve Y rotation using pi pulse
                play("pi_gauss_op_qubit" * amp(amp1 * amplitude), "qubit")
                frame_rotation_2pi(-angle1 + angle2, "qubit")
                play("pi_gauss_op_qubit" * amp(amp2 * amplitude), "qubit")
                frame_rotation_2pi(-angle2, "qubit")
                align("qubit", "res")
                measure(
                    "meas_op_res",
                    "res",
                    None,
                    demod.full("integ_w_c", I),  # cos integration weights for I
                    demod.full("integ_w_s", Q),  # sin integration weights for Q
                )
                save(I, "I")
                save(Q, "Q")
                with if_(Q > thresh):
                    assign(state_estimate, 1)  # excited state
                    save(state_estimate, sigma_z)
                with else_():
                    assign(state_estimate, -1)  # ground state
                    save(state_estimate, sigma_z)
        with stream_processing():
            # Bug fix: stream save() needs a result tag; cost() fetches
            # job.result_handles.sigma_z, so the tag must be "sigma_z".
            sigma_z.buffer(
                len(sequence)
            ).average().save(
                "sigma_z"
            )  # calculate the expectation of the pauli z operator for all combination of pulses
    return prog
qmm = QuantumMachinesManager()


def cost(freq, amplitude):
    """Run AllXY at the given IF frequency and amplitude; return the fit error.

    :param freq: Intermediate frequency to program into the qubit element.
    :param amplitude: Global pulse-amplitude scale passed to all_xy().
    :return: Euclidean distance between the measured sigma-z expectations and
        the ideal AllXY pattern (-1 for the first 5 pairs, 0 for the middle
        12, +1 for the last 4).
    """
    config["elements"]["qubit"]["intermediate_frequency"] = freq
    qm1 = qmm.open_qm(config)
    job = qm1.execute(all_xy(amplitude), data_limit=int(1e9), duration_limit=0)
    print("waiting for values...")
    job.result_handles.wait_for_all_values(timeout=120)
    print("done.")
    target = np.array(
        [-1] * 5 + [0] * 12 + [1] * 4
    )  # the goal values for the sigma z expectation values
    result = job.result_handles.sigma_z.fetch_all()
    # Bug fix: np.norm does not exist; np.linalg.norm gives the intended
    # Euclidean distance between measured and ideal expectation values.
    fit = np.linalg.norm(target - result)
    return fit


# optimize over the frequency and amplitude for the pi pulse
# Bug fix: minimize passes a single parameter vector to the objective, but
# cost takes (freq, amplitude) separately — unpack via a lambda wrapper.
res = minimize(lambda x: cost(x[0], x[1]), np.array([1.53e6, 1]))
IF = res.x[0]
amplitude = res.x[1]
| [
"numpy.norm",
"numpy.array",
"qm.QuantumMachinesManager.QuantumMachinesManager"
] | [((3928, 3952), 'qm.QuantumMachinesManager.QuantumMachinesManager', 'QuantumMachinesManager', ([], {}), '()\n', (3950, 3952), False, 'from qm.QuantumMachinesManager import QuantumMachinesManager\n'), ((4294, 4333), 'numpy.array', 'np.array', (['([-1] * 5 + [0] * 12 + [1] * 4)'], {}), '([-1] * 5 + [0] * 12 + [1] * 4)\n', (4302, 4333), True, 'import numpy as np\n'), ((4472, 4496), 'numpy.norm', 'np.norm', (['(target - result)'], {}), '(target - result)\n', (4479, 4496), True, 'import numpy as np\n'), ((4606, 4630), 'numpy.array', 'np.array', (['[1530000.0, 1]'], {}), '([1530000.0, 1])\n', (4614, 4630), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import time
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool
from typing import List, Optional, Union, Any, Tuple, Dict
import joblib
import numpy as np
import pandas as pd
import torch
from kats.consts import TimeSeriesData
from kats.models.globalmodel.model import GMModel, gmparam_from_string
from kats.models.globalmodel.utils import GMParam, gmpreprocess, split
class GMEnsemble:
    """A class for building the global model ensemble.

    GMEnsemble is a framework for building the ensemble of global models. It provides functions including train, predict and save_model.

    Attributes:
        gmparam: A :class:`kats.models.globalmodel.utils.GMParam` object building the for global model ensemble.
        ensemble_type: Optional; A string representing the ensemble type. Can be 'median' or 'mean'. Default is 'median'.
        splits: Optional; An positive integer representing the number of sub-datasets to be built. Default is 3.
        overlap: Optional; A boolean representing whether or not sub-datasets overlap with each other or not. For example, we have samples [ts1, ts2, ts3] and splits = 3.
                If overlap is True, then three subsets are [[ts1], [ts2], [ts3]], i.e., each sample only appears in one sub-dataset.
                If overlap is False, then three subsets are [[ts1, ts2], [ts2, ts3], [ts3, ts1]], i.e., each sample appears in (splits-1) sub-datasets.
                Default is True.
        replicate: Optional; A positive integer representing the number of global models to be trained on each sub-datasets. Default is 1.
        multi: Optional; A boolean representing whether or not to use multi-processing for training and prediction. Default is False.
        max_core: Optional; A positive integer representing the number of available cpu cores. Default is None, which sets the number of cores to (total_cores - 1) // 2.

    Sample Usage:
        >>> gme = GMEnsemble(params)
        >>> # train an ensemble object and get training info (e.g., training/validation losses)
        >>> training_info = gme.train(train_TSs, valid_TSs)
        >>> # make prediction
        >>> gme.predict(train_TSs)
        >>> # save model
        >>> gme.save_model("global_model_ensemble.pickle")
        >>> # Evalute model performance on a given dataset.
        >>> evals = gme.evalute(test_train, test_test)
    """

    def __init__(
        self,
        gmparam: GMParam,
        ensemble_type: str = "median",
        splits: int = 3,
        overlap: bool = True,
        replicate: int = 1,
        multi: bool = False,
        max_core: Optional[int] = None,
    ) -> None:
        if not isinstance(gmparam, GMParam):
            msg = f"gmparam should be GMParam object but receives {type(gmparam)}."
            logging.error(msg)
            raise ValueError(msg)
        self.params = gmparam

        if ensemble_type == "median":
            self._ensemble_func = np.median
        elif ensemble_type == "mean":
            self._ensemble_func = np.mean
        else:
            msg = f"ensemble_type should be either 'mean' or 'median' but receives {ensemble_type}."
            logging.error(msg)
            raise ValueError(msg)
        self.ensemble_type = ensemble_type

        if not isinstance(splits, int) or splits < 1:
            msg = f"splits should be a positive integer but receives {splits}."
            logging.error(msg)
            raise ValueError(msg)
        self.splits = splits

        self.overlap = overlap

        if not isinstance(replicate, int) or replicate < 1:
            msg = f"rep should be a positive integer but receives {replicate}."
            logging.error(msg)
            raise ValueError(msg)
        self.replicate = replicate

        # Total number of base models: one per (split, replicate) pair.
        self.model_num = int(self.replicate * self.splits)
        self.multi = multi

        total_cores = cpu_count()
        if max_core is None:
            self.max_core = max((total_cores - 1) // 2, 1)
        elif isinstance(max_core, int) and max_core > 0 and max_core < total_cores:
            self.max_core = max_core
        else:
            msg = f"max_core should be a positive integer in [1, {total_cores}] but receives {max_core}."
            logging.error(msg)
            raise ValueError(msg)

        self.gm_info = []
        self.gm_models = [GMModel(self.params) for _ in range(self.model_num)]
        self.test_ids = []

    def _fit_single_gm(
        self,
        gm: GMModel,
        train_TSs: Dict[Any, TimeSeriesData],
        valid_TSs: Optional[Dict[Any, TimeSeriesData]],
        random_seed: Optional[int] = None,
        test_train_TSs: Optional[Dict[Any, TimeSeriesData]] = None,
        test_valid_TSs: Optional[Dict[Any, TimeSeriesData]] = None,
    ) -> Dict[str, Any]:
        """Fit a global model and return training information.

        Args:
            gm: A :class:`kats.models.globalmodel.model.GMModel` object to be trained.
            train_TSs: A dictionary representing the training time series.
            valid_TSs: A dictionary representing the corresponding validation time series.
            random_seed: Optional; An integer representing the random seed. Default is None, i.e., no random seed is set.
            test_train_TSs: Optional; A dictionary representing the training part of the test time series. Default is None.
            test_valid_TSs: Optional; A dictionary representing the testing part of the test time series. Default is None.

        Returns:
            A dictionary representing the training information of the global model.
        """
        if random_seed is not None:
            np.random.seed(random_seed)
            torch.manual_seed(random_seed)
        # to ensure performance
        torch.set_num_threads(1)
        training_info = gm.train(
            train_TSs,
            valid_TSs,
            test_train_TSs,
            test_valid_TSs,
            fcst_monitor=False,
            debug=False,
        )
        return training_info

    def _predict_single_gm(self, gm, test_TSs, steps, test_batch_size=1000):
        """Generate raw forecasts from a single trained global model."""
        t = time.time()
        fcst = gm.predict(
            test_TSs, steps=steps, raw=True, test_batch_size=test_batch_size
        )
        logging.info(f"fcst {len(fcst)} TSs with {time.time()-t}.")
        return fcst

    def train(
        self,
        data: Union[Dict[Any, TimeSeriesData], List[TimeSeriesData]],
        test_size: float = 0.1,
        valid_set: bool = False,
    ) -> None:
        """Train base global models.

        Args:
            data: A list or a dictionary of time series.
            test_size: Optional; A float in [0,1) representing the percentage that the test set takes up. Default is 0.1
            valid_set: Optional; A boolean specifying whether or not to have a validation set during training. Default is False.
        """
        n = len(data)
        keys = np.array(list(data.keys())) if isinstance(data, dict) else np.arange(n)
        if test_size < 0 or test_size > 1:
            msg = f"test_size should be in [0,1) but receives {test_size}."
            logging.error(msg)
            raise ValueError(msg)

        if test_size > 0:
            m = max(1, int(n * test_size))
            np.random.shuffle(keys)
            all_test_TSs = {keys[i]: data[keys[i]] for i in range(m)}
            test_train_TSs, test_valid_TSs = gmpreprocess(
                self.params, all_test_TSs, mode="test"
            )
            all_train_TSs = {keys[i]: data[keys[i]] for i in range(m, n)}
            train_TSs, valid_TSs = gmpreprocess(
                self.params, all_train_TSs, mode="train", valid_set=valid_set
            )
            self.test_ids = list(test_train_TSs.keys())
        else:
            train_TSs, valid_TSs = gmpreprocess(
                self.params, data, mode="train", valid_set=valid_set
            )
            test_train_TSs, test_valid_TSs = None, None
            self.test_ids = []

        split_data = split(self.splits, self.overlap, train_TSs, valid_TSs)
        # multi processing
        if self.multi:
            t0 = time.time()
            rds = np.random.randint(1, int(10000 * self.model_num), self.model_num)
            model_params = [
                (
                    self.gm_models[i],
                    split_data[i % self.splits][0],
                    split_data[i % self.splits][1],
                    rds[i],
                    test_train_TSs,
                    test_valid_TSs,
                )
                for i in range(self.model_num)
            ]
            pool = Pool(self.max_core)
            results = pool.starmap(self._fit_single_gm, model_params)
            pool.close()
            pool.join()

            # return results
            self.gm_info = results
            logging.info(
                f"fit {self.model_num} global models using time {time.time()-t0}"
            )
        else:
            self.gm_info = []
            t0 = time.time()
            i = 0
            for _ in range(self.replicate):
                for train, valid in split_data:
                    info = self._fit_single_gm(
                        self.gm_models[i],
                        train,
                        valid,
                        test_train_TSs=test_train_TSs,
                        test_valid_TSs=test_valid_TSs,
                    )
                    self.gm_info.append(info)
                    i += 1
            logging.info(
                f"fit {self.model_num} global models using time {time.time()-t0}"
            )
        return

    def _combine_fcst(
        self,
        idx: Any,
        fcsts: List[np.ndarray],
        steps: int,
        raw: bool,
        first_timestamp: Optional[pd.Timestamp] = None,
        col_names: Optional[List[str]] = None,
    ) -> Tuple[Any, Any]:
        """Combine the forecasts from each global model."""
        fcst = [
            self._ensemble_func([fcsts[i][j] for i in range(len(fcsts))], axis=0)
            for j in range(len(fcsts[0]))
        ]
        if raw:
            return idx, fcst
        else:
            n_quantile = len(self.params.quantile)
            df = pd.DataFrame(
                np.column_stack([t.reshape(n_quantile, -1) for t in fcst]).T
            ).iloc[:steps]
            df.columns = col_names
            df["time"] = pd.date_range(
                first_timestamp + self.params.freq, periods=steps, freq=self.params.freq
            )
            return idx, df

    def predict(
        self,
        test_TSs: Union[
            TimeSeriesData, List[TimeSeriesData], Dict[Any, TimeSeriesData]
        ],
        steps: int,
        test_batch_size: int = 500,
        raw: bool = False,
    ) -> Dict[Any, Union[pd.DataFrame, List[np.ndarray]]]:
        """Generate forecasts for the target time series.

        Args:
            test_TSs: A TimeSeriesDdata object, list or a dictionary of time series to generate forecasts for.
            steps: An integer representing the forecast steps.
            test_batch_size: Optional; An integer representing the batch size for testing. Default is 500.
            raw: Optional; A boolean representing whether or not to return raw forecasts (i.e., `numpy.ndarray` objects). If False, the forecasts are `pandas.DataFrame` objects. Default is False.

        Returns:
            A dictionary of forecasts, whose keys are the ids for time series, and values are the corresponding forecasts.
        """
        if isinstance(test_TSs, TimeSeriesData):
            test_TSs = [test_TSs]
        elif isinstance(test_TSs, dict) or isinstance(test_TSs, list):
            pass
        else:
            msg = f"predict function only accepts a TimeSeriesData object, a dictionary or a list of TimeSeriesData objects, but receives {type(test_TSs)}"
            # Bug fix: the original built this message but never raised it,
            # letting invalid input flow through and fail obscurely later.
            logging.error(msg)
            raise ValueError(msg)
        if steps <= 0:
            msg = f"step should be a positive integer but receives {steps}."
            logging.error(msg)
            raise ValueError(msg)

        if not isinstance(test_batch_size, int) or test_batch_size <= 0:
            msg = f"test_batch_size should be a positive integer but receives {test_batch_size}."
            logging.error(msg)
            raise ValueError(msg)

        t0 = time.time()
        if self.multi:
            pool = Pool(self.max_core)
            all_fcsts = pool.starmap(
                self._predict_single_gm,
                [(t, test_TSs, steps, test_batch_size) for t in self.gm_models],
            )
            pool.close()
            pool.join()
        else:
            all_fcsts = [
                m.predict(test_TSs, steps, raw=True, test_batch_size=test_batch_size)
                for m in self.gm_models
            ]
        logging.info(
            f"time for all global model to generate forecasts: {time.time() - t0}."
        )
        keys = (
            test_TSs.keys() if isinstance(test_TSs, dict) else np.arange(len(test_TSs))
        )

        col_names = (
            [f"fcst_quantile_{q}" for q in self.params.quantile] if (not raw) else None
        )
        if self.multi:
            cf_params = [
                (
                    k,
                    [all_fcsts[i][k] for i in range(self.model_num)],
                    steps,
                    raw,
                    test_TSs[k].time.iloc[-1],
                    col_names,
                )
                for k in keys
            ]
            pool = Pool(self.max_core)
            results = pool.starmap(self._combine_fcst, cf_params)
            pool.close()
            pool.join()
            return {t[0]: t[1] for t in results}
        else:
            ans = {}
            for k in keys:
                try:
                    ans[k] = self._combine_fcst(
                        k,
                        [all_fcsts[i][k] for i in range(self.model_num)],
                        steps,
                        raw,
                        test_TSs[k].time.iloc[-1],
                        col_names,
                    )[1]
                except Exception as e:
                    msg = f"Fail to generate forecasts with Exception {e}."
                    logging.error(msg)
                    raise ValueError(msg)
            return ans

    def save_model(self, file_name: str) -> None:
        """Save ensemble model to file.

        Args:
            file_name: A string representing the file address and file name.
        """
        if len(self.gm_models) == 0:
            msg = "Please train global models before saving GMEnsemble."
            logging.error(msg)
            raise ValueError(msg)
        try:
            # clean-up unnecessary info
            for gm in self.gm_models:
                gm._reset_nn_states()
            state_dict = (
                [gm.rnn.state_dict() for gm in self.gm_models]
                if self.params.model_type == "rnn"
                else None
            )
            encoder_dict = (
                [gm.encoder.state_dict() for gm in self.gm_models]
                if self.params.model_type == "s2s"
                else None
            )
            decoder_dict = (
                [gm.decoder.state_dict() for gm in self.gm_models]
                if self.params.model_type == "s2s"
                else None
            )
            gmparam_string = self.params.to_string()
            info = {
                "state_dict": state_dict,
                "encoder_dict": encoder_dict,
                "decoder_dict": decoder_dict,
                "gmparam_string": gmparam_string,
                "gm_info": self.gm_info,
                "test_ids": self.test_ids,
                "gmensemble_params": {},
            }
            for attr in [
                "splits",
                "overlap",
                "replicate",
                "multi",
                "max_core",
                "ensemble_type",
            ]:
                info["gmensemble_params"][attr] = getattr(self, attr)
            with open(file_name, "wb") as f:
                joblib.dump(info, f)
            logging.info(f"Successfully save GMEnsemble to {file_name}.")
        except Exception as e:
            msg = f"Fail to save GMEnsemble to {file_name} with Exception {e}."
            logging.error(msg)
            raise ValueError(msg)

    def evaluate(
        self,
        test_train_TSs: Union[
            TimeSeriesData, List[TimeSeriesData], Dict[Any, TimeSeriesData]
        ],
        test_valid_TSs: Union[
            TimeSeriesData, List[TimeSeriesData], Dict[Any, TimeSeriesData]
        ],
    ) -> pd.DataFrame:
        """Evaluate the GMEnsemble object performance.

        A wrapper function to evaluate model performance on a given time series data set.

        Args:
            test_train_TSs: A list or a dictionary of :class:`kats.consts.TimeSeriesData` objects for warming-ups.
            test_valid_TSs: A list or a dictionary of :class:`kats.consts.TimeSeriesData` objects for evaluation.

        Returns:
            A `pandas.DataFrame` object representing the evaluation results.
        """
        if type(test_train_TSs) != type(test_valid_TSs):
            msg = (
                "The data type of test_train_TSs and test_valid_TSs should be the same."
            )
            logging.error(msg)
            raise ValueError(msg)

        if isinstance(test_train_TSs, TimeSeriesData):
            test_train_TSs = [test_train_TSs]
            # pyre-fixme[9]
            test_valid_TSs = [test_valid_TSs]

        if len(test_train_TSs) != len(test_valid_TSs):
            msg = "test_train_TSs and test_valid_TSs should be of the same length."
            logging.error(msg)
            raise ValueError(msg)

        keys = (
            test_train_TSs.keys()
            if isinstance(test_train_TSs, dict)
            else range(len(test_train_TSs))
        )
        if len(keys) == 0:
            msg = "The input collection of time series should not be empty."
            logging.error(msg)
            raise ValueError(msg)

        steps = np.max([len(test_valid_TSs[t]) for t in keys])
        fcst = self.predict(test_train_TSs, steps=steps, raw=True)
        logging.info(
            f"Successfully generate forecasts for all test time series with length {steps}."
        )

        eval_func = self.gm_models[0].build_validation_function()
        fcst_window = self.params.fcst_window
        ans = []
        keys = (
            test_train_TSs.keys()
            if isinstance(test_train_TSs, dict)
            else range(len(test_train_TSs))
        )
        for k in keys:
            tmp = test_valid_TSs[k].value.values
            # Number of fcst_window-sized chunks needed to cover the actuals.
            tmp_step = len(tmp) // fcst_window + int(len(tmp) % fcst_window != 0)
            tmp_fcst_length = tmp_step * fcst_window
            # Bug fix: np.float was removed in NumPy 1.24; the builtin float
            # is the documented equivalent dtype.
            actuals = np.full(tmp_fcst_length, np.nan, float)
            actuals[: len(tmp)] = tmp
            for j in range(tmp_step):
                tmp_actuals = actuals[j * fcst_window : (j + 1) * fcst_window]
                tmp = eval_func(fcst[k][j], tmp_actuals)
                tmp["step"] = j
                tmp["idx"] = k
                ans.append(tmp)
        return pd.DataFrame(ans)
def load_gmensemble_from_file(file_name: str) -> GMEnsemble:
    """Load a trained :class:`GMEnsemble` object from file.

    Args:
        file_name: A string representing the file saving the :class:`GMEnsemble` object.

    Returns:
        A :class:`GMEnsemble` object loaded from the file.

    Raises:
        ValueError: If the file cannot be read or deserialized.
    """
    try:
        # Use a context manager so the file handle is always closed; the
        # original passed an open file object to joblib.load and leaked it.
        with open(file_name, "rb") as f:
            info = joblib.load(f)
        gmparam = gmparam_from_string(info["gmparam_string"])
        # RNN ensembles store one state dict per model; s2s ensembles store
        # separate encoder/decoder dicts instead.
        n = (
            len(info["state_dict"])
            if info["state_dict"] is not None
            else len(info["encoder_dict"])
        )
        gm_models = []
        for i in range(n):
            tmp_gmmodel = GMModel(gmparam)
            if gmparam.model_type == "rnn":
                tmp_gmmodel.build_rnn()
                tmp_gmmodel.rnn.load_state_dict(info["state_dict"][i])
            else:
                tmp_gmmodel.build_s2s()
                tmp_gmmodel.encoder.load_state_dict(info["encoder_dict"][i])
                tmp_gmmodel.decoder.load_state_dict(info["decoder_dict"][i])
            gm_models.append(tmp_gmmodel)
        info["gmensemble_params"]["gmparam"] = gmparam
        gmensemble = GMEnsemble(**info["gmensemble_params"])
        gmensemble.gm_models = gm_models
        gmensemble.gm_info = info["gm_info"]
    except Exception as e:
        msg = f"Fail to load GMEnsemble from {file_name} with Exception {e}."
        logging.error(msg)
        raise ValueError(msg)
    return gmensemble
| [
"pandas.DataFrame",
"numpy.full",
"logging.error",
"numpy.random.seed",
"pandas.date_range",
"kats.models.globalmodel.utils.gmpreprocess",
"kats.models.globalmodel.model.GMModel",
"multiprocessing.dummy.Pool",
"torch.manual_seed",
"kats.models.globalmodel.utils.split",
"kats.models.globalmodel.m... | [((4088, 4099), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4097, 4099), False, 'from multiprocessing import cpu_count\n'), ((6406, 6417), 'time.time', 'time.time', ([], {}), '()\n', (6415, 6417), False, 'import time\n'), ((8295, 8349), 'kats.models.globalmodel.utils.split', 'split', (['self.splits', 'self.overlap', 'train_TSs', 'valid_TSs'], {}), '(self.splits, self.overlap, train_TSs, valid_TSs)\n', (8300, 8349), False, 'from kats.models.globalmodel.utils import GMParam, gmpreprocess, split\n'), ((12573, 12584), 'time.time', 'time.time', ([], {}), '()\n', (12582, 12584), False, 'import time\n'), ((18507, 18611), 'logging.info', 'logging.info', (['f"""Successfully generate forecasts for all test time series with length {steps}."""'], {}), "(\n f'Successfully generate forecasts for all test time series with length {steps}.'\n )\n", (18519, 18611), False, 'import logging\n'), ((19500, 19517), 'pandas.DataFrame', 'pd.DataFrame', (['ans'], {}), '(ans)\n', (19512, 19517), True, 'import pandas as pd\n'), ((19900, 19943), 'kats.models.globalmodel.model.gmparam_from_string', 'gmparam_from_string', (["info['gmparam_string']"], {}), "(info['gmparam_string'])\n", (19919, 19943), False, 'from kats.models.globalmodel.model import GMModel, gmparam_from_string\n'), ((3006, 3024), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (3019, 3024), False, 'import logging\n'), ((3621, 3639), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (3634, 3639), False, 'import logging\n'), ((3888, 3906), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (3901, 3906), False, 'import logging\n'), ((4546, 4566), 'kats.models.globalmodel.model.GMModel', 'GMModel', (['self.params'], {}), '(self.params)\n', (4553, 4566), False, 'from kats.models.globalmodel.model import GMModel, gmparam_from_string\n'), ((5940, 5967), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (5954, 5967), True, 
'import numpy as np\n'), ((5980, 6010), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (5997, 6010), False, 'import torch\n'), ((6059, 6083), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (6080, 6083), False, 'import torch\n'), ((7268, 7280), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (7277, 7280), True, 'import numpy as np\n'), ((7413, 7431), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (7426, 7431), False, 'import logging\n'), ((7547, 7570), 'numpy.random.shuffle', 'np.random.shuffle', (['keys'], {}), '(keys)\n', (7564, 7570), True, 'import numpy as np\n'), ((7686, 7738), 'kats.models.globalmodel.utils.gmpreprocess', 'gmpreprocess', (['self.params', 'all_test_TSs'], {'mode': '"""test"""'}), "(self.params, all_test_TSs, mode='test')\n", (7698, 7738), False, 'from kats.models.globalmodel.utils import GMParam, gmpreprocess, split\n'), ((7878, 7953), 'kats.models.globalmodel.utils.gmpreprocess', 'gmpreprocess', (['self.params', 'all_train_TSs'], {'mode': '"""train"""', 'valid_set': 'valid_set'}), "(self.params, all_train_TSs, mode='train', valid_set=valid_set)\n", (7890, 7953), False, 'from kats.models.globalmodel.utils import GMParam, gmpreprocess, split\n'), ((8089, 8155), 'kats.models.globalmodel.utils.gmpreprocess', 'gmpreprocess', (['self.params', 'data'], {'mode': '"""train"""', 'valid_set': 'valid_set'}), "(self.params, data, mode='train', valid_set=valid_set)\n", (8101, 8155), False, 'from kats.models.globalmodel.utils import GMParam, gmpreprocess, split\n'), ((8418, 8429), 'time.time', 'time.time', ([], {}), '()\n', (8427, 8429), False, 'import time\n'), ((8902, 8921), 'multiprocessing.dummy.Pool', 'Pool', (['self.max_core'], {}), '(self.max_core)\n', (8906, 8921), False, 'from multiprocessing.dummy import Pool\n'), ((9288, 9299), 'time.time', 'time.time', ([], {}), '()\n', (9297, 9299), False, 'import time\n'), ((10681, 10773), 'pandas.date_range', 'pd.date_range', 
(['(first_timestamp + self.params.freq)'], {'periods': 'steps', 'freq': 'self.params.freq'}), '(first_timestamp + self.params.freq, periods=steps, freq=self.\n params.freq)\n', (10694, 10773), True, 'import pandas as pd\n'), ((12270, 12288), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (12283, 12288), False, 'import logging\n'), ((12507, 12525), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (12520, 12525), False, 'import logging\n'), ((12627, 12646), 'multiprocessing.dummy.Pool', 'Pool', (['self.max_core'], {}), '(self.max_core)\n', (12631, 12646), False, 'from multiprocessing.dummy import Pool\n'), ((13774, 13793), 'multiprocessing.dummy.Pool', 'Pool', (['self.max_core'], {}), '(self.max_core)\n', (13778, 13793), False, 'from multiprocessing.dummy import Pool\n'), ((14899, 14917), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (14912, 14917), False, 'import logging\n'), ((16391, 16452), 'logging.info', 'logging.info', (['f"""Successfully save GMEnsemble to {file_name}."""'], {}), "(f'Successfully save GMEnsemble to {file_name}.')\n", (16403, 16452), False, 'import logging\n'), ((17611, 17629), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (17624, 17629), False, 'import logging\n'), ((17992, 18010), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (18005, 18010), False, 'import logging\n'), ((18314, 18332), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (18327, 18332), False, 'import logging\n'), ((19135, 19177), 'numpy.full', 'np.full', (['tmp_fcst_length', 'np.nan', 'np.float'], {}), '(tmp_fcst_length, np.nan, np.float)\n', (19142, 19177), True, 'import numpy as np\n'), ((20169, 20185), 'kats.models.globalmodel.model.GMModel', 'GMModel', (['gmparam'], {}), '(gmparam)\n', (20176, 20185), False, 'from kats.models.globalmodel.model import GMModel, gmparam_from_string\n'), ((20910, 20928), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (20923, 20928), False, 'import 
logging\n'), ((3378, 3396), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (3391, 3396), False, 'import logging\n'), ((4441, 4459), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (4454, 4459), False, 'import logging\n'), ((16358, 16378), 'joblib.dump', 'joblib.dump', (['info', 'f'], {}), '(info, f)\n', (16369, 16378), False, 'import joblib\n'), ((16576, 16594), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (16589, 16594), False, 'import logging\n'), ((6582, 6593), 'time.time', 'time.time', ([], {}), '()\n', (6591, 6593), False, 'import time\n'), ((13136, 13147), 'time.time', 'time.time', ([], {}), '()\n', (13145, 13147), False, 'import time\n'), ((14497, 14515), 'logging.error', 'logging.error', (['msg'], {}), '(msg)\n', (14510, 14515), False, 'import logging\n'), ((9196, 9207), 'time.time', 'time.time', ([], {}), '()\n', (9205, 9207), False, 'import time\n'), ((9859, 9870), 'time.time', 'time.time', ([], {}), '()\n', (9868, 9870), False, 'import time\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Array type functions.
#
# JAX dtypes differ from NumPy in both:
# a) their type promotion rules, and
# b) the set of supported types (e.g., bfloat16),
# so we need our own implementation that deviates from NumPy in places.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from distutils.util import strtobool
import functools
import os
import numpy as onp
import six
from . import util
from .config import flags
from .lib import xla_client
FLAGS = flags.FLAGS
flags.DEFINE_bool('jax_enable_x64',
strtobool(os.getenv('JAX_ENABLE_X64', 'False')),
'Enable 64-bit types to be used.')
# bfloat16 support
bfloat16 = xla_client.bfloat16
_bfloat16_dtype = onp.dtype(bfloat16)
class _bfloat16_finfo(object):
  """Hand-written stand-in for ``numpy.finfo`` for the bfloat16 type.

  NumPy does not treat bfloat16 as a floating-point type, so ``onp.finfo``
  cannot describe it; this class exposes the same attribute interface with
  constants for the bfloat16 format (8 exponent bits, 7 mantissa bits).
  """
  bits = 16  # total storage width in bits
  eps = bfloat16(float.fromhex("0x1p-7"))  # smallest step up from 1.0
  epsneg = bfloat16(float.fromhex("0x1p-8"))  # smallest step down from 1.0
  machep = -7  # exponent of eps
  negep = -8  # exponent of epsneg
  max = bfloat16(float.fromhex("0x1.FEp127"))  # largest finite value
  min = -max  # most negative finite value
  nexp = 8  # number of exponent bits
  nmant = 7  # number of mantissa bits
  iexp = nexp
  precision = 2  # approximate decimal digits of precision
  resolution = 10 ** -2  # 10**-precision
  tiny = bfloat16(float.fromhex("0x1p-126"))  # smallest positive normal value
# Default types.
bool_ = onp.bool_
int_ = onp.int64
float_ = onp.float64
complex_ = onp.complex128
# TODO(phawkins): change the above defaults to:
# int_ = onp.int32
# float_ = onp.float32
# complex_ = onp.complex64
# Map from each 64-bit dtype to its 32-bit counterpart. Used by
# canonicalize_dtype to narrow types when FLAGS.jax_enable_x64 is disabled.
_dtype_to_32bit_dtype = {
    onp.dtype('int64'): onp.dtype('int32'),
    onp.dtype('uint64'): onp.dtype('uint32'),
    onp.dtype('float64'): onp.dtype('float32'),
    onp.dtype('complex128'): onp.dtype('complex64'),
}
@util.memoize
def canonicalize_dtype(dtype):
  """Canonicalize a dtype, narrowing 64-bit types unless x64 mode is enabled.

  When FLAGS.jax_enable_x64 is off, 64-bit dtypes are mapped to their 32-bit
  counterparts via _dtype_to_32bit_dtype; otherwise the dtype is unchanged.
  """
  canonical = onp.dtype(dtype)
  if not FLAGS.jax_enable_x64:
    canonical = _dtype_to_32bit_dtype.get(canonical, canonical)
  return canonical
# Default dtypes corresponding to Python scalars. A value of one of these
# types is "weakly typed": its dtype only wins promotion against arrays of
# lower priority (see result_type).
python_scalar_dtypes = {
  bool: onp.dtype(bool_),
  int: onp.dtype(int_),
  float: onp.dtype(float_),
  complex: onp.dtype(complex_),
}
# Python 2's `long` also maps to the default integer dtype.
if six.PY2:
  python_scalar_dtypes[long] = onp.dtype(int_) # noqa: F821
def scalar_type_of(x):
  """Return the Python scalar type (bool/int/float/complex) matching ``x``.

  Raises:
    TypeError: if ``x``'s dtype falls outside the four scalar categories.
  """
  kind = dtype(x)
  # Ordered most-specific first: bool is a sub-kind of integer in NumPy.
  for np_category, py_type in ((onp.bool_, bool), (onp.integer, int),
                               (onp.floating, float),
                               (onp.complexfloating, complex)):
    if onp.issubdtype(kind, np_category):
      return py_type
  raise TypeError("Invalid scalar value {}".format(x))
def coerce_to_array(x):
  """Coerce a scalar or NumPy array to an onp.array.

  Handles Python scalar type promotion according to JAX's rules, not NumPy's
  rules: a Python bool/int/float/complex is given the matching JAX default
  dtype rather than whatever NumPy would infer.
  """
  scalar_dtype = python_scalar_dtypes.get(type(x), None)
  if scalar_dtype:
    return onp.array(x, scalar_dtype)
  return onp.array(x)
# Integer type metadata is taken from NumPy unchanged.
iinfo = onp.iinfo


def finfo(dtype):
  """Like ``onp.finfo``, but with support for the bfloat16 type."""
  # NumPy doesn't consider bfloat16 a floating-point type, so for it we
  # return our hand-written substitute; everything else defers to NumPy.
  if onp.result_type(dtype) != _bfloat16_dtype:
    return onp.finfo(dtype)
  return _bfloat16_finfo
def issubdtype(a, b):
  """Like ``onp.issubdtype``, but with support for the bfloat16 type.

  Args:
    a: a dtype, scalar type, or dtype specifier.
    b: a dtype, scalar type, or dtype specifier to test ``a`` against.

  Returns:
    True if ``a`` is a sub-dtype of ``b``.
  """
  if a == bfloat16:
    return b in [bfloat16, _bfloat16_dtype, onp.floating, onp.inexact,
                 onp.number]
  # Workaround for JAX scalar types. NumPy's issubdtype has a backward
  # compatibility behavior for the second argument of issubdtype that
  # interacts badly with JAX's custom scalar types. As a workaround,
  # explicitly cast the second argument to a NumPy type object.
  # Bug fix: the original called issubclass(b, ...) unconditionally, which
  # raises TypeError when `b` is not a class at all (e.g. a dtype instance
  # such as onp.dtype('int32'), or a string like 'float64'). The isinstance
  # guard routes those values through onp.dtype(b).type as well.
  if not (isinstance(b, type) and issubclass(b, onp.generic)):
    b = onp.dtype(b).type
  return onp.issubdtype(a, b)
# NumPy's implementations are reused directly for these predicates.
can_cast = onp.can_cast
issubsctype = onp.issubsctype
# List of all valid JAX dtypes, in the order they appear in the type promotion
# table (promote_types indexes the table by position in this list).
_jax_types = [
    onp.dtype('bool'),
    onp.dtype('uint8'),
    onp.dtype('uint16'),
    onp.dtype('uint32'),
    onp.dtype('uint64'),
    onp.dtype('int8'),
    onp.dtype('int16'),
    onp.dtype('int32'),
    onp.dtype('int64'),
    onp.dtype(bfloat16),
    onp.dtype('float16'),
    onp.dtype('float32'),
    onp.dtype('float64'),
    onp.dtype('complex64'),
    onp.dtype('complex128'),
]
# Mapping from types to their type numbers (row/column index in the table).
_jax_type_nums = {t: i for i, t in enumerate(_jax_types)}
def _make_type_promotion_table():
  """Build the 15x15 table of pairwise dtype promotion results.

  Entry [i, j] holds the dtype produced by combining _jax_types[i] with
  _jax_types[j]; rows and columns follow the ordering of _jax_types.
  """
  b1, u1, u2, u4, u8, s1, s2, s4, s8, bf, f2, f4, f8, c4, c8 = _jax_types
  # b1, u1, u2, u4, u8, s1, s2, s4, s8, bf, f2, f4, f8, c4, c8
  return onp.array([
      [b1, u1, u2, u4, u8, s1, s2, s4, s8, bf, f2, f4, f8, c4, c8], # b1
      [u1, u1, u2, u4, u8, s2, s2, s4, s8, bf, f2, f4, f8, c4, c8], # u1
      [u2, u2, u2, u4, u8, s4, s4, s4, s8, bf, f2, f4, f8, c4, c8], # u2
      [u4, u4, u4, u4, u8, s8, s8, s8, s8, bf, f2, f4, f8, c4, c8], # u4
      [u8, u8, u8, u8, u8, f8, f8, f8, f8, bf, f2, f4, f8, c4, c8], # u8
      [s1, s2, s4, s8, f8, s1, s2, s4, s8, bf, f2, f4, f8, c4, c8], # s1
      [s2, s2, s4, s8, f8, s2, s2, s4, s8, bf, f2, f4, f8, c4, c8], # s2
      [s4, s4, s4, s8, f8, s4, s4, s4, s8, bf, f2, f4, f8, c4, c8], # s4
      [s8, s8, s8, s8, f8, s8, s8, s8, s8, bf, f2, f4, f8, c4, c8], # s8
      [bf, bf, bf, bf, bf, bf, bf, bf, bf, bf, f4, f4, f8, c4, c8], # bf
      [f2, f2, f2, f2, f2, f2, f2, f2, f2, f4, f2, f4, f8, c4, c8], # f2
      [f4, f4, f4, f4, f4, f4, f4, f4, f4, f4, f4, f4, f8, c4, c8], # f4
      [f8, f8, f8, f8, f8, f8, f8, f8, f8, f8, f8, f8, f8, c8, c8], # f8
      [c4, c4, c4, c4, c4, c4, c4, c4, c4, c4, c4, c4, c8, c4, c8], # c4
      [c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8], # c8
  ])
_type_promotion_table = _make_type_promotion_table()


def promote_types(a, b):
  """Returns the type to which a binary operation should cast its arguments.

  For details of JAX's type promotion semantics, see :ref:`type-promotion`.

  Args:
    a: a :class:`numpy.dtype` or a dtype specifier.
    b: a :class:`numpy.dtype` or a dtype specifier.

  Returns:
    A :class:`numpy.dtype` object.
  """
  dtype_a = onp.dtype(a)
  dtype_b = onp.dtype(b)
  # Only dtypes present in the promotion table are valid JAX dtypes.
  if dtype_a in _jax_type_nums and dtype_b in _jax_type_nums:
    return _type_promotion_table[
        _jax_type_nums[dtype_a], _jax_type_nums[dtype_b]]
  raise TypeError("Invalid type promotion of {} and {}".format(dtype_a, dtype_b))
def is_python_scalar(x):
  """Return True if ``x`` should promote as a weakly-typed Python scalar."""
  try:
    # Traced JAX values carry an abstract value with a weak_type flag; only
    # 0-dimensional weak values count as scalars.
    result = x.aval.weak_type and onp.ndim(x) == 0
  except AttributeError:
    # Plain Python values: exactly the types with a registered default dtype.
    result = type(x) in python_scalar_dtypes
  return result
def _dtype_priority(dtype):
  """Rank a dtype into a promotion category: bool < integer < float < complex.

  Raises:
    TypeError: if the dtype falls outside the four supported categories.
  """
  categories = (onp.bool_, onp.integer, onp.floating, onp.complexfloating)
  for priority, category in enumerate(categories):
    if issubdtype(dtype, category):
      return priority
  raise TypeError("Dtype {} is not supported by JAX".format(dtype))
def dtype(x):
  """Return the dtype of ``x``, using JAX's defaults for Python scalars."""
  scalar_dtype = python_scalar_dtypes.get(type(x))
  if scalar_dtype is not None:
    return scalar_dtype
  return onp.result_type(x)
def result_type(*args):
  """Convenience function to apply Numpy argument dtype promotion."""
  # TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
  # NOTE(review): with a single argument the dtype is returned WITHOUT
  # canonicalization, unlike the multi-argument path below — confirm intended.
  if len(args) < 2:
    return dtype(args[0])
  # Partition arguments into weakly-typed Python scalars and everything else.
  scalars = []
  dtypes = []
  for x in args:
    (scalars if is_python_scalar(x) else dtypes).append(dtype(x))
  # A scalar's dtype only participates in promotion if its category priority
  # (bool < integer < floating < complex) exceeds that of every non-scalar
  # argument; otherwise the scalar is "weak" and defers to the arrays.
  array_priority = max(map(_dtype_priority, dtypes)) if dtypes else -1
  dtypes += [x for x in scalars if _dtype_priority(x) > array_priority]
  return canonicalize_dtype(functools.reduce(promote_types, dtypes))
| [
"numpy.result_type",
"numpy.dtype",
"numpy.ndim",
"numpy.finfo",
"numpy.array",
"functools.reduce",
"os.getenv",
"numpy.issubdtype"
] | [((1328, 1347), 'numpy.dtype', 'onp.dtype', (['bfloat16'], {}), '(bfloat16)\n', (1337, 1347), True, 'import numpy as onp\n'), ((1938, 1956), 'numpy.dtype', 'onp.dtype', (['"""int64"""'], {}), "('int64')\n", (1947, 1956), True, 'import numpy as onp\n'), ((1982, 2001), 'numpy.dtype', 'onp.dtype', (['"""uint64"""'], {}), "('uint64')\n", (1991, 2001), True, 'import numpy as onp\n'), ((2028, 2048), 'numpy.dtype', 'onp.dtype', (['"""float64"""'], {}), "('float64')\n", (2037, 2048), True, 'import numpy as onp\n'), ((2076, 2099), 'numpy.dtype', 'onp.dtype', (['"""complex128"""'], {}), "('complex128')\n", (2085, 2099), True, 'import numpy as onp\n'), ((1958, 1976), 'numpy.dtype', 'onp.dtype', (['"""int32"""'], {}), "('int32')\n", (1967, 1976), True, 'import numpy as onp\n'), ((2003, 2022), 'numpy.dtype', 'onp.dtype', (['"""uint32"""'], {}), "('uint32')\n", (2012, 2022), True, 'import numpy as onp\n'), ((2050, 2070), 'numpy.dtype', 'onp.dtype', (['"""float32"""'], {}), "('float32')\n", (2059, 2070), True, 'import numpy as onp\n'), ((2101, 2123), 'numpy.dtype', 'onp.dtype', (['"""complex64"""'], {}), "('complex64')\n", (2110, 2123), True, 'import numpy as onp\n'), ((2264, 2280), 'numpy.dtype', 'onp.dtype', (['dtype'], {}), '(dtype)\n', (2273, 2280), True, 'import numpy as onp\n'), ((2470, 2486), 'numpy.dtype', 'onp.dtype', (['bool_'], {}), '(bool_)\n', (2479, 2486), True, 'import numpy as onp\n'), ((2495, 2510), 'numpy.dtype', 'onp.dtype', (['int_'], {}), '(int_)\n', (2504, 2510), True, 'import numpy as onp\n'), ((2521, 2538), 'numpy.dtype', 'onp.dtype', (['float_'], {}), '(float_)\n', (2530, 2538), True, 'import numpy as onp\n'), ((2551, 2570), 'numpy.dtype', 'onp.dtype', (['complex_'], {}), '(complex_)\n', (2560, 2570), True, 'import numpy as onp\n'), ((2618, 2633), 'numpy.dtype', 'onp.dtype', (['int_'], {}), '(int_)\n', (2627, 2633), True, 'import numpy as onp\n'), ((2694, 2724), 'numpy.issubdtype', 'onp.issubdtype', (['typ', 'onp.bool_'], {}), '(typ, onp.bool_)\n', 
(2708, 2724), True, 'import numpy as onp\n'), ((4056, 4076), 'numpy.issubdtype', 'onp.issubdtype', (['a', 'b'], {}), '(a, b)\n', (4070, 4076), True, 'import numpy as onp\n'), ((4239, 4256), 'numpy.dtype', 'onp.dtype', (['"""bool"""'], {}), "('bool')\n", (4248, 4256), True, 'import numpy as onp\n'), ((4260, 4278), 'numpy.dtype', 'onp.dtype', (['"""uint8"""'], {}), "('uint8')\n", (4269, 4278), True, 'import numpy as onp\n'), ((4282, 4301), 'numpy.dtype', 'onp.dtype', (['"""uint16"""'], {}), "('uint16')\n", (4291, 4301), True, 'import numpy as onp\n'), ((4305, 4324), 'numpy.dtype', 'onp.dtype', (['"""uint32"""'], {}), "('uint32')\n", (4314, 4324), True, 'import numpy as onp\n'), ((4328, 4347), 'numpy.dtype', 'onp.dtype', (['"""uint64"""'], {}), "('uint64')\n", (4337, 4347), True, 'import numpy as onp\n'), ((4351, 4368), 'numpy.dtype', 'onp.dtype', (['"""int8"""'], {}), "('int8')\n", (4360, 4368), True, 'import numpy as onp\n'), ((4372, 4390), 'numpy.dtype', 'onp.dtype', (['"""int16"""'], {}), "('int16')\n", (4381, 4390), True, 'import numpy as onp\n'), ((4394, 4412), 'numpy.dtype', 'onp.dtype', (['"""int32"""'], {}), "('int32')\n", (4403, 4412), True, 'import numpy as onp\n'), ((4416, 4434), 'numpy.dtype', 'onp.dtype', (['"""int64"""'], {}), "('int64')\n", (4425, 4434), True, 'import numpy as onp\n'), ((4438, 4457), 'numpy.dtype', 'onp.dtype', (['bfloat16'], {}), '(bfloat16)\n', (4447, 4457), True, 'import numpy as onp\n'), ((4461, 4481), 'numpy.dtype', 'onp.dtype', (['"""float16"""'], {}), "('float16')\n", (4470, 4481), True, 'import numpy as onp\n'), ((4485, 4505), 'numpy.dtype', 'onp.dtype', (['"""float32"""'], {}), "('float32')\n", (4494, 4505), True, 'import numpy as onp\n'), ((4509, 4529), 'numpy.dtype', 'onp.dtype', (['"""float64"""'], {}), "('float64')\n", (4518, 4529), True, 'import numpy as onp\n'), ((4533, 4555), 'numpy.dtype', 'onp.dtype', (['"""complex64"""'], {}), "('complex64')\n", (4542, 4555), True, 'import numpy as onp\n'), ((4559, 4582), 
'numpy.dtype', 'onp.dtype', (['"""complex128"""'], {}), "('complex128')\n", (4568, 4582), True, 'import numpy as onp\n'), ((4871, 5865), 'numpy.array', 'onp.array', (['[[b1, u1, u2, u4, u8, s1, s2, s4, s8, bf, f2, f4, f8, c4, c8], [u1, u1, u2,\n u4, u8, s2, s2, s4, s8, bf, f2, f4, f8, c4, c8], [u2, u2, u2, u4, u8,\n s4, s4, s4, s8, bf, f2, f4, f8, c4, c8], [u4, u4, u4, u4, u8, s8, s8,\n s8, s8, bf, f2, f4, f8, c4, c8], [u8, u8, u8, u8, u8, f8, f8, f8, f8,\n bf, f2, f4, f8, c4, c8], [s1, s2, s4, s8, f8, s1, s2, s4, s8, bf, f2,\n f4, f8, c4, c8], [s2, s2, s4, s8, f8, s2, s2, s4, s8, bf, f2, f4, f8,\n c4, c8], [s4, s4, s4, s8, f8, s4, s4, s4, s8, bf, f2, f4, f8, c4, c8],\n [s8, s8, s8, s8, f8, s8, s8, s8, s8, bf, f2, f4, f8, c4, c8], [bf, bf,\n bf, bf, bf, bf, bf, bf, bf, bf, f4, f4, f8, c4, c8], [f2, f2, f2, f2,\n f2, f2, f2, f2, f2, f4, f2, f4, f8, c4, c8], [f4, f4, f4, f4, f4, f4,\n f4, f4, f4, f4, f4, f4, f8, c4, c8], [f8, f8, f8, f8, f8, f8, f8, f8,\n f8, f8, f8, f8, f8, c8, c8], [c4, c4, c4, c4, c4, c4, c4, c4, c4, c4,\n c4, c4, c8, c4, c8], [c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8,\n c8, c8, c8]]'], {}), '([[b1, u1, u2, u4, u8, s1, s2, s4, s8, bf, f2, f4, f8, c4, c8], [\n u1, u1, u2, u4, u8, s2, s2, s4, s8, bf, f2, f4, f8, c4, c8], [u2, u2,\n u2, u4, u8, s4, s4, s4, s8, bf, f2, f4, f8, c4, c8], [u4, u4, u4, u4,\n u8, s8, s8, s8, s8, bf, f2, f4, f8, c4, c8], [u8, u8, u8, u8, u8, f8,\n f8, f8, f8, bf, f2, f4, f8, c4, c8], [s1, s2, s4, s8, f8, s1, s2, s4,\n s8, bf, f2, f4, f8, c4, c8], [s2, s2, s4, s8, f8, s2, s2, s4, s8, bf,\n f2, f4, f8, c4, c8], [s4, s4, s4, s8, f8, s4, s4, s4, s8, bf, f2, f4,\n f8, c4, c8], [s8, s8, s8, s8, f8, s8, s8, s8, s8, bf, f2, f4, f8, c4,\n c8], [bf, bf, bf, bf, bf, bf, bf, bf, bf, bf, f4, f4, f8, c4, c8], [f2,\n f2, f2, f2, f2, f2, f2, f2, f2, f4, f2, f4, f8, c4, c8], [f4, f4, f4,\n f4, f4, f4, f4, f4, f4, f4, f4, f4, f8, c4, c8], [f8, f8, f8, f8, f8,\n f8, f8, f8, f8, f8, f8, f8, f8, c8, c8], [c4, c4, c4, c4, c4, c4, c4,\n c4, 
c4, c4, c4, c4, c8, c4, c8], [c8, c8, c8, c8, c8, c8, c8, c8, c8,\n c8, c8, c8, c8, c8, c8]])\n', (4880, 5865), True, 'import numpy as onp\n'), ((6374, 6386), 'numpy.dtype', 'onp.dtype', (['a'], {}), '(a)\n', (6383, 6386), True, 'import numpy as onp\n'), ((6393, 6405), 'numpy.dtype', 'onp.dtype', (['b'], {}), '(b)\n', (6402, 6405), True, 'import numpy as onp\n'), ((7156, 7174), 'numpy.result_type', 'onp.result_type', (['x'], {}), '(x)\n', (7171, 7174), True, 'import numpy as onp\n'), ((1167, 1203), 'os.getenv', 'os.getenv', (['"""JAX_ENABLE_X64"""', '"""False"""'], {}), "('JAX_ENABLE_X64', 'False')\n", (1176, 1203), False, 'import os\n'), ((2749, 2781), 'numpy.issubdtype', 'onp.issubdtype', (['typ', 'onp.integer'], {}), '(typ, onp.integer)\n', (2763, 2781), True, 'import numpy as onp\n'), ((3221, 3240), 'numpy.array', 'onp.array', (['x', 'dtype'], {}), '(x, dtype)\n', (3230, 3240), True, 'import numpy as onp\n'), ((3255, 3267), 'numpy.array', 'onp.array', (['x'], {}), '(x)\n', (3264, 3267), True, 'import numpy as onp\n'), ((3452, 3474), 'numpy.result_type', 'onp.result_type', (['dtype'], {}), '(dtype)\n', (3467, 3474), True, 'import numpy as onp\n'), ((3541, 3557), 'numpy.finfo', 'onp.finfo', (['dtype'], {}), '(dtype)\n', (3550, 3557), True, 'import numpy as onp\n'), ((7679, 7718), 'functools.reduce', 'functools.reduce', (['promote_types', 'dtypes'], {}), '(promote_types, dtypes)\n', (7695, 7718), False, 'import functools\n'), ((2805, 2838), 'numpy.issubdtype', 'onp.issubdtype', (['typ', 'onp.floating'], {}), '(typ, onp.floating)\n', (2819, 2838), True, 'import numpy as onp\n'), ((4029, 4041), 'numpy.dtype', 'onp.dtype', (['b'], {}), '(b)\n', (4038, 4041), True, 'import numpy as onp\n'), ((2864, 2904), 'numpy.issubdtype', 'onp.issubdtype', (['typ', 'onp.complexfloating'], {}), '(typ, onp.complexfloating)\n', (2878, 2904), True, 'import numpy as onp\n'), ((6648, 6659), 'numpy.ndim', 'onp.ndim', (['x'], {}), '(x)\n', (6656, 6659), True, 'import numpy as onp\n')] |
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
from collections import defaultdict
from io import StringIO
from PIL import Image
from pathlib import Path
cap = cv2.VideoCapture('/Users/tomaspiaggio/Desktop/test-ppl-counting/v3.mp4')
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from utils import ops as utils_ops
from utils import label_map_util
from utils import visualization_utils as vis_util
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
# What model to download. Alternatives below trade speed for accuracy.
# MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17' # Regular model
# MODEL_NAME = 'ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03'
# MODEL_NAME = 'mask_rcnn_inception_v2_coco_2018_01_28' # Mask fast model
# MODEL_NAME = 'mask_rcnn_resnet50_atrous_coco_2018_01_28' # Mask slow (potentially more accurate) model
# MODEL_NAME = 'ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03' # Resnet model
MODEL_NAME = 'faster_rcnn_nas_coco_2018_01_28' # (Probably) most accurate boxes
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# Download and unpack the model archive, but only if it is not already
# present in the working directory.
if(not Path(MODEL_FILE).exists()):
    print('Downloading model ... This may take a while. Go grab a coffee.')
    opener = urllib.request.URLopener()
    opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
    print('Unzipping the model')
    tar_file = tarfile.open(MODEL_FILE)
    # Extract only the frozen graph; the rest of the archive is not needed.
    for file in tar_file.getmembers():
        file_name = os.path.basename(file.name)
        if 'frozen_inference_graph.pb' in file_name:
            tar_file.extract(file, os.getcwd())
# Loading a frozen Tensorflow model into memory: deserialize the GraphDef
# from the checkpoint file and import it into a fresh graph. The session is
# created here and reused by run_inference_for_single_image below.
print('Loading computational graph into memory.')
detection_graph = tf.Graph()
with detection_graph.as_default():
    print('Creating the session')
    session = tf.Session()
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`.
# Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine.
print('Creating label map.')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Helper function
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (height, width, 3) uint8 array.

    ``image`` must expose a ``size`` tuple of (width, height) and a
    ``getdata()`` method yielding one RGB triple per pixel, row-major.
    """
    width, height = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
# Detection and rectangle creation
def run_inference_for_single_image(image, graph=detection_graph, sess=session):
    """Run the detection graph on one image and return the raw detections.

    Args:
      image: a NumPy image array; np.expand_dims(image, 0) must produce the
        [1, H, W, 3] batch the graph expects (assumed HxWx3 — TODO confirm).
      graph: the TensorFlow graph holding the detection model; defaults to
        the module-level `detection_graph` captured at definition time.
      sess: the session to run in; defaults to the module-level `session`.

    Returns:
      Dict with 'num_detections' (int), 'detection_boxes',
      'detection_scores', 'detection_classes' (batch dimension stripped),
      and, if the model produces them, 'detection_masks'.
    """
    # Get handles to input and output tensors
    ops = tf.get_default_graph().get_operations()
    all_tensor_names = {output.name for op in ops for output in op.outputs}
    tensor_dict = {}
    # Only request the outputs that actually exist in this model; e.g. the
    # mask tensor is absent from pure box-detection models.
    for key in [
        'num_detections', 'detection_boxes', 'detection_scores',
        'detection_classes', 'detection_masks'
    ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
            tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                tensor_name)
    if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        # Drop the padded (empty) detection slots before reframing.
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        # Binarize the soft masks at 0.5.
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
    image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
    # Run inference
    output_dict = sess.run(tensor_dict,
                           feed_dict={image_tensor: np.expand_dims(image, 0)})
    # all outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict[
        'detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
# In[16]:
# Process images source/img1.png .. source/img6.png through the detector and
# draw the results.
with detection_graph.as_default():
    for i in range(1, 7):
        image_np = cv2.imread('source/img' + str(i) + '.png')
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        # NOTE(review): image_np_expanded is never used — inference below is
        # given image_np directly and expands internally; consider removing.
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        output_dict = run_inference_for_single_image(image_np)
        # Visualization of the results of a detection: draws boxes, labels and
        # (if present) instance masks onto image_np in place.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8)
cv2.imwrite('source/result/img' + str(i) + '.png', image_np) | [
"pathlib.Path",
"tensorflow.get_default_graph",
"tensorflow.greater",
"os.path.join",
"sys.path.append",
"utils.label_map_util.convert_label_map_to_categories",
"tensorflow.cast",
"six.moves.urllib.request.URLopener",
"tensorflow.squeeze",
"tarfile.open",
"tensorflow.GraphDef",
"os.path.basena... | [((254, 326), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""/Users/tomaspiaggio/Desktop/test-ppl-counting/v3.mp4"""'], {}), "('/Users/tomaspiaggio/Desktop/test-ppl-counting/v3.mp4')\n", (270, 326), False, 'import cv2\n'), ((406, 427), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (421, 427), False, 'import sys\n'), ((1549, 1595), 'os.path.join', 'os.path.join', (['"""data"""', '"""mscoco_label_map.pbtxt"""'], {}), "('data', 'mscoco_label_map.pbtxt')\n", (1561, 1595), False, 'import os\n'), ((2227, 2237), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2235, 2237), True, 'import tensorflow as tf\n'), ((2910, 2954), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABELS'], {}), '(PATH_TO_LABELS)\n', (2938, 2954), False, 'from utils import label_map_util\n'), ((2968, 3082), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (3014, 3082), False, 'from utils import label_map_util\n'), ((3095, 3143), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (3131, 3143), False, 'from utils import label_map_util\n'), ((1756, 1782), 'six.moves.urllib.request.URLopener', 'urllib.request.URLopener', ([], {}), '()\n', (1780, 1782), True, 'import six.moves.urllib as urllib\n'), ((1891, 1915), 'tarfile.open', 'tarfile.open', (['MODEL_FILE'], {}), '(MODEL_FILE)\n', (1903, 1915), False, 'import tarfile\n'), ((2321, 2333), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2331, 2333), True, 'import tensorflow as tf\n'), ((2353, 2366), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2364, 2366), True, 'import tensorflow as tf\n'), ((1975, 2002), 'os.path.basename', 'os.path.basename', (['file.name'], {}), 
'(file.name)\n', (1991, 2002), False, 'import os\n'), ((2376, 2410), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_CKPT', '"""rb"""'], {}), "(PATH_TO_CKPT, 'rb')\n", (2390, 2410), True, 'import tensorflow as tf\n'), ((2520, 2562), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2539, 2562), True, 'import tensorflow as tf\n'), ((4087, 4134), 'tensorflow.squeeze', 'tf.squeeze', (["tensor_dict['detection_boxes']", '[0]'], {}), "(tensor_dict['detection_boxes'], [0])\n", (4097, 4134), True, 'import tensorflow as tf\n'), ((4161, 4208), 'tensorflow.squeeze', 'tf.squeeze', (["tensor_dict['detection_masks']", '[0]'], {}), "(tensor_dict['detection_masks'], [0])\n", (4171, 4208), True, 'import tensorflow as tf\n'), ((4352, 4403), 'tensorflow.cast', 'tf.cast', (["tensor_dict['num_detections'][0]", 'tf.int32'], {}), "(tensor_dict['num_detections'][0], tf.int32)\n", (4359, 4403), True, 'import tensorflow as tf\n'), ((4430, 4489), 'tensorflow.slice', 'tf.slice', (['detection_boxes', '[0, 0]', '[real_num_detection, -1]'], {}), '(detection_boxes, [0, 0], [real_num_detection, -1])\n', (4438, 4489), True, 'import tensorflow as tf\n'), ((4516, 4582), 'tensorflow.slice', 'tf.slice', (['detection_masks', '[0, 0, 0]', '[real_num_detection, -1, -1]'], {}), '(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n', (4524, 4582), True, 'import tensorflow as tf\n'), ((4618, 4730), 'utils.ops.reframe_box_masks_to_image_masks', 'utils_ops.reframe_box_masks_to_image_masks', (['detection_masks', 'detection_boxes', 'image.shape[0]', 'image.shape[1]'], {}), '(detection_masks, detection_boxes,\n image.shape[0], image.shape[1])\n', (4660, 4730), True, 'from utils import ops as utils_ops\n'), ((4957, 5000), 'tensorflow.expand_dims', 'tf.expand_dims', (['detection_masks_reframed', '(0)'], {}), '(detection_masks_reframed, 0)\n', (4971, 5000), True, 'import tensorflow as tf\n'), ((6025, 6057), 'numpy.expand_dims', 
'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (6039, 6057), True, 'import numpy as np\n'), ((1639, 1655), 'pathlib.Path', 'Path', (['MODEL_FILE'], {}), '(MODEL_FILE)\n', (1643, 1655), False, 'from pathlib import Path\n'), ((3507, 3529), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3527, 3529), True, 'import tensorflow as tf\n'), ((4796, 4837), 'tensorflow.greater', 'tf.greater', (['detection_masks_reframed', '(0.5)'], {}), '(detection_masks_reframed, 0.5)\n', (4806, 4837), True, 'import tensorflow as tf\n'), ((5033, 5055), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (5053, 5055), True, 'import tensorflow as tf\n'), ((2091, 2102), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2100, 2102), False, 'import os\n'), ((5207, 5231), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (5221, 5231), True, 'import numpy as np\n'), ((3888, 3910), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3908, 3910), True, 'import tensorflow as tf\n')] |
"""ML-Ensemble
:author: <NAME>
:copyright: 2017-2018
:license: MIT
Base classes for parallel estimation
Schedulers for global setups:
0:
Base setups - independent of other features:
IndexMixin._setup_0_index
1:
Global setups - reserved for aggregating classes:
Layer._setup_1_global
2:
Dependents on 0:
ProbaMixin.__setup_2_multiplier
3:
Dependents on 0, 2:
OutputMixin.__setup_3__output_columns
Note that schedulers are experimental and may change without a deprecation
cycle.
"""
import warnings
from abc import abstractmethod
import numpy as np
from ._base_functions import check_stack, check_params
from .. import config
from ..utils.exceptions import ParallelProcessingError
from ..externals.sklearn.base import clone, BaseEstimator as _BaseEstimator
class ParamMixin(_BaseEstimator, object):
    """Mixin that guards static parameters against post-fit modification.

    .. Note::
        Classes inheriting this mixin must define ``__static__=list()``
        and ``_static_fit_params=dict()`` in their ``__init__``.
    """
    def _store_static_params(self):
        """Snapshot the current static params for later comparison."""
        if not self.__static__:
            return
        params = self.get_params(deep=False)
        for name in self.__static__:
            if name in params:
                # Clone so later in-place mutation of the live parameter
                # cannot alter the stored snapshot.
                self._static_fit_params[name] = clone(params[name], safe=False)
    def _check_static_params(self):
        """Return whether the static params still match the stored snapshot."""
        snapshot = self.get_params(deep=False)
        current = {name: value for name, value in snapshot.items()
                   if name in self.__static__}
        return check_params(self._static_fit_params, current)
class IndexMixin(object):
    """Indexer mixin

    Mixin for handling indexers.

    .. note::
        To use this mixin the instance inheriting it must set the
        ``indexer`` or ``indexers`` attribute in ``__init__`` (not both).
    """
    @property
    def __indexer__(self):
        """Flag for existence of indexer"""
        return hasattr(self, 'indexer') or hasattr(self, 'indexers')

    def _check_indexer(self, indexer):
        """Check that ``indexer`` is consistent with the current indexers.

        Parameters
        ----------
        indexer : object
            candidate indexer instance (class name must contain ``index``).

        Raises
        ------
        ValueError
            if ``indexer`` does not appear to be an indexer, or if it
            mixes blend-type with full-type indexers on this instance.
        """
        cls = indexer.__class__.__name__.lower()
        if 'index' not in cls:
            # BUGFIX: the ValueError was previously constructed but never
            # raised, so invalid indexers slipped through silently.
            raise ValueError(
                "Passed indexer does not appear to be valid indexer")
        lcls = [idx.__class__.__name__.lower() for idx in self._get_indexers()]
        if lcls:
            if 'blendindex' in lcls and cls != 'blendindex':
                raise ValueError(
                    "Instance has blendindex, but was passed full type")
            elif 'blendindex' not in lcls and cls == 'blendindex':
                raise ValueError(
                    "Instance has full type index, but was passed blendindex")

    def _get_indexers(self):
        """Return list of indexers.

        Returns
        -------
        list
            the instance's indexers; falls back to ``indexers`` (or
            ``[None]``) when no single ``indexer`` is set.

        Raises
        ------
        AttributeError
            if neither ``indexer`` nor ``indexers`` is set.
        """
        if not self.__indexer__:
            raise AttributeError("No indexer or indexers attribute available")
        indexers = [getattr(self, 'indexer', None)]
        if None in indexers:
            indexers = getattr(self, 'indexers', [None])
        return indexers

    def _setup_0_index(self, X, y, job):
        """Fit all indexers on the given data (setup scheduler slot 0)."""
        indexers = self._get_indexers()
        for indexer in indexers:
            indexer.fit(X, y, job)
class OutputMixin(IndexMixin):
    """Output Mixin

    Mixin class for interfacing with ParallelProcessing when outputs are
    desired.

    .. note::
        Inheriting instances must set the ``feature_span`` attribute and
        the ``__no_output__`` flag in ``__init__``.
    """
    @abstractmethod
    def set_output_columns(self, X, y, job, n_left_concats=0):
        """Set output columns for prediction array"""
        pass

    def _setup_3_output_columns(self, X, y, job, n_left_concats=0):
        """Set output columns for prediction array. Used during setup"""
        if self.__no_output__:
            return
        self.set_output_columns(X, y, job, n_left_concats)

    def shape(self, job):
        """Prediction array shape as ``(n_rows, n_columns)``."""
        if not hasattr(self, 'feature_span'):
            raise ParallelProcessingError(
                "Instance dose not set the feature_span attribute "
                "in the constructor.")
        if not self.feature_span:
            raise ValueError("Columns not set. Call set_output_columns.")
        return self.size(job), self.feature_span[1]

    def size(self, attr):
        """Get size of dim 0 for the given job/attribute name."""
        # Map job names onto indexer size attributes.
        if attr not in ('n_test_samples', 'n_samples'):
            attr = 'n_samples' if attr == 'predict' else 'n_test_samples'
        indexers = self._get_indexers()
        sizes = np.unique([getattr(idx, attr) for idx in indexers])
        if sizes.shape[0] != 1:
            # Indexers disagree on the output size; fall back to the
            # largest and warn that outputs will be padded.
            warnings.warn(
                "Inconsistent output sizes generated by indexers "
                "(sizes: %r from indexers %r).\n"
                "outputs will be zero-padded"
                % (sizes.tolist(), indexers))
            return max(sizes)
        return sizes[0]
class ProbaMixin(object):
    """Probability Mixin

    Mixin for probability features on objects interfacing with
    :class:`~mlens.parallel.backend.ParallelProcessing`.

    .. note::
        Inheriting instances must set the ``proba`` and ``_classes(=None)``
        attributes in ``__init__``.
    """
    def _setup_2_multiplier(self, X, y, job=None):
        # Record the number of classes when running in probability mode.
        if self.proba and y is not None:
            self.classes_ = y

    def _get_multiplier(self, X, y, alt=1):
        # Output-width multiplier: number of classes under proba,
        # otherwise the caller-supplied alternative.
        return self.classes_ if self.proba else alt

    @property
    def _predict_attr(self):
        # Name of the prediction method to call on estimators.
        return 'predict_proba' if self.proba else 'predict'

    @property
    def classes_(self):
        """Prediction classes during proba"""
        return self._classes

    @classes_.setter
    def classes_(self, y):
        """Set classes given input y"""
        self._classes = np.unique(y).shape[0]
class BaseBackend(object):
    """Base class for parallel backend

    Implements default backend settings.
    """
    def __init__(self, backend=None, n_jobs=-1, dtype=None,
                 raise_on_exception=True):
        self.n_jobs = n_jobs
        # Fall back to the global config for unspecified dtype / backend.
        if dtype is None:
            dtype = config.get_dtype()
        if backend is None:
            backend = config.get_backend()
        self.dtype = dtype
        self.backend = backend
        self.raise_on_exception = raise_on_exception

    @abstractmethod
    def __iter__(self):
        yield
class BaseParallel(BaseBackend):
    """Base class for parallel objects

    Parameters
    ----------
    name : str
        name of instance. Should be unique.
    backend : str or object (default = 'threading')
        backend infrastructure to use during call to
        :class:`mlens.externals.joblib.Parallel`. See Joblib for further
        documentation. To set global backend,
        see :func:`~mlens.config.set_backend`.
    raise_on_exception : bool (default = True)
        whether to issue warnings on soft exceptions or raise error.
        Examples include lack of layers, bad inputs, and failed fit of an
        estimator in a layer. If set to ``False``, warnings are issued
        instead but estimation continues unless exception is fatal.
    verbose : int or bool (default = False)
        level of verbosity.
    n_jobs : int (default = -1)
        Degree of concurrency in estimation. Set to -1 to maximize,
        1 runs on a single process (or thread).
    dtype : obj (default = np.float32)
        data type to use, must be compatible with a numpy array dtype.
    """
    def __init__(self, name, *args, **kwargs):
        super(BaseParallel, self).__init__(*args, **kwargs)
        self.name = name
        self.__no_output__ = False

    @abstractmethod
    def __iter__(self):
        """Iterator for process manager"""
        yield

    def setup(self, X, y, job, skip=None, **kwargs):
        """Run every ``_setup_*`` scheduler hook, in sorted order.

        ``skip`` lists scheduler suffixes to leave out; extra keyword
        arguments are forwarded only to hooks whose signature accepts them.
        """
        blocked = {'_setup_%s' % s for s in skip} if skip else set()
        hooks = sorted(
            attr for attr in dir(self)
            if attr.startswith('_setup_') and attr not in blocked)
        for attr in hooks:
            hook = getattr(self, attr)
            accepted = hook.__func__.__code__.co_varnames
            extra = {key: val for key, val in kwargs.items()
                     if key in accepted}
            hook(X, y, job, **extra)
class BaseEstimator(ParamMixin, _BaseEstimator, BaseParallel):
    """Base Parallel Estimator class

    Modified Scikit-learn class to handle backend params that we want to
    protect from changes.
    """
    def __init__(self, *args, **kwargs):
        super(BaseEstimator, self).__init__(*args, **kwargs)
        self.__static__ = list()
        self._static_fit_params = dict()

    def get_params(self, deep=True):
        """Return instance parameters, including the backend settings."""
        params = super(BaseEstimator, self).get_params(deep=deep)
        # Merge in the backend constructor arguments by introspection.
        for arg in BaseBackend.__init__.__code__.co_varnames:
            if arg == 'self':
                continue
            params[arg] = getattr(self, arg)
        return params

    @property
    @abstractmethod
    def __fitted__(self):
        """Fit status"""
        return self._check_static_params()
class BaseStacker(BaseEstimator):
    """Base class for instances that stack job estimators.

    Parameters
    ----------
    stack : list, optional
        initial list of stacked members.
    verbose : int or bool (default = False)
        verbosity level; propagated to all stack members via the
        ``verbose`` setter.
    """
    def __init__(self, stack=None, verbose=False, *args, **kwargs):
        super(BaseStacker, self).__init__(*args, **kwargs)
        if stack and not isinstance(stack, list):
            raise ValueError("Stack must be a list. Got %r:" % type(stack))
        self.stack = stack if stack else list()
        self._verbose = verbose

    @abstractmethod
    def __iter__(self):
        yield

    @staticmethod
    def _attr_name(item):
        # Single place to derive the attribute alias used by push/replace.
        return item.name.replace('-', '_').replace(' ', '').strip()

    def push(self, *stack):
        """Push onto stack"""
        check_stack(stack, self.stack)
        for item in stack:
            self.stack.append(item)
            # Expose each member as an instance attribute for easy access.
            setattr(self, self._attr_name(item), item)
        return self

    def replace(self, idx, item):
        """Replace a current member of the stack with a new instance"""
        setattr(self, self._attr_name(item), item)
        self.stack[idx] = item

    def pop(self, idx):
        """Pop a previous push with index idx"""
        return self.stack.pop(idx)

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : boolean, optional
            whether to return nested parameters.
        """
        out = super(BaseStacker, self).get_params(deep=deep)
        if not deep:
            return out
        for item in self.stack:
            out[item.name] = item
            for key, val in item.get_params(deep=True).items():
                out['%s__%s' % (item.name, key)] = val
        return out

    @property
    def __fitted__(self):
        """Fitted status"""
        if not self.stack or not self._check_static_params():
            return False
        return all([g.__fitted__ for g in self.stack])

    @property
    def __stack__(self):
        """Check stack"""
        if not isinstance(self.stack, list):
            # BUGFIX: error message typo "Extected" -> "Expected".
            raise ValueError(
                "Stack corrupted. Expected list. Got %r" % type(self.stack))
        return len(self.stack) > 0

    @property
    def verbose(self):
        """Verbosity"""
        return self._verbose

    @verbose.setter
    def verbose(self, verbose):
        """Set verbosity, propagating it to every stack member."""
        self._verbose = verbose
        for g in self.stack:
            g.verbose = verbose
| [
"numpy.unique"
] | [((4843, 4859), 'numpy.unique', 'np.unique', (['sizes'], {}), '(sizes)\n', (4852, 4859), True, 'import numpy as np\n'), ((6154, 6166), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (6163, 6166), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Async Bayesian optimization classes"""
import sys
import time
from typing import Optional, Callable
import numpy as np
import pandas as pd
from bayesopt.acquisition import AcquisitionWithOffset
from bayesopt.util import add_hallucinations_to_x_and_y, stable_cholesky
from ml_utils import timed_print as print
from ml_utils.lipschitz import estimate_lipschitz_constant, \
estimate_lipschitz_around_x
from ml_utils.models import GP
from .acquisition import LocallyPenalisedAcquisition, \
AcquisitionFunction, LocalLipschitzPenalisedAcquisition, \
HardMinAwareConeAcquisition
from .bayesopt import BayesianOptimisation
from .executor import ExecutorBase
class AsyncBayesianOptimization(BayesianOptimisation):
    """Async Bayesian optimization class
    Performs Bayesian optimization with a set number of busy and free workers
    Parameters
    ----------
    sampler : Callable
        function handle returning sample from expensive function being
        optimized
    surrogate : basic_gp.GP
        (GP) model that models the surface of 'objective'
    bounds : ndarray
        bounds of each dimension of x as a Dx2 vector (default [0, 1])
    async_interface : ExecutorBase
        Interface that deals with exchange of information between
        async workers and the BO loop
    batch_size : int
        How many tasks to suggest in one go. This will wait for the
        required number of workers to become free before evaluating the batch
    acq_dict : acquisition.AcquisitionFunction
        Defaults to EI
    starting_jobs : list(dicts)
        list of dicts in the form {'x': np.ndarray, 'f': callable, 't': float}
    optimise_surrogate_model : bool
        Whether to optimise the surrogate model after each BayesOpt iteration
    track_cond_k : bool
        Whether to keep track of cond(K) of the surrogate model across
        BayesOpt iterations
    y_min_opt_params : dict
        opt_params dict with the following fields:
        - method = 'standard', multigrad', 'direct'
        - n_direct_evals = for direct
        - num_restarts = for multigrad
    acq_opt_params : dict
        opt_params dict with the following fields:
        - method = 'multigrad', 'direct'
        - n_direct_evals = for direct
        - num_restarts = for multigrad
    n_bo_steps : int
        Number of BayesOpt steps
    min_acq : float
        cut-off threshold for acquisition function
    """
    def __init__(self, sampler: Callable, surrogate: GP, bounds: np.ndarray,
                 async_interface: ExecutorBase = None,
                 starting_jobs: Optional[list] = None,
                 **kwargs):
        self.starting_jobs = starting_jobs
        self.interface = async_interface
        super().__init__(sampler, surrogate, bounds,
                         **kwargs)
    def _initialise_bo_df(self):
        """
        Initialise the DataFrame for keeping track of the BO run
        """
        self.df = pd.DataFrame(
            columns=['ii', 't', 'y_min', 'x_min', 'n_busy', 'x_busy', 'n_data',
                     'model_x', 'model_y', 'model_param_array',
                     'acq_at_sample', 'requested_x_sample', 'x_sample',
                     'y_sample', 'time_taken_opt_surrogate',
                     'time_taken_find_y_min', 'time_taken_get_next',
                     'time_taken_bo_step', 'var_at_y_min', 'cond_k'])
        self.x_min, self.y_min, self.var_at_y_min = self._get_y_min()
        if self.starting_jobs is not None:
            x_busy = np.vstack([job['x']
                               for job in
                               self.starting_jobs])
        else:
            x_busy = None
        # Sentinel record (ii = -1) capturing the state before any BO step.
        starting_record = {'ii': -1,
                           'iteration': 0,
                           't': self.interface.status['t'],
                           'y_min': self.y_min,
                           'x_min': self.x_min,
                           'n_busy': self.interface.n_busy_workers,
                           'x_busy': x_busy,
                           'n_free': self.interface.n_free_workers,
                           'n_data': len(self.surrogate.X),
                           'model_x': self.surrogate.X,
                           'model_y': self.surrogate.Y,
                           'model_param_array': self.surrogate.param_array,
                           'acq_at_sample': np.nan,
                           'requested_x_sample': np.nan,
                           'y_sample': np.nan,
                           'x_sample': np.nan,
                           'time_taken_opt_surrogate': np.nan,
                           'time_taken_find_y_min': np.nan,
                           'time_taken_get_next': np.nan,
                           'time_taken_bo_step': np.nan,
                           'var_at_y_min': self.var_at_y_min,
                           'cond_k': np.nan}
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
        # migrating would require pd.concat here and in _update_bo_df.
        self.df = self.df.append([starting_record], sort=True)
    def _update_bo_df(self, x_batch, acq_at_x_best, new_sample_x, new_sample_y,
                      time_dict):
        """Updates the local dataframe with the current iteration's data
        Parameters
        ----------
        x_batch
            Best location to sample at
        acq_at_x_best
            Acquisition function value at x_best
        new_sample_x
            actual sample received
        new_sample_y
            actual sample received
        time_dict
            time taken for different parts of the algo in seconds
        """
        # requested_x_sample = new points queued in the async worker
        current_record = {
            'ii': self.curr_bo_step,
            't': self.interface.status['t'],
            'iteration': self.curr_bo_step + 1,
            'y_min': self.y_min,
            'x_min': self.x_min,
            'n_busy': self.interface.n_busy_workers,
            'x_busy': self.interface.get_array_of_running_jobs(),
            'n_free': self.interface.n_free_workers,
            'n_data': len(self.surrogate.X),
            'model_x': self.surrogate.X,
            'model_y': self.surrogate.Y,
            'model_param_array': self.surrogate.param_array,
            'acq_at_sample': acq_at_x_best,
            'requested_x_sample': x_batch,
            'y_sample': new_sample_y,
            'x_sample': new_sample_x,
            'time_taken_opt_surrogate': time_dict['time_taken_opt_surrogate'],
            'time_taken_find_y_min': time_dict['time_taken_find_y_min'],
            'time_taken_get_next': time_dict['time_taken_get_next'],
            'time_taken_bo_step': time_dict['time_taken_bo_step'],
            'var_at_y_min': self.var_at_y_min,
            'cond_k': (self.cond_k_hist[
                self.curr_bo_step] if self.track_cond_k else None)
        }
        self.df = self.df.append([current_record], sort=True)
    def run(self):
        """
        Run the Async BayesOpt loop

        Each iteration: waits for ``batch_size`` free workers, folds any
        completed jobs into the surrogate, (re-)optimises the surrogate,
        selects the next batch via ``get_next()``, queues it, and records
        timings and state in ``self.df``.
        """
        # TODO: test this
        t_starting_run = time.time()
        if self.verbose:
            print("Started BayesOpt.run()")
        self._initialise_bo_df()
        if self.starting_jobs is not None:
            for job in self.starting_jobs:
                self.interface.add_job_to_queue(job)
        for self.curr_bo_step in range(0, self.n_bo_steps):
            new_sample_x, new_sample_y = None, None
            # try:
            # NOTE(review): 'if True:' preserves the indentation of the
            # disabled try/except kept in the comments below.
            if True:
                t_beginning_of_bo_step = time.time()
                if self.verbose:
                    print("**--** Starting BayesOpt iteration {}/{} **--**"
                          .format(self.curr_bo_step + 1, self.n_bo_steps))
                # Move time ahead until we have the correct number of free
                # workers
                self.interface.run_until_n_free(self.batch_size)
                n_free_workers = self.interface.status['n_free_workers']
                completed_jobs = self.interface.get_completed_jobs()
                if len(completed_jobs) > 0:
                    new_sample_x, new_sample_y = \
                        self._add_completed_jobs_to_surrogate(completed_jobs)
                assert n_free_workers >= self.batch_size
                t_before_opt_surrogate = time.time()
                # if self.verbose:
                #     print(f"Surrogate n_data = {len(self.surrogate.X)}")
                # if self.optimise_surrogate_model_flag:
                #     if self.verbose > 1:
                #         print("Optimising surrogate model...")
                #     self.surrogate.optimize()
                #     self.param_array_hist.append(self.surrogate.param_array)
                #     if self.verbose > 1:
                #         print(
                #             f"Surrogate model optimisation complete. "
                #             f"New param_array = {self.surrogate.param_array}")
                self.optimize_surrogate_if_needed()
                t_after_opt_surrogate = time.time()
                t_before_find_y_min = time.time()
                self.x_min, self.y_min, self.var_at_y_min = self._get_y_min()
                t_after_find_y_min = t_before_get_next = time.time()
                if self.verbose:
                    print("Selecting next point(s)...")
                x_batch, acq_at_x_batch = self.get_next()
                t_after_get_next = t_end_of_bo_step = time.time()
                time_taken_opt_surrogate = \
                    (t_after_opt_surrogate - t_before_opt_surrogate)
                time_taken_find_y_min = \
                    (t_after_find_y_min - t_before_find_y_min)
                time_taken_get_next = \
                    (t_after_get_next - t_before_get_next)
                time_taken_bo_step = \
                    (t_end_of_bo_step - t_beginning_of_bo_step)
                time_taken_dict = {
                    'time_taken_opt_surrogate': time_taken_opt_surrogate,
                    'time_taken_find_y_min': time_taken_find_y_min,
                    'time_taken_get_next': time_taken_get_next,
                    'time_taken_bo_step': time_taken_bo_step, }
                if self.create_plots:
                    self.plot_step(x_batch=x_batch)
                # queue the jobs
                jobs = []
                for ii in range(len(x_batch)):
                    job = {'x': x_batch[ii], 'f': self.sampler}
                    jobs.append(job)
                self.interface.add_job_to_queue(jobs)
                self.save_history(None)
                if self.curr_bo_step == self.n_bo_steps - 1:  # last step
                    if self.verbose > 1:
                        print("Used up budget.")
                        print("Minimum at",
                              self.surrogate.X[np.argmin(self.surrogate.Y)])
                self._update_bo_df(x_batch, acq_at_x_batch, new_sample_x,
                                   new_sample_y, time_taken_dict)
                # Attempting to force SLURM to update the output file
                sys.stdout.flush()
            # except np.linalg.linalg.LinAlgError:
            #     print("WARNING: BayesOpt crashed at iteration {}!".format(
            #         self.curr_bo_step))
            #     break
        if self.verbose:
            print(
                f"Completed BO exp in;"
                f" {round(time.time() - t_starting_run, 2)}s")
    def get_next(self):
        """Finds the next point(s) to sample at
        Returns
        -------
        x_best : np.ndarray
            Location to sample at
        acq_at_x_best : float
            Value of the acquisition function at the sampling locations
        """
        # Abstract in this base class; subclasses implement the batch
        # selection strategy.
        raise NotImplementedError
    def _add_completed_jobs_to_surrogate(self, completed_jobs):
        """Stack finished jobs' (x, y) pairs and add them to the surrogate."""
        # TODO: test this
        x = []
        y = []
        for job in completed_jobs:
            x.append(job['x'])
            y.append(job['y'])
        x = np.vstack(x)
        y = np.vstack(y)
        self._update_surrogate_with_new_data(x, y)
        return x, y
    def plot_step(self, x_batch=None, save_plots=None, **kwargs):
        """Plot the current BO state; marks busy locations on 1-D problems."""
        if save_plots is None:
            save_plots = self.save_plots
        if isinstance(x_batch, list):
            x_batch = np.vstack(x_batch)
        fig, axes = super().plot_step(x_batch, external_call=True)
        acq = self._create_acq_function()
        if len(self.bounds) == 1:  # 1D
            x_busy = self.interface.get_array_of_running_jobs()
            if x_busy is not None:
                axes[0].plot(x_busy, self.surrogate.predict(x_busy)[0], 'g*',
                             label="Busy", markersize=16)
                axes[1].plot(x_busy, acq.evaluate(x_busy), 'g*',
                             label="Busy", markersize=16)
                axes[0].legend(numpoints=1)
                axes[1].legend(numpoints=1)
        if save_plots:
            self.save_plots_to_disk(fig)
        else:
            fig.show()
        return fig, axes
class AsyncBOHeuristicQEI(AsyncBayesianOptimization):
    """Async BO with approximate q-EI
    Q-EI is approximated by sequentially finding the best location and
    setting its y-value using one of Ginsbourger's heuristics until the
    batch is full
    """
    def __init__(self, sampler, surrogate, bounds,
                 async_infill_strategy='kriging_believer',
                 **kwargs):
        # Heuristic used to hallucinate y-values at pending locations;
        # defaults to 'constant_liar_min' when explicitly passed None.
        if async_infill_strategy is None:
            self.async_infill_strategy = 'constant_liar_min'
        else:
            self.async_infill_strategy = async_infill_strategy
        super().__init__(sampler, surrogate, bounds, **kwargs)
    def get_next(self):
        """Finds the next point(s) to sample at
        This function interacts with the async interface to get info about
        completed and running jobs and computes the next point(s) to add
        to the queue based on the batch size
        Returns
        -------
        x_best : np.ndarray
            Location to sample at
        acq_at_x_best : float
            Value of the acquisition function at the sampling locations
        """
        # Keep a handle on the real data so it can be restored at the end;
        # the surrogate is temporarily refit on hallucinated points below.
        old_surrogate_x = self.surrogate.X
        old_surrogate_y = self.surrogate.Y_raw
        x_busy = self.interface.get_array_of_running_jobs()
        fixed_dim_vals = None
        # Hallucinate y-values at the busy locations per the infill strategy.
        surrogate_x_with_fake, surrogate_y_with_fake = \
            add_hallucinations_to_x_and_y(self, old_surrogate_x,
                                          old_surrogate_y, x_busy,
                                          fixed_dim_vals=fixed_dim_vals)
        self.surrogate.set_XY(X=surrogate_x_with_fake,
                              Y=surrogate_y_with_fake)
        acq = self._create_acq_function()
        x_best, acq_at_x_best = self._optimise_acq_func(acq)
        x_batch = [x_best, ]
        acq_at_each_x_batch = [acq_at_x_best, ]
        if self.batch_size > 1:
            for ii in range(self.batch_size - 1):
                # Using the async infill heuristic
                # Each chosen point is hallucinated in turn so subsequent
                # optimisations avoid re-selecting the same location.
                current_surrogate_x = self.surrogate.X
                current_surrogate_y = self.surrogate.Y_raw
                surrogate_x_with_fake, surrogate_y_with_fake = \
                    add_hallucinations_to_x_and_y(
                        self, current_surrogate_x,
                        current_surrogate_y, x_batch,
                        fixed_dim_vals=fixed_dim_vals)
                self.surrogate.set_XY(X=surrogate_x_with_fake,
                                      Y=surrogate_y_with_fake)
                acq = self._create_acq_function()
                x_best, acq_at_x_best = self._optimise_acq_func(acq)
                x_batch.append(x_best)
                acq_at_each_x_batch.append(acq_at_x_best)
        # Restore the surrogate to the real (non-hallucinated) data.
        self.surrogate.set_XY(X=old_surrogate_x, Y=old_surrogate_y)
        assert len(x_batch) == self.batch_size
        return x_batch, acq_at_each_x_batch
class PLAyBOOK_L(AsyncBayesianOptimization):
    """Async BO with local penalisation.
    Penalisers are places at the busy points.
    """
    def __init__(self, sampler, surrogate, bounds,
                 lp_transform=None,
                 offset_acq=False, min_acq_opt_params=None,
                 **kwargs):
        super().__init__(sampler, surrogate, bounds,
                         **kwargs)
        # Optional transform applied inside the penalised acquisition.
        self.lp_transform = lp_transform
        # Last Lipschitz constant estimate (set in get_next).
        self.L = None
        self.offset_acq = offset_acq
        # Optimiser settings used when searching for the acquisition minimum
        # (only needed when offset_acq is enabled).
        if min_acq_opt_params is None:
            self.min_acq_opt_params = self.acq_opt_params
        else:
            self.min_acq_opt_params = min_acq_opt_params
    def _get_min_value_for_acq(self) -> float:
        """This will usually return self.y_min unless we are using Gumble
        sampling to define the minimum value used in the acquisition
        function's computations
        Returns
        -------
        y_min
        """
        return self.y_min
    def get_next(self):
        """Finds the next point(s) to sample at
        Returns
        -------
        x_best : np.ndarray
            Location to sample at
        acq_at_x_best : float
            Value of the acquisition function at the sampling locations
        """
        acq_orig = self._create_acq_function()
        # fix for jumping straight to get_next() instead of via run()
        if self.y_min is None:
            self.x_min, self.y_min, self.var_at_y_min = self._get_y_min()
        M = self._get_min_value_for_acq()
        if self.offset_acq:
            # Shift the acquisition so its minimum sits at zero before
            # penalisation.
            _, min_val = self._optimise_acq_func(
                acq_orig, max_or_min='min',
                acq_opt_params=self.min_acq_opt_params)
            acq_orig = AcquisitionWithOffset(acq_orig, min_val)
        # Single global Lipschitz estimate shared by all penalisers.
        L = estimate_lipschitz_constant(self.surrogate, self.bounds)
        self.L = L
        # ASYNC
        x_busy = self.interface.get_array_of_running_jobs()
        if x_busy is not None:
            acq = self._create_lp_acq_function(x_busy, L, M, acq_orig=acq_orig)
        else:
            acq = acq_orig
        # First point is the result of a greedy search on the unpenalized
        # acquisition function
        x_best, acq_at_x_best = self._optimise_acq_func(acq)
        if self.debug:
            self.plot_acq(acq.evaluate, x_busy=x_busy)
        x_batch = x_best
        acq_at_each_x_batch = acq_at_x_best
        if self.batch_size > 1:
            if self.verbose:
                print(f"Lipschitz constant estimate = {L}")
            while len(x_batch) < self.batch_size:
                # ASYNC
                # Penalise both the busy points and the batch chosen so far.
                if x_busy is not None:
                    x_pen = np.vstack((x_busy, x_batch))
                else:
                    x_pen = x_batch
                acq = self._create_lp_acq_function(x_pen, L, M,
                                                   acq_orig=acq_orig)
                if self.debug:
                    self.plot_acq(acq.evaluate, x_batch=x_batch, x_busy=x_busy)
                x_best, acq_at_x_best = self._optimise_acq_func(acq)
                x_batch = np.vstack((x_batch, x_best))
                acq_at_each_x_batch = np.hstack((acq_at_each_x_batch,
                                               acq_at_x_best))
        return x_batch, acq_at_each_x_batch
    def _create_lp_acq_function(self, x_pen, L_pen, M,
                                acq_orig=None,
                                **kwargs) -> AcquisitionFunction:
        """Create the LP acquisition function object
        This function is only used to convert the acquisition function into
        a Local Penalization version
        Returns
        -------
        AcquisitionFunction
            The instantiation of the desired acquisition function class
        """
        if acq_orig is None:
            acq = self._create_acq_function()
        else:
            acq = acq_orig
        lp_acq = LocallyPenalisedAcquisition(self.surrogate, acq, x_pen, L_pen,
                                             M, self.lp_transform,
                                             **kwargs)
        return lp_acq
class PLAyBOOK_LL(PLAyBOOK_L):
    """Async BO with local penalisation with local Lipschitz constant
    """
    def get_next(self):
        """Finds the next point(s) to sample at
        Returns
        -------
        x_best : np.ndarray
            Location to sample at
        acq_at_x_best : float
            Value of the acquisition function at the sampling locations
        """
        acq_orig = self._create_acq_function()
        M = self._get_min_value_for_acq()
        if self.offset_acq:
            _, min_val = self._optimise_acq_func(
                acq_orig, max_or_min='min',
                acq_opt_params=self.min_acq_opt_params)
            acq_orig = AcquisitionWithOffset(acq_orig, min_val)
        # ASYNC
        x_busy = self.interface.get_array_of_running_jobs()
        if x_busy is not None:
            # One local Lipschitz estimate per busy location (this is what
            # distinguishes this class from PLAyBOOK_L's single global L).
            L_busy = []
            for ii in range(len(x_busy)):
                L = estimate_lipschitz_around_x(x_busy[ii], self.surrogate,
                                                self.bounds)
                L_busy.append(L)
            L_busy = np.hstack(L_busy).flatten()
            acq = self._create_lp_acq_function(x_busy, L_busy, M,
                                               acq_orig=acq_orig)
        else:
            L_busy = None
            acq = acq_orig
        x_best, acq_at_x_best = self._optimise_acq_func(acq)
        if self.debug:
            self.plot_acq(acq.evaluate, x_busy=x_busy)
        x_batch = x_best
        acq_at_each_x_batch = acq_at_x_best
        L_batch = np.array(())
        if self.batch_size > 1:
            while len(x_batch) < self.batch_size:
                # Local Lipschitz estimate around the most recent batch point.
                L = estimate_lipschitz_around_x(x_batch[-1], self.surrogate,
                                                self.bounds)
                L_batch = np.hstack((L_batch, L))
                # ASYNC
                # Penalise busy points and the batch chosen so far, each with
                # its own local constant.
                if x_busy is not None:
                    x_pen = np.vstack((x_busy, x_batch))
                    L_pen = np.hstack((L_busy, L_batch))
                else:
                    x_pen = x_batch
                    L_pen = L_batch
                # acq = self._create_lp_acq_function(
                #     np.vstack([np.atleast_2d(p) for p in x_pen]),
                #     L_pen, M, acq_orig=acq_orig)
                acq = self._create_lp_acq_function(x_pen,
                                                   L_pen, M, acq_orig=acq_orig)
                if self.debug:
                    self.plot_acq(acq.evaluate, x_batch=x_batch, x_busy=x_busy)
                x_best, acq_at_x_best = self._optimise_acq_func(acq)
                x_batch = np.vstack((x_batch, x_best))
                acq_at_each_x_batch = np.hstack((acq_at_each_x_batch,
                                               acq_at_x_best))
        return x_batch, acq_at_each_x_batch
    def _create_lp_acq_function(self, x_pen, L_pen, M,
                                acq_orig=None,
                                **kwargs) -> AcquisitionFunction:
        """Create the LP acquisition function object
        This function is only used to convert the acquisition function into
        a Local Penalization version
        Returns
        -------
        AcquisitionFunction
            The instantiation of the desired acquisition function class
        """
        if acq_orig is None:
            acq = self._create_acq_function()
        else:
            acq = acq_orig
        lp_acq = LocalLipschitzPenalisedAcquisition(
            self.surrogate, acq, x_pen, L_pen, best=M,
            transform=self.lp_transform, **kwargs)
        return lp_acq
class PLAyBOOK_H(PLAyBOOK_L):
    """Hard min-aware cone penalisation variant.

    Uses the radius of the penalising ball as a function of the
    distribution over L.
    """
    def _create_lp_acq_function(self, x_batch, L, M,
                                acq_orig=None,
                                **kwargs) -> AcquisitionFunction:
        """Wrap the base acquisition in a hard min-aware cone penaliser.

        Returns
        -------
        AcquisitionFunction
            The instantiation of the desired acquisition function class
        """
        base_acq = acq_orig if acq_orig is not None else \
            self._create_acq_function()
        return HardMinAwareConeAcquisition(
            self.surrogate, base_acq, x_batch, L, best=M,
            transform=self.lp_transform, **kwargs)
class PLAyBOOK_HL(PLAyBOOK_LL):
    """Local Penalisation with local Lipschitz constants.

    Uses the radius of the penalising ball as a function of the
    distribution over L.
    """
    def _create_lp_acq_function(self, x_batch, L, M,
                                acq_orig=None,
                                **kwargs) -> AcquisitionFunction:
        """Wrap the base acquisition in a hard min-aware cone penaliser.

        Returns
        -------
        AcquisitionFunction
            The instantiation of the desired acquisition function class
        """
        base_acq = acq_orig if acq_orig is not None else \
            self._create_acq_function()
        return HardMinAwareConeAcquisition(
            self.surrogate, base_acq, x_batch, L, best=M,
            transform=self.lp_transform, **kwargs)
class AsyncBOTS(PLAyBOOK_L):
    """BO with Thompson Sampling
    """
    def draw_gp_samples(self, x, n_samples):
        """Draw joint samples from the surrogate GP posterior at ``x``.

        Parameters
        ----------
        x : np.ndarray
            test inputs (Nxd)
        n_samples : int
            number of samples to draw

        Returns
        -------
        np.ndarray
            posterior samples, one column per sample (N x n_samples)
        """
        mu, cov = self.surrogate.predict(x, full_cov=True)
        # f = mu + L z with z ~ N(0, I) draws from N(mu, cov).
        L = stable_cholesky(cov)
        # BUGFIX: np.random.rand draws U(0, 1); sampling from a Gaussian
        # posterior requires standard-normal draws, otherwise the samples
        # are biased (shifted mean, shrunken variance).
        Z = np.random.randn(x.shape[0], n_samples)
        f_samples = L.dot(Z) + mu
        return f_samples
    def rand_maximiser(self, obj_f, gridSize=10000):
        """Maximise ``obj_f`` over a random grid plus the observed inputs.

        Parameters
        ----------
        obj_f : Callable
            vectorised objective, evaluated on the whole candidate set.
        gridSize : int
            number of uniform random candidate points.

        Returns
        -------
        max_X : np.ndarray
            candidate with the highest objective value.
        max_val
            objective value at ``max_X``.
        """
        x_ob = np.copy(self.surrogate.X)
        # Uniform random candidates within the bounds; the observed
        # locations are appended so known points are always considered.
        XGrid = (self.bounds[:, 1] - self.bounds[:, 0]) * \
                np.random.rand(gridSize, len(self.bounds)) + self.bounds[:, 0]
        XGrid = np.vstack((XGrid, x_ob))
        f_val = obj_f(XGrid)
        max_idx = np.argmax(f_val)
        max_val = f_val[max_idx]
        max_X = XGrid[max_idx]
        return max_X, max_val
    def get_next(self):
        """Finds the next point(s) to sample at
        Returns
        -------
        x_best : np.ndarray
            Location to sample at
        acq_at_x_best : float
            Value of the acquisition function at the sampling locations
        """
        # Maximising the negated draw == minimising the GP posterior sample.
        minus_f_samples = lambda x: - self.draw_gp_samples(x, 1)
        x_best, acq_at_x_best = self.rand_maximiser(minus_f_samples, 10000)
        if self.debug:
            x_busy = self.interface.get_array_of_running_jobs()
            self.plot_acq(None, x_best=x_best, x_busy=x_busy)
        x_batch = [x_best, ]
        acq_at_each_x_batch = [acq_at_x_best, ]
        if self.batch_size > 1:
            while len(x_batch) < self.batch_size:
                if self.debug:
                    self.plot_acq(minus_f_samples, x_batch)
                # Each batch member comes from a fresh posterior draw, which
                # provides the exploration in Thompson sampling.
                x_best, acq_at_x_best = self.rand_maximiser(minus_f_samples,
                                                            10000)
                x_batch.append(x_best)
                acq_at_each_x_batch.append(acq_at_x_best)
        return x_batch, acq_at_each_x_batch
| [
"pandas.DataFrame",
"bayesopt.util.stable_cholesky",
"numpy.copy",
"numpy.argmax",
"numpy.random.rand",
"bayesopt.acquisition.AcquisitionWithOffset",
"ml_utils.lipschitz.estimate_lipschitz_constant",
"numpy.argmin",
"time.time",
"numpy.hstack",
"numpy.array",
"sys.stdout.flush",
"ml_utils.ti... | [((3020, 3347), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ii', 't', 'y_min', 'x_min', 'n_busy', 'x_busy', 'n_data', 'model_x',\n 'model_y', 'model_param_array', 'acq_at_sample', 'requested_x_sample',\n 'x_sample', 'y_sample', 'time_taken_opt_surrogate',\n 'time_taken_find_y_min', 'time_taken_get_next', 'time_taken_bo_step',\n 'var_at_y_min', 'cond_k']"}), "(columns=['ii', 't', 'y_min', 'x_min', 'n_busy', 'x_busy',\n 'n_data', 'model_x', 'model_y', 'model_param_array', 'acq_at_sample',\n 'requested_x_sample', 'x_sample', 'y_sample',\n 'time_taken_opt_surrogate', 'time_taken_find_y_min',\n 'time_taken_get_next', 'time_taken_bo_step', 'var_at_y_min', 'cond_k'])\n", (3032, 3347), True, 'import pandas as pd\n'), ((7053, 7064), 'time.time', 'time.time', ([], {}), '()\n', (7062, 7064), False, 'import time\n'), ((11983, 11995), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (11992, 11995), True, 'import numpy as np\n'), ((12008, 12020), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (12017, 12020), True, 'import numpy as np\n'), ((14405, 14517), 'bayesopt.util.add_hallucinations_to_x_and_y', 'add_hallucinations_to_x_and_y', (['self', 'old_surrogate_x', 'old_surrogate_y', 'x_busy'], {'fixed_dim_vals': 'fixed_dim_vals'}), '(self, old_surrogate_x, old_surrogate_y,\n x_busy, fixed_dim_vals=fixed_dim_vals)\n', (14434, 14517), False, 'from bayesopt.util import add_hallucinations_to_x_and_y, stable_cholesky\n'), ((17711, 17767), 'ml_utils.lipschitz.estimate_lipschitz_constant', 'estimate_lipschitz_constant', (['self.surrogate', 'self.bounds'], {}), '(self.surrogate, self.bounds)\n', (17738, 17767), False, 'from ml_utils.lipschitz import estimate_lipschitz_constant, estimate_lipschitz_around_x\n'), ((21603, 21615), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (21611, 21615), True, 'import numpy as np\n'), ((26134, 26154), 'bayesopt.util.stable_cholesky', 'stable_cholesky', (['cov'], {}), '(cov)\n', (26149, 26154), False, 'from 
bayesopt.util import add_hallucinations_to_x_and_y, stable_cholesky\n'), ((26167, 26204), 'numpy.random.rand', 'np.random.rand', (['x.shape[0]', 'n_samples'], {}), '(x.shape[0], n_samples)\n', (26181, 26204), True, 'import numpy as np\n'), ((26474, 26499), 'numpy.copy', 'np.copy', (['self.surrogate.X'], {}), '(self.surrogate.X)\n', (26481, 26499), True, 'import numpy as np\n'), ((26655, 26679), 'numpy.vstack', 'np.vstack', (['(XGrid, x_ob)'], {}), '((XGrid, x_ob))\n', (26664, 26679), True, 'import numpy as np\n'), ((26728, 26744), 'numpy.argmax', 'np.argmax', (['f_val'], {}), '(f_val)\n', (26737, 26744), True, 'import numpy as np\n'), ((3585, 3636), 'numpy.vstack', 'np.vstack', (["[job['x'] for job in self.starting_jobs]"], {}), "([job['x'] for job in self.starting_jobs])\n", (3594, 3636), True, 'import numpy as np\n'), ((7102, 7133), 'ml_utils.timed_print', 'print', (['"""Started BayesOpt.run()"""'], {}), "('Started BayesOpt.run()')\n", (7107, 7133), True, 'from ml_utils import timed_print as print\n'), ((12293, 12311), 'numpy.vstack', 'np.vstack', (['x_batch'], {}), '(x_batch)\n', (12302, 12311), True, 'import numpy as np\n'), ((17657, 17697), 'bayesopt.acquisition.AcquisitionWithOffset', 'AcquisitionWithOffset', (['acq_orig', 'min_val'], {}), '(acq_orig, min_val)\n', (17678, 17697), False, 'from bayesopt.acquisition import AcquisitionWithOffset\n'), ((20740, 20780), 'bayesopt.acquisition.AcquisitionWithOffset', 'AcquisitionWithOffset', (['acq_orig', 'min_val'], {}), '(acq_orig, min_val)\n', (20761, 20780), False, 'from bayesopt.acquisition import AcquisitionWithOffset\n'), ((7502, 7513), 'time.time', 'time.time', ([], {}), '()\n', (7511, 7513), False, 'import time\n'), ((8280, 8291), 'time.time', 'time.time', ([], {}), '()\n', (8289, 8291), False, 'import time\n'), ((9015, 9026), 'time.time', 'time.time', ([], {}), '()\n', (9024, 9026), False, 'import time\n'), ((9066, 9077), 'time.time', 'time.time', ([], {}), '()\n', (9075, 9077), False, 'import time\n'), 
((9213, 9224), 'time.time', 'time.time', ([], {}), '()\n', (9222, 9224), False, 'import time\n'), ((9426, 9437), 'time.time', 'time.time', ([], {}), '()\n', (9435, 9437), False, 'import time\n'), ((11076, 11094), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11092, 11094), False, 'import sys\n'), ((15223, 15344), 'bayesopt.util.add_hallucinations_to_x_and_y', 'add_hallucinations_to_x_and_y', (['self', 'current_surrogate_x', 'current_surrogate_y', 'x_batch'], {'fixed_dim_vals': 'fixed_dim_vals'}), '(self, current_surrogate_x,\n current_surrogate_y, x_batch, fixed_dim_vals=fixed_dim_vals)\n', (15252, 15344), False, 'from bayesopt.util import add_hallucinations_to_x_and_y, stable_cholesky\n'), ((18410, 18453), 'ml_utils.timed_print', 'print', (['f"""Lipschitz constant estimate = {L}"""'], {}), "(f'Lipschitz constant estimate = {L}')\n", (18415, 18453), True, 'from ml_utils import timed_print as print\n'), ((19025, 19053), 'numpy.vstack', 'np.vstack', (['(x_batch, x_best)'], {}), '((x_batch, x_best))\n', (19034, 19053), True, 'import numpy as np\n'), ((19092, 19139), 'numpy.hstack', 'np.hstack', (['(acq_at_each_x_batch, acq_at_x_best)'], {}), '((acq_at_each_x_batch, acq_at_x_best))\n', (19101, 19139), True, 'import numpy as np\n'), ((20975, 21043), 'ml_utils.lipschitz.estimate_lipschitz_around_x', 'estimate_lipschitz_around_x', (['x_busy[ii]', 'self.surrogate', 'self.bounds'], {}), '(x_busy[ii], self.surrogate, self.bounds)\n', (21002, 21043), False, 'from ml_utils.lipschitz import estimate_lipschitz_constant, estimate_lipschitz_around_x\n'), ((21718, 21787), 'ml_utils.lipschitz.estimate_lipschitz_around_x', 'estimate_lipschitz_around_x', (['x_batch[-1]', 'self.surrogate', 'self.bounds'], {}), '(x_batch[-1], self.surrogate, self.bounds)\n', (21745, 21787), False, 'from ml_utils.lipschitz import estimate_lipschitz_constant, estimate_lipschitz_around_x\n'), ((21862, 21885), 'numpy.hstack', 'np.hstack', (['(L_batch, L)'], {}), '((L_batch, L))\n', (21871, 
21885), True, 'import numpy as np\n'), ((22678, 22706), 'numpy.vstack', 'np.vstack', (['(x_batch, x_best)'], {}), '((x_batch, x_best))\n', (22687, 22706), True, 'import numpy as np\n'), ((22745, 22792), 'numpy.hstack', 'np.hstack', (['(acq_at_each_x_batch, acq_at_x_best)'], {}), '((acq_at_each_x_batch, acq_at_x_best))\n', (22754, 22792), True, 'import numpy as np\n'), ((9278, 9313), 'ml_utils.timed_print', 'print', (['"""Selecting next point(s)..."""'], {}), "('Selecting next point(s)...')\n", (9283, 9313), True, 'from ml_utils import timed_print as print\n'), ((18595, 18623), 'numpy.vstack', 'np.vstack', (['(x_busy, x_batch)'], {}), '((x_busy, x_batch))\n', (18604, 18623), True, 'import numpy as np\n'), ((21146, 21163), 'numpy.hstack', 'np.hstack', (['L_busy'], {}), '(L_busy)\n', (21155, 21163), True, 'import numpy as np\n'), ((21978, 22006), 'numpy.vstack', 'np.vstack', (['(x_busy, x_batch)'], {}), '((x_busy, x_batch))\n', (21987, 22006), True, 'import numpy as np\n'), ((22035, 22063), 'numpy.hstack', 'np.hstack', (['(L_busy, L_batch)'], {}), '((L_busy, L_batch))\n', (22044, 22063), True, 'import numpy as np\n'), ((10702, 10726), 'ml_utils.timed_print', 'print', (['"""Used up budget."""'], {}), "('Used up budget.')\n", (10707, 10726), True, 'from ml_utils import timed_print as print\n'), ((10818, 10845), 'numpy.argmin', 'np.argmin', (['self.surrogate.Y'], {}), '(self.surrogate.Y)\n', (10827, 10845), True, 'import numpy as np\n'), ((11400, 11411), 'time.time', 'time.time', ([], {}), '()\n', (11409, 11411), False, 'import time\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import scipy.signal

# Public API of the module. CONSISTENCY FIX: `modal_phase_collinearity` and
# `ShearFrame` are public names defined in this module but were missing from
# __all__, so `from module import *` silently omitted them.
__all__ = ["find_rayleigh_damping_coeffs",
           "get_frequency_vector", "modal_assurance_criterion",
           "find_psd_matrix", "w2f", "f2w", "norm2", "modal_scale_factor",
           "mean_phase", "mean_phase_deviation", "modal_phase_collinearity",
           "Mode", "ShearFrame",
           ]
def find_rayleigh_damping_coeffs(freqs, damping_ratios):
    """Estimate the Rayleigh damping coefficients (a, b) by least squares.

    Rayleigh damping defines the damping matrix as

        C = a*M + b*K

    where M and K are the mass and stiffness matrices. The resulting
    modal damping ratio at angular frequency w is

        xi(w) = (a / w + b * w) / 2

    Given target damping ratios at two or more frequencies, the pair
    (a, b) is obtained from a least-squares fit.

    Arguments
    ---------
    freqs : 1darray
        Frequencies (rad/s) where the damping ratios are specified.
    damping_ratios : 1darray
        Damping ratios (c / c_cr) at the specified frequencies.

    Returns
    -------
    1darray
        The fitted coefficients (a, b).
    """
    coeff_matrix = .5 * np.array([[1 / wn, wn] for wn in freqs])
    solution, *_ = np.linalg.lstsq(coeff_matrix, damping_ratios, rcond=None)
    return solution
def w2f(w):
    """Convert angular frequency (rad/s) to ordinary frequency (Hz)."""
    full_circle = 2 * np.pi
    return w / full_circle
def f2w(f):
    """Convert ordinary frequency (Hz) to angular frequency (rad/s)."""
    full_circle = 2 * np.pi
    return f * full_circle
def modal_scale_factor(u, v):
    """Return the complex factor msf such that ``msf * u`` approximates v.

    The scaled vector ``msf * u`` has similar length and phase as v
    (least-squares projection of v onto u).

    Arguments
    ---------
    u, v : ndarray[Complex]
        Vectors to find the modal scale factor between.

    Returns
    -------
    complex
        The modal scale factor.
    """
    projection = u.conj().dot(v)
    energy = np.linalg.norm(u) ** 2
    return projection / energy
def modal_phase_collinearity(u):
    """Modal phase collinearity (MPC) of mode vector u.

    MPC quantifies the spatial consistency of identification results.
    For a classical normal mode all dofs vibrate in phase, so the mode
    shape is collinear in the complex plane and MPC = 1; MPC = 0
    indicates fully out-of-phase (complex) dofs.

    Arguments
    ---------
    u : 1darray[complex]
        Mode shape vector.

    Returns
    -------
    float
        MPC in [0, 1].

    References
    ----------
    Pappa RS et al. (1992) A consistent-mode indicator for the
    eigensystem realization algorithm. NASA Report TM-107607
    """
    cov = np.cov(u.real, u.imag)
    eigvals = np.linalg.eigvals(cov)
    # (2*l0/(l0+l1) - 1)**2 == ((l0-l1)/(l0+l1))**2, symmetric in the
    # eigenvalue order, so the unspecified ordering of eigvals is harmless.
    ratio = 2 * eigvals[0] / (eigvals[0] + eigvals[1]) - 1
    return ratio ** 2
def mean_phase(u):
    """Mean phase (MP) of mode vector u.

    The mean phase is the angle of the straight line fitted (via SVD)
    through the mode shape in the complex plane.

    Arguments
    ---------
    u : 1darray[complex]
        Mode shape vector.

    Returns
    -------
    float
        Mean phase of the mode vector (radians).

    References
    ----------
    Reynders E., Houbrechts J., De Roeck G., 2012. Fully automated
    (operational) modal analysis. Mechanical Systems and Signal
    Processing 29, 228-250. https://doi.org/10.1016/j.ymssp.2012.01.007
    """
    realimag = np.column_stack((np.real(u), np.imag(u)))
    _, _, vt = np.linalg.svd(realimag)
    v = vt.T
    # Direction orthogonal to the second right singular vector.
    return np.arctan(-v[0, 1] / v[1, 1])
def mean_phase_deviation(u):
    """Mean phase deviation (MPD) of mode vector u.

    MPD is the amplitude-weighted deviation of each dof's phase from
    the mean phase line; it measures mode shape complexity (0 for a
    classical normal mode).

    Arguments
    ---------
    u : 1darray[complex]
        Mode shape vector.

    Returns
    -------
    float
        Mean phase deviation (radians).

    References
    ----------
    Reynders E., Houbrechts J., De Roeck G., 2012. Fully automated
    (operational) modal analysis. Mechanical Systems and Signal
    Processing 29, 228-250. https://doi.org/10.1016/j.ymssp.2012.01.007
    """
    realimag = np.column_stack((np.real(u), np.imag(u)))
    _, _, vt = np.linalg.svd(realimag)
    v = vt.T
    weights = np.abs(u)
    # Per-dof angular distance from the fitted mean-phase line.
    numerator = np.real(u) * v[1, 1] - np.imag(u) * v[0, 1]
    denominator = np.sqrt(v[0, 1] ** 2 + v[1, 1] ** 2) * np.abs(u)
    deviations = np.arccos(np.abs(numerator / denominator))
    return np.sum(weights * deviations) / np.sum(weights)
def norm2(v):
    """Return the Euclidean norm ``||v||_2`` of vector v."""
    return np.linalg.norm(v, ord=2)
def get_frequency_vector(fs, n):
    """Return `n` evenly spaced frequencies (Hz) from 0 to the Nyquist
    frequency fs/2, where `fs` is the sampling rate in Hz."""
    return np.linspace(0., fs/2, n)
def modal_assurance_criterion(u, v):
    """Calculate the modal assurance criterion (MAC) between u and v.

    MAC is the squared linear correlation between two mode shapes and
    varies between 0 and 1: MAC = 1 means the vectors are perfectly
    correlated, MAC = 0 means no linear correlation.

    Arguments
    ---------
    u, v : 1darray
        Mode shapes to compare.

    Returns
    -------
    float
        MAC between u and v.
    """
    uh = np.conjugate(u).T
    vh = np.conjugate(v).T
    cross = np.abs(uh.dot(v)) ** 2
    scale = (uh.dot(u) * vh.dot(v)).real
    return cross / scale
def find_psd_matrix(y, **kwargs):
    """Calculate the cross power spectral density matrix of data y.

    Arguments
    ---------
    y : ndarray
        Measurement matrix where each row is the entire time series of
        one measurement channel.
    kwargs :
        Forwarded to `scipy.signal.csd` (e.g. `fs`, `nperseg`).

    Returns
    -------
    3darray
        PSD matrix of shape (n, m, m): the first axis is frequency
        (see `get_frequency_vector`), the remaining two axes are the
        output/input channel indices as ordered in y.
    """
    csd = scipy.signal.csd
    cross_spectra = [[csd(yi, yj, **kwargs)[1] for yj in y]
                     for yi in y]
    return np.array(cross_spectra).T
class ShearFrame(object):
    def __init__(self, n, m, k):
        """Create a shear frame object.

        Define a shear frame with `n` storeys, where every floor has
        mass `m` and each of the two columns per storey has stiffness
        `k`:

                 m               DOF
             =========  --->      n
             |       |
           k |       | k
             |   m   |
             =========  --->     n-1
             |       |
             :       :
             |   m   |
             =========  --->      1
             |       |
           k |       | k
             |       |
            +++     +++

        The natural frequencies and mode shapes of this system are
        known analytically. The `r`th natural frequency is

            w_r = 2 * sqrt(k / m) * sin(pi/2 * (2r-1) / (2n+1))

        and the `i`th element of the `r`th mode shape is

            phi_i = sin(i * pi * (2r-1) / (2n+1))

        Arguments
        ---------
        n : int
            Number of storeys (= number of DOFs of the system).
        m : float
            Mass of each floor.
        k : float
            Stiffness of each column in each storey.
        """
        self.n = n
        self.m = m
        self.k = k
        self.M = np.eye(n) * m          # lumped (diagonal) mass matrix
        self.K = self._get_K()
        self.C = np.zeros_like(self.M)   # undamped until Rayleigh damping is set

    def _get_K(self):
        """Assemble the tridiagonal stiffness matrix of the shear frame."""
        k, n = self.k, self.n
        # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24 (AttributeError on modern NumPy); the builtin `float`
        # is the documented replacement and is what the alias pointed to.
        K = np.zeros((n, n), float)
        for i in range(n):
            K[i, i] = 2 * k
            if i > 0:
                K[i-1, i] = -k
            if i < n-1:
                K[i+1, i] = -k
        # Top storey is restrained from below only.
        K[-1, -1] = k
        return K

    def get_natural_frequency(self, r):
        """Return the analytical natural frequency of mode `r`.

        Arguments
        ---------
        r : int
            Mode number (1-based).

        Returns
        -------
        float
            Natural frequency of mode `r` in rad/s.
        """
        k, m, n = self.k, self.m, self.n
        return 2 * np.sqrt(k / m) * np.sin(np.pi / 2 * (2*r-1) / (2*n+1))

    def get_mode_shape(self, r):
        """Return the analytical mode shape of mode `r`.

        Arguments
        ---------
        r : int
            Mode number (1-based).

        Returns
        -------
        1darray
            Mode shape of mode `r`, normalized to unit length.
        """
        x = np.array([np.sin(i*np.pi*(2*r-1)/(2*self.n+1))
                      for i in range(1, self.n+1)])
        return x / norm2(x)

    def set_rayleigh_damping_matrix(self, freqs, xis):
        """Set the damping matrix to the Rayleigh damping matrix.

        Rayleigh damping (C = a*M + b*K) gives a modal damping ratio

            xi(w) = (a/w + b*w) / 2.

        Specify the desired damping ratio at two or more frequencies;
        with more than two, (a, b) are least-square fitted.

        Arguments
        ---------
        freqs : 1darray
            Frequencies (rad/s) where the damping ratios are specified.
        xis : 1darray
            Damping ratios (c / c_cr) at the specified frequencies.
        """
        a, b = find_rayleigh_damping_coeffs(freqs, xis)
        self._rayleigh_coeffs = (a, b)   # kept for get_rayleigh_damping_ratio
        self.C = a*self.M + b*self.K

    def get_rayleigh_damping_ratio(self, r):
        """Return the Rayleigh damping ratio of mode `r`.

        Assumes `set_rayleigh_damping_matrix` has been called first.

        Arguments
        ---------
        r : int
            Mode number (1-based).

        Returns
        -------
        float
            Damping ratio of mode `r`.
        """
        a, b = self._rayleigh_coeffs
        w = self.get_natural_frequency(r)
        return .5*(a/w + b*w)

    @property
    def state_matrix(self):
        "Continuous-time state matrix A; state vector ordered [velocities, displacements]."
        M, C, K = self.M, self.C, self.K
        Z = np.zeros_like(M)
        I = np.eye(M.shape[0])
        A11 = -np.linalg.solve(M, C)
        A12 = -np.linalg.solve(M, K)
        A = np.r_[np.c_[A11, A12],
                  np.c_[I, Z]]
        return A

    @property
    def input_influence_matrix(self):
        "Continuous-time state space input influence matrix B."
        return np.r_[np.linalg.solve(self.M, np.eye(self.n)),
                    np.zeros((self.n, self.n))]

    def get_state_space_matrices(self):
        """Return the continuous-time state space matrices (A, B, C, D).

        The output equation stacks accelerations, velocities and
        displacements (in that order), i.e. y = [a; v; d].
        """
        A = self.state_matrix
        B = self.input_influence_matrix
        n = self.n
        O = np.zeros((n, n))
        I = np.eye(n)
        C = np.r_[A[:n, :],
                  np.c_[I, O],
                  np.c_[O, I]]
        D = np.r_[np.linalg.solve(self.M, I),
                  O,
                  O]
        return A, B, C, D

    def simulate(self, t, F=None, d0=None, v0=None):
        """Obtain the system response to load and initial conditions.

        Simulate the response at time points `t` due to loads `F` with
        initial displacements `d0` and velocities `v0`.

        Arguments
        ---------
        t : 1darray
            Time points at which to evaluate the system response.
        F : Optional[2darray]
            Load matrix; F[i, j] is the load applied to dof `i` at
            time `t[j]`. Zeros are assumed if None.
        d0, v0 : Optional[1darray]
            Initial displacement and velocity vectors. Zeros are
            assumed if None.

        Returns
        -------
        A, V, D : 2darray
            Acceleration, velocity and displacement histories; one row
            per dof, one column per time point.
        """
        n = self.n
        d0 = np.zeros(n) if d0 is None else d0
        v0 = np.zeros(n) if v0 is None else v0
        x0 = np.r_[v0, d0]
        sys = scipy.signal.StateSpace(*self.get_state_space_matrices())
        U = np.zeros((t.size, n)) if F is None else F.T
        _, y, _ = scipy.signal.lsim(sys, U, t, X0=x0)
        y = y.T
        A = y[:n, :]
        V = y[n:2*n, :]
        D = y[2*n:, :]
        return A, V, D
class Mode(object):
    def __init__(self, eigenvalue, eigenvector):
        """Vibration-mode characteristics from an eigenvalue/vector pair.

        A Mode represents a single-degree-of-freedom dynamic system
        with frequency (`f`, `w`), damping (`xi`) and a mode shape
        vector (`v`), plus complexity indicators (`mpc`, `mp`, `mpd`).

        Arguments
        ---------
        eigenvalue : complex
            Eigenvalue (in continuous time) of the mode.
        eigenvector : ndarray[float or complex]
            Eigenvector or modal vector.
        """
        self.eigenvalue = eigenvalue
        self.eigenvector = eigenvector

    @property
    def v(self):
        "Mode shape vector scaled to unit Euclidean length."
        vec = self.eigenvector
        return vec / np.linalg.norm(vec, 2)

    @property
    def w(self):
        "Undamped natural frequency (rad/s)."
        return self.wd / np.sqrt(1 - self.xi ** 2)

    @property
    def wd(self):
        "Damped natural frequency (rad/s)."
        return self.eigenvalue.imag

    @property
    def xi(self):
        "Damping ratio (fraction of critical damping)."
        lam = self.eigenvalue
        return -lam.real / np.abs(lam)

    @property
    def f(self):
        "Undamped natural frequency (Hz)."
        return self.w / (2.0 * np.pi)

    @property
    def fd(self):
        "Damped natural frequency (Hz)."
        return self.wd / (2. * np.pi)

    @property
    def mean_phase_colinearity(self):
        "Modal phase collinearity (MPC) of the mode shape."
        return modal_phase_collinearity(self.v)

    @property
    def mpc(self):
        "Alias for `mean_phase_colinearity`."
        return self.mean_phase_colinearity

    @property
    def mean_phase(self):
        "Mean phase (MP) of the mode shape."
        return mean_phase(self.v)

    @property
    def mp(self):
        "Alias for `mean_phase`."
        return self.mean_phase

    @property
    def mean_phase_deviation(self):
        "Mean phase deviation (MPD) of the mode shape."
        return mean_phase_deviation(self.v)

    @property
    def mpd(self):
        "Alias for `mean_phase_deviation`."
        return self.mean_phase_deviation

    @classmethod
    def find_modes_from_ss(cls, A, C, fs):
        """Return all modes of the (discrete) system matrices A and C.

        Arguments
        ---------
        A : 2darray
            Discrete-time state space matrix.
        C : 2darray
            Output influence matrix.
        fs : float
            Sampling rate, used to map discrete eigenvalues to
            continuous time via u = fs * log(lambda).

        Returns
        -------
        list
            One Mode per eigenvalue with positive imaginary part
            (i.e. one per complex-conjugate pair).
        """
        eigvals_dt, eigvecs = np.linalg.eig(A)
        eigvals_ct = fs * np.log(eigvals_dt)
        keep = eigvals_ct.imag > 0
        shapes = C.dot(eigvecs[:, keep])
        return [cls(lam, phi)
                for lam, phi in zip(eigvals_ct[keep], shapes.T)]
| [
"numpy.linalg.eigvals",
"numpy.abs",
"numpy.sum",
"numpy.imag",
"numpy.sin",
"numpy.linalg.norm",
"numpy.conjugate",
"numpy.linalg.solve",
"numpy.zeros_like",
"numpy.linalg.eig",
"numpy.linspace",
"numpy.real",
"numpy.cov",
"numpy.arctan",
"numpy.linalg.lstsq",
"numpy.log",
"numpy.ze... | [((2816, 2838), 'numpy.cov', 'np.cov', (['u.real', 'u.imag'], {}), '(u.real, u.imag)\n', (2822, 2838), True, 'import numpy as np\n'), ((2847, 2867), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['S'], {}), '(S)\n', (2864, 2867), True, 'import numpy as np\n'), ((3536, 3565), 'numpy.arctan', 'np.arctan', (['(-V[0, 1] / V[1, 1])'], {}), '(-V[0, 1] / V[1, 1])\n', (3545, 3565), True, 'import numpy as np\n'), ((4239, 4248), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (4245, 4248), True, 'import numpy as np\n'), ((4489, 4509), 'numpy.linalg.norm', 'np.linalg.norm', (['v', '(2)'], {}), '(v, 2)\n', (4503, 4509), True, 'import numpy as np\n'), ((4556, 4583), 'numpy.linspace', 'np.linspace', (['(0.0)', '(fs / 2)', 'n'], {}), '(0.0, fs / 2, n)\n', (4567, 4583), True, 'import numpy as np\n'), ((1178, 1218), 'numpy.array', 'np.array', (['[[1 / wn, wn] for wn in freqs]'], {}), '([[1 / wn, wn] for wn in freqs])\n', (1186, 1218), True, 'import numpy as np\n'), ((1253, 1299), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'damping_ratios'], {'rcond': 'None'}), '(A, damping_ratios, rcond=None)\n', (1268, 1299), True, 'import numpy as np\n'), ((4309, 4345), 'numpy.sqrt', 'np.sqrt', (['(V[0, 1] ** 2 + V[1, 1] ** 2)'], {}), '(V[0, 1] ** 2 + V[1, 1] ** 2)\n', (4316, 4345), True, 'import numpy as np\n'), ((4340, 4349), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (4346, 4349), True, 'import numpy as np\n'), ((4400, 4409), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (4406, 4409), True, 'import numpy as np\n'), ((7804, 7825), 'numpy.zeros_like', 'np.zeros_like', (['self.M'], {}), '(self.M)\n', (7817, 7825), True, 'import numpy as np\n'), ((7891, 7917), 'numpy.zeros', 'np.zeros', (['(n, n)', 'np.float'], {}), '((n, n), np.float)\n', (7899, 7917), True, 'import numpy as np\n'), ((10654, 10670), 'numpy.zeros_like', 'np.zeros_like', (['M'], {}), '(M)\n', (10667, 10670), True, 'import numpy as np\n'), ((10683, 10701), 'numpy.eye', 'np.eye', (['M.shape[0]'], {}), 
'(M.shape[0])\n', (10689, 10701), True, 'import numpy as np\n'), ((11274, 11290), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (11282, 11290), True, 'import numpy as np\n'), ((11303, 11312), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (11309, 11312), True, 'import numpy as np\n'), ((15011, 15027), 'numpy.linalg.eig', 'np.linalg.eig', (['A'], {}), '(A)\n', (15024, 15027), True, 'import numpy as np\n'), ((1975, 1992), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (1989, 1992), True, 'import numpy as np\n'), ((4259, 4269), 'numpy.real', 'np.real', (['u'], {}), '(u)\n', (4266, 4269), True, 'import numpy as np\n'), ((4280, 4290), 'numpy.imag', 'np.imag', (['u'], {}), '(u)\n', (4287, 4290), True, 'import numpy as np\n'), ((5141, 5156), 'numpy.conjugate', 'np.conjugate', (['x'], {}), '(x)\n', (5153, 5156), True, 'import numpy as np\n'), ((7742, 7751), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (7748, 7751), True, 'import numpy as np\n'), ((8512, 8557), 'numpy.sin', 'np.sin', (['(np.pi / 2 * (2 * r - 1) / (2 * n + 1))'], {}), '(np.pi / 2 * (2 * r - 1) / (2 * n + 1))\n', (8518, 8557), True, 'import numpy as np\n'), ((10717, 10738), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'C'], {}), '(M, C)\n', (10732, 10738), True, 'import numpy as np\n'), ((10754, 10775), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'K'], {}), '(M, K)\n', (10769, 10775), True, 'import numpy as np\n'), ((12466, 12477), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (12474, 12477), True, 'import numpy as np\n'), ((12513, 12524), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (12521, 12524), True, 'import numpy as np\n'), ((12659, 12680), 'numpy.zeros', 'np.zeros', (['(t.size, n)'], {}), '((t.size, n))\n', (12667, 12680), True, 'import numpy as np\n'), ((13545, 13580), 'numpy.linalg.norm', 'np.linalg.norm', (['self.eigenvector', '(2)'], {}), '(self.eigenvector, 2)\n', (13559, 13580), True, 'import numpy as np\n'), ((13651, 13676), 'numpy.sqrt', 'np.sqrt', (['(1 - 
self.xi ** 2)'], {}), '(1 - self.xi ** 2)\n', (13658, 13676), True, 'import numpy as np\n'), ((13828, 13837), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (13834, 13837), True, 'import numpy as np\n'), ((15043, 15053), 'numpy.log', 'np.log', (['lr'], {}), '(lr)\n', (15049, 15053), True, 'import numpy as np\n'), ((3487, 3497), 'numpy.real', 'np.real', (['u'], {}), '(u)\n', (3494, 3497), True, 'import numpy as np\n'), ((3499, 3509), 'numpy.imag', 'np.imag', (['u'], {}), '(u)\n', (3506, 3509), True, 'import numpy as np\n'), ((4193, 4203), 'numpy.real', 'np.real', (['u'], {}), '(u)\n', (4200, 4203), True, 'import numpy as np\n'), ((4205, 4215), 'numpy.imag', 'np.imag', (['u'], {}), '(u)\n', (4212, 4215), True, 'import numpy as np\n'), ((8495, 8509), 'numpy.sqrt', 'np.sqrt', (['(k / m)'], {}), '(k / m)\n', (8502, 8509), True, 'import numpy as np\n'), ((8923, 8973), 'numpy.sin', 'np.sin', (['(i * np.pi * (2 * r - 1) / (2 * self.n + 1))'], {}), '(i * np.pi * (2 * r - 1) / (2 * self.n + 1))\n', (8929, 8973), True, 'import numpy as np\n'), ((11047, 11073), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (11055, 11073), True, 'import numpy as np\n'), ((11421, 11447), 'numpy.linalg.solve', 'np.linalg.solve', (['self.M', 'I'], {}), '(self.M, I)\n', (11436, 11447), True, 'import numpy as np\n'), ((4380, 4397), 'numpy.abs', 'np.abs', (['(num / den)'], {}), '(num / den)\n', (4386, 4397), True, 'import numpy as np\n'), ((11009, 11023), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (11015, 11023), True, 'import numpy as np\n')] |
import os
import matplotlib.pyplot as plt
import numpy as np
import datetime

# Very hard-coded script to produce statistics from logs received from nodes
# at the cluster. Place the logs in the `logs` folder; it should contain 2
# directories, both with logs from the same protocol stack, where in each
# directory 2 variables change: the payload size and the bitrate.
# The script produces three images: a bar chart of sent messages, one of
# failed messages and one of reliability, plus a text report of latencies.
rootdir = 'logs'
# Legend labels for the four payload/rate combinations.
label1 ="Payload=10B and rate=1s"
label3 ="Payload=10B and rate=3s"
label4 ="Payload=1000B and rate=1s"
label2 ="Payload=1000B and rate=3s"
label5 ="Size in bytes"
title = "Plumtree + Hyparview"
SentMessages=[]     # one list per test run: sent-message count per node
FailedMessages=[]   # one list per test run: failed-message count per node (strings)
lastMetrics =""
listval =[]
failedvals =[]
nfiles = 0
size1 = 0           # NOTE(review): never read below -- presumably leftover
size2 = 0           # NOTE(review): never read below -- presumably leftover
testalllat = []     # average broadcast latency (seconds) per test run
for subdir, dirs, files in os.walk(rootdir):
    # Iterate each log directory; each `dir2` holds the runs for one
    # payload-size / message-rate combination.
    for dir2 in dirs:
        for dir in os.listdir("C:/Users/mikep/PycharmProjects/logcomputations/logs/"+dir2):
            listval = []        # sent-message count, one entry per log file (node)
            failedvals = []     # failed-message count per node, kept as strings
            mtimes = dict()     # message id -> [earliest seen, latest seen] timestamp
            nfiles=0
            for file in os.listdir("C:/Users/mikep/PycharmProjects/logcomputations/logs/"+dir2+"/"+dir):
                nfiles = nfiles + 1
                # NOTE(review): file handle is never closed; harmless for a
                # one-shot script, but `with open(...)` would be cleaner.
                qbfile = open("C:/Users/mikep/PycharmProjects/logcomputations/logs/"+dir2+"/"+dir+"/"+file, "r")
                sentCount = 0
                receivedCount = 0
                lastMetrics=""
                for line in qbfile:
                    if "m Sent" in line:
                        sentCount = sentCount + 1
                    if "m Received GossipMessage" in line:
                        receivedCount = receivedCount +1
                    if "ProtocolMetrics" in line:
                        lastMetrics = line   # keep only the LAST metrics line of the file
                    if "m Sending" in line:
                        # Broadcast start: extend the [first, last] time window
                        # of this message id with the send timestamp.
                        msgid = line[line.index("m Sending: "):len(line)-1]
                        msgid=msgid.split(':')[1]
                        # Timestamp sits at fixed columns 7-18, "HH:MM:SS,mmm".
                        timestring = line[7:19]
                        splited = timestring.split(':')
                        micro = splited[2].split(',')
                        now = datetime.datetime.now()
                        # NOTE(review): micro[1] looks like milliseconds but is
                        # passed as `microsecond`, scaling sub-second latency by
                        # 1/1000 -- confirm against the log timestamp format.
                        time = now.replace(hour=int(splited[0]), minute=int(splited[1]), second=int(micro[0]), microsecond=int(micro[1]))
                        if msgid in mtimes:
                            if time < mtimes[msgid][0]:
                                mtimes[msgid][0] = time
                            if time > mtimes[msgid][1]:
                                mtimes[msgid][1] = time
                        else:
                            vals = []
                            vals.append(time)
                            vals.append(time)
                            mtimes[msgid] = vals
                    if "BroadcastApp" in line and "m Received" in line:
                        # Application-layer delivery: extend the same message's
                        # [first, last] window with the delivery timestamp.
                        msgid = line[line.index("m Received "):line.index(" from")]
                        msgid2=msgid.split(' ')[2] +msgid.split(' ')[3] + msgid.split(' ')[4] +msgid.split(' ')[5]
                        timestring = line[7:19]
                        splited = timestring.split(':')
                        micro = splited[2].split(',')
                        now = datetime.datetime.now()
                        time = now.replace(hour=int(splited[0]), minute=int(splited[1]), second=int(micro[0]), microsecond=int(micro[1]))
                        if msgid2 in mtimes:
                            if time < mtimes[msgid2][0]:
                                mtimes[msgid2][0] = time
                            if time > mtimes[msgid2][1]:
                                mtimes[msgid2][1] = time
                        else:
                            vals = []
                            vals.append(time)
                            vals.append(time)
                            mtimes[msgid2] = vals
                listval.append(sentCount)
                # The last ProtocolMetrics line contains "...,<name>=<count>,..."
                # as its third comma field; the count stays a string here and is
                # int()-converted later in the reliability section.
                failedvals.append(lastMetrics.split(",")[2].split("=")[1])
            SentMessages.append(listval)
            FailedMessages.append(failedvals)
            # Average broadcast latency for this run: (last delivery - first
            # send) summed over all messages, then divided by the number of
            # log files. NOTE(review): dividing by nfiles rather than by the
            # number of messages -- confirm this is the intended average.
            totallatency = 0
            for key in mtimes:
                duration = mtimes[key][1] - mtimes[key][0]
                totallatency = totallatency + float(duration.total_seconds())
            totallatency = totallatency/nfiles
            testalllat.append(totallatency)
    # Only the top level of os.walk is needed -- stop after the root pass.
    break
# x locations for the grouped bars, one group per node/log file.
y = []
b =[]
for j in range(0, nfiles):
    y.append(j)
    b.append(j)
ind = np.arange(nfiles)
# Figure size
# plt.figure(figsize=(10,5))
# Width of a bar
width = 0.4
# Byte totals (message count * payload size in bytes); only used by the
# commented-out plt.bar calls below.
totalsizes = []
for j in SentMessages[0]:
    totalsizes.append(j*10)
totalsizes2 = []
for j in SentMessages[1]:
    totalsizes2.append(j*10)
totalsizes3 = []
for j in SentMessages[2]:
    totalsizes3.append(j*10000)
totalsizes4 = []
for j in SentMessages[3]:
    totalsizes4.append(j*10000)
fig = plt.figure()
fig.set_figheight(10)
fig.set_figwidth(10)
# Plot the per-node sent-message counts of the four runs.
# NOTE(review): runs 2-4 all use the same `ind + width` offset, so those
# three bar series are drawn on top of each other -- confirm intended.
plt.bar(ind,SentMessages[0] , width, label=label1)
# plt.bar(ind+ width , totalsizes, width, label="wat")
plt.bar(ind+ width , SentMessages[1], width, label=label3)
# plt.bar(ind+ width , totalsizes2, width, label=label5)
plt.bar(ind+ width , SentMessages[2], width, label=label4)
# plt.bar(ind+ width , totalsizes3, width, label=label5)
plt.bar(ind+ width , SentMessages[3], width, label=label2)
# plt.bar(ind+ width , totalsizes4, width, label=label5)
plt.xlabel('Nodes')
plt.ylabel('Messages Sent')
plt.title(title)
# Finding the best position for legends and putting it
plt.legend(loc='best')
plt.savefig(title+"Sent", dpi=300)
plt.clf()
# Failed-message byte totals; only used by the commented-out plots below.
# NOTE(review): FailedMessages entries are strings (parsed from log text),
# so `j*10` repeats the string rather than multiplying -- harmless only
# because these lists are never plotted.
totalfail = []
totalfail1 = []
totalfail2 = []
totalfail3 = []
for j in FailedMessages[0]:
    totalfail.append(j*10)
for j in FailedMessages[1]:
    totalfail1.append(j*10)
for j in FailedMessages[2]:
    totalfail2.append(j*10000)
for j in FailedMessages[3]:
    totalfail3.append(j*10000)
# Per-node failed-message counts of the four runs (string y-values are
# handled categorically by matplotlib).
plt.bar(ind,FailedMessages[0] , width, label=label1)
# plt.bar(ind+ width , totalfail, width, label="wat1")
plt.bar(ind+ width , FailedMessages[1], width, label=label3)
# plt.bar(ind+ width , totalfail1, width, label="wat3")
plt.bar(ind+ width , FailedMessages[2], width, label=label4)
# plt.bar(ind+ width , totalfail2, width, label="watwow")
plt.bar(ind+ width , FailedMessages[3], width, label=label2)
# plt.bar(ind+ width , totalfail3, width, label="nani")
plt.xlabel('Nodes')
plt.ylabel('Failed Messages')
plt.title(title)
plt.legend(loc='best')
plt.savefig(title+"FailedMessages", dpi=300)
plt.clf()
# Reliability per node = 1 - failed/sent; a node that sent nothing gets 0.
relial =[]
relial1 =[]
relial2 =[]
relial3 =[]
test = FailedMessages[0]
total = SentMessages[0]
for val in range(0, nfiles):
    try:
        relial.append((1-(int(test[val])/total[val])))
    except ZeroDivisionError :
        relial.append(0)
test = FailedMessages[1]
total = SentMessages[1]
for val in range(0, nfiles):
    try:
        relial1.append((1-(int(test[val])/total[val])))
    except ZeroDivisionError:
        relial1.append(0)
test = FailedMessages[2]
total = SentMessages[2]
for val in range(0, nfiles):
    try:
        relial2.append((1-(int(test[val])/total[val])))
    except ZeroDivisionError:
        relial2.append(0)
test = FailedMessages[3]
total = SentMessages[3]
for val in range(0, nfiles):
    try:
        relial3.append((1-(int(test[val])/total[val])))
    except ZeroDivisionError:
        relial3.append(0)
plt.bar(ind, relial , width, label=label1)
plt.bar(ind+ width , relial1, width, label=label3)
plt.bar(ind+ width, relial2, width, label=label4)
plt.bar(ind+ width , relial3, width, label=label2)
plt.xlabel('Nodes')
plt.ylabel('Avrg reliability')
plt.title(title)
plt.legend(loc='best')
plt.savefig(title+"Avrg_Reliability", dpi=300)
# Dump the per-run average latencies. open(..., "x") raises if the report
# already exists from a previous run.
# NOTE(review): the "Test 1" label is hard-coded for every run.
f = open(title+".txt", "x")
f.write("Avrg Latency by test s\n")
for val in testalllat:
    f.write("Test 1 -> "+ str(round(val, 2)) +'s\n')
f.close()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.bar",
"os.walk",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"datetime.datetime.now",
"os.listdir",
"matplotlib.pyplot.savefig"
] | [((920, 936), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (927, 936), False, 'import os\n'), ((4643, 4660), 'numpy.arange', 'np.arange', (['nfiles'], {}), '(nfiles)\n', (4652, 4660), True, 'import numpy as np\n'), ((5042, 5054), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5052, 5054), True, 'import matplotlib.pyplot as plt\n'), ((5110, 5160), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'SentMessages[0]', 'width'], {'label': 'label1'}), '(ind, SentMessages[0], width, label=label1)\n', (5117, 5160), True, 'import matplotlib.pyplot as plt\n'), ((5217, 5275), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'SentMessages[1]', 'width'], {'label': 'label3'}), '(ind + width, SentMessages[1], width, label=label3)\n', (5224, 5275), True, 'import matplotlib.pyplot as plt\n'), ((5334, 5392), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'SentMessages[2]', 'width'], {'label': 'label4'}), '(ind + width, SentMessages[2], width, label=label4)\n', (5341, 5392), True, 'import matplotlib.pyplot as plt\n'), ((5451, 5509), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'SentMessages[3]', 'width'], {'label': 'label2'}), '(ind + width, SentMessages[3], width, label=label2)\n', (5458, 5509), True, 'import matplotlib.pyplot as plt\n'), ((5571, 5590), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Nodes"""'], {}), "('Nodes')\n", (5581, 5590), True, 'import matplotlib.pyplot as plt\n'), ((5591, 5618), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Messages Sent"""'], {}), "('Messages Sent')\n", (5601, 5618), True, 'import matplotlib.pyplot as plt\n'), ((5619, 5635), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5628, 5635), True, 'import matplotlib.pyplot as plt\n'), ((5940, 5962), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5950, 5962), True, 'import matplotlib.pyplot as plt\n'), ((5963, 5999), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(title + 'Sent')"], 
{'dpi': '(300)'}), "(title + 'Sent', dpi=300)\n", (5974, 5999), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6008), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6006, 6008), True, 'import matplotlib.pyplot as plt\n'), ((6307, 6359), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'FailedMessages[0]', 'width'], {'label': 'label1'}), '(ind, FailedMessages[0], width, label=label1)\n', (6314, 6359), True, 'import matplotlib.pyplot as plt\n'), ((6416, 6476), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'FailedMessages[1]', 'width'], {'label': 'label3'}), '(ind + width, FailedMessages[1], width, label=label3)\n', (6423, 6476), True, 'import matplotlib.pyplot as plt\n'), ((6534, 6594), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'FailedMessages[2]', 'width'], {'label': 'label4'}), '(ind + width, FailedMessages[2], width, label=label4)\n', (6541, 6594), True, 'import matplotlib.pyplot as plt\n'), ((6654, 6714), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'FailedMessages[3]', 'width'], {'label': 'label2'}), '(ind + width, FailedMessages[3], width, label=label2)\n', (6661, 6714), True, 'import matplotlib.pyplot as plt\n'), ((6773, 6792), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Nodes"""'], {}), "('Nodes')\n", (6783, 6792), True, 'import matplotlib.pyplot as plt\n'), ((6793, 6822), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Failed Messages"""'], {}), "('Failed Messages')\n", (6803, 6822), True, 'import matplotlib.pyplot as plt\n'), ((6823, 6839), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6832, 6839), True, 'import matplotlib.pyplot as plt\n'), ((6841, 6863), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (6851, 6863), True, 'import matplotlib.pyplot as plt\n'), ((6864, 6910), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(title + 'FailedMessages')"], {'dpi': '(300)'}), "(title + 'FailedMessages', dpi=300)\n", (6875, 6910), True, 'import 
matplotlib.pyplot as plt\n'), ((6910, 6919), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6917, 6919), True, 'import matplotlib.pyplot as plt\n'), ((7770, 7811), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'relial', 'width'], {'label': 'label1'}), '(ind, relial, width, label=label1)\n', (7777, 7811), True, 'import matplotlib.pyplot as plt\n'), ((7814, 7864), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'relial1', 'width'], {'label': 'label3'}), '(ind + width, relial1, width, label=label3)\n', (7821, 7864), True, 'import matplotlib.pyplot as plt\n'), ((7866, 7916), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'relial2', 'width'], {'label': 'label4'}), '(ind + width, relial2, width, label=label4)\n', (7873, 7916), True, 'import matplotlib.pyplot as plt\n'), ((7918, 7968), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'relial3', 'width'], {'label': 'label2'}), '(ind + width, relial3, width, label=label2)\n', (7925, 7968), True, 'import matplotlib.pyplot as plt\n'), ((7970, 7989), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Nodes"""'], {}), "('Nodes')\n", (7980, 7989), True, 'import matplotlib.pyplot as plt\n'), ((7990, 8020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Avrg reliability"""'], {}), "('Avrg reliability')\n", (8000, 8020), True, 'import matplotlib.pyplot as plt\n'), ((8021, 8037), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8030, 8037), True, 'import matplotlib.pyplot as plt\n'), ((8039, 8061), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (8049, 8061), True, 'import matplotlib.pyplot as plt\n'), ((8062, 8110), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(title + 'Avrg_Reliability')"], {'dpi': '(300)'}), "(title + 'Avrg_Reliability', dpi=300)\n", (8073, 8110), True, 'import matplotlib.pyplot as plt\n'), ((1052, 1125), 'os.listdir', 'os.listdir', (["('C:/Users/mikep/PycharmProjects/logcomputations/logs/' + dir2)"], {}), 
"('C:/Users/mikep/PycharmProjects/logcomputations/logs/' + dir2)\n", (1062, 1125), False, 'import os\n'), ((1251, 1340), 'os.listdir', 'os.listdir', (["('C:/Users/mikep/PycharmProjects/logcomputations/logs/' + dir2 + '/' + dir)"], {}), "('C:/Users/mikep/PycharmProjects/logcomputations/logs/' + dir2 +\n '/' + dir)\n", (1261, 1340), False, 'import os\n'), ((2274, 2297), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2295, 2297), False, 'import datetime\n'), ((3375, 3398), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3396, 3398), False, 'import datetime\n')] |
#!/usr/bin/env python
# coding: utf-8
# # ResCenterNet Trial
#
# I am very new to these concepts so I am trying out by changing this amazing and probably only 3D model related awesome public kernel by Ruslan
# https://www.kaggle.com/hocop1/centernet-baseline
#
# Most of the codes are loaned from there . There are other codes that I took from OFT implementation github . But I dont know what is OFT , so I have not yet implemented it .
#
# My current score is not from this kernel( as there are some errors in this kernel) , but from some simple architecture modification of the original public kernel.
#
# In[1]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
import os
from sklearn.model_selection import train_test_split
from scipy.optimize import minimize
from tqdm.auto import tqdm as tq
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
from albumentations import ( Compose, OneOf, RandomBrightnessContrast,
RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur,
GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise
)
PATH = '/home/hy/pkuad/'
#os.listdir(PATH)
# # Load data
# In[2]:
# Load the training labels and the submission template, then drop a handful
# of images known to have bad annotations.
train = pd.read_csv(PATH + 'train.csv')
test = pd.read_csv(PATH + 'sample_submission.csv')
bad_list = ['ID_1a5a10365', 'ID_4d238ae90.jpg', 'ID_408f58e9f', 'ID_bb1d991f6', 'ID_c44983aeb']
train = train.loc[~train['ImageId'].isin(bad_list)]
# From camera.zip: fixed camera intrinsics shared by all images.
camera_matrix = np.array([[2304.5479, 0, 1686.2379],
                          [0, 2305.8757, 1354.9849],
                          [0, 0, 1]], dtype=np.float32)
camera_matrix_inv = np.linalg.inv(camera_matrix)
train.head()
# **ImageId** column contains names of images:
# In[3]:
def imread(path, fast_mode=False):
    """Read an image from disk.

    cv2 loads images in BGR channel order; unless *fast_mode* is set,
    colour images are flipped so the returned array is RGB.
    """
    image = cv2.imread(path)
    if fast_mode or image is None or len(image.shape) != 3:
        return image
    # Reverse the channel axis (BGR -> RGB) and copy into a fresh array.
    return np.array(image[:, :, ::-1])
# Load one sample image to record the native image shape, used later when
# mapping world coordinates onto the model grid.
img = imread(PATH + 'train_images/ID_8a6e65317' + '.jpg')
IMG_SHAPE = img.shape
#plt.figure(figsize=(15,8))
#plt.imshow(img);
# In[4]:
def str2coords(s, names=['id', 'yaw', 'pitch', 'roll', 'x', 'y', 'z']):
    """Parse a PredictionString into a list of per-car dicts.

    Args:
        s: whitespace-separated values, 7 per car.
        names: keys assigned, in order, to each group of 7 values.
    Returns:
        list of dicts keyed by `names`; the 'id' entry (when present)
        is cast to int, everything else to float.
    """
    rows = np.array(s.split()).reshape([-1, 7])
    coords = []
    for row in rows:
        entry = dict(zip(names, row.astype('float')))
        if 'id' in entry:
            entry['id'] = int(entry['id'])
        coords.append(entry)
    return coords
# In[5]:
# Sanity check: parse the first training row and show the decoded dicts.
inp = train['PredictionString'][0]
print('Example input:\n', inp)
print()
print('Output:\n', str2coords(inp))
# # Data distributions
# In[6]:
def rotate(x, angle):
    """Add *angle* to *x* and wrap the result into [-pi, pi)."""
    shifted = x + angle
    return shifted - (shifted + np.pi) // (2 * np.pi) * 2 * np.pi
# # 2D Visualization
# In[7]:
def get_img_coords(s):
    """Project the 3D car positions in a PredictionString onto the image.

    Input is a PredictionString (e.g. from the train dataframe).
    Returns two arrays: the x and the y pixel coordinates of each car.
    """
    coords = str2coords(s)
    world = np.array([[c['x'], c['y'], c['z']] for c in coords]).T
    projected = np.dot(camera_matrix, world).T
    # Perspective divide: the third column is the distance from the camera.
    projected[:, 0] /= projected[:, 2]
    projected[:, 1] /= projected[:, 2]
    return projected[:, 0], projected[:, 1]
# One point is out of image!
# Let's look at the distribution of all points. Image is here just for reference.
# # 3D Visualization
# Used code from https://www.kaggle.com/zstusnoopy/visualize-the-location-and-3d-bounding-box-of-car, but made it one function
# In[8]:
from math import sin, cos
# convert euler angle to rotation matrix
def euler_to_Rot(yaw, pitch, roll):
    """Build a 3x3 rotation matrix from yaw, pitch and roll Euler angles.

    The result is Y @ P @ R: roll is applied first, then pitch, then yaw.
    """
    cy, sy = cos(yaw), sin(yaw)
    cp, sp = cos(pitch), sin(pitch)
    cr, sr = cos(roll), sin(roll)
    Y = np.array([[cy, 0, sy],
                  [0, 1, 0],
                  [-sy, 0, cy]])
    P = np.array([[1, 0, 0],
                  [0, cp, -sp],
                  [0, sp, cp]])
    R = np.array([[cr, -sr, 0],
                  [sr, cr, 0],
                  [0, 0, 1]])
    return np.dot(Y, np.dot(P, R))
# # Image preprocessing
# In[9]:
# Network input size and output stride: the model predicts on a grid of
# (IMG_HEIGHT // MODEL_SCALE) x (IMG_WIDTH // MODEL_SCALE) cells.
IMG_WIDTH = 2052
IMG_HEIGHT = 1026
MODEL_SCALE = 8
def _regr_preprocess(regr_dict):
    """Convert a raw pose dict into regression targets (mutates in place).

    Scales x/y/z down by 100, shifts roll by pi, replaces 'pitch' with its
    sine and cosine, and drops the 'id' key.
    """
    for key in ('x', 'y', 'z'):
        regr_dict[key] = regr_dict[key] / 100
    regr_dict['roll'] = rotate(regr_dict['roll'], np.pi)
    pitch = regr_dict.pop('pitch')
    regr_dict['pitch_sin'] = sin(pitch)
    regr_dict['pitch_cos'] = cos(pitch)
    regr_dict.pop('id')
    return regr_dict
def _regr_back(regr_dict):
    """Invert _regr_preprocess: restore metric x/y/z, roll and pitch."""
    for key in ('x', 'y', 'z'):
        regr_dict[key] = regr_dict[key] * 100
    regr_dict['roll'] = rotate(regr_dict['roll'], -np.pi)
    # Normalise the (sin, cos) pair before recovering the pitch angle.
    norm = np.sqrt(regr_dict['pitch_sin'] ** 2 + regr_dict['pitch_cos'] ** 2)
    pitch_sin = regr_dict['pitch_sin'] / norm
    pitch_cos = regr_dict['pitch_cos'] / norm
    regr_dict['pitch'] = np.arccos(pitch_cos) * np.sign(pitch_sin)
    return regr_dict
def preprocess_image(img):
    """Crop, pad and resize a raw image for the network.

    The top half of the image is discarded, the sides are padded with the
    per-row mean colour, and the result is resized to
    (IMG_WIDTH, IMG_HEIGHT) and scaled to [0, 1] float32.
    """
    img = img[img.shape[0] // 2:]
    side = np.ones_like(img) * img.mean(1, keepdims=True).astype(img.dtype)
    side = side[:, :img.shape[1] // 4]
    padded = np.concatenate([side, img, side], 1)
    resized = cv2.resize(padded, (IMG_WIDTH, IMG_HEIGHT))
    return (resized / 255).astype('float32')
def get_mask_and_regr(img, labels):
    """Build the centre heatmap and regression target maps for one image.

    Args:
        img: the ORIGINAL (un-preprocessed) image; only its shape is used
            to map image coordinates onto the model grid.
        labels: the PredictionString for this image.

    Returns:
        mask: (IMG_HEIGHT//MODEL_SCALE, IMG_WIDTH//MODEL_SCALE) float32 map
            with 1 at each car centre cell.
        regr: same spatial shape with 7 channels, written in sorted key
            order (pitch_cos, pitch_sin, roll, x, y, yaw, z).
    """
    mask = np.zeros([IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE], dtype='float32')
    regr_names = ['x', 'y', 'z', 'yaw', 'pitch', 'roll']  # NOTE(review): unused
    regr = np.zeros([IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE, 7], dtype='float32')
    coords = str2coords(labels)
    xs, ys = get_img_coords(labels)
    for x, y, regr_dict in zip(xs, ys, coords):
        x, y = y, x  # swap: the row index comes from the image y coordinate
        # Map original-image rows (bottom half only, see preprocess_image)
        # onto model-grid rows.
        x = (x - img.shape[0] // 2) * IMG_HEIGHT / (img.shape[0] // 2) / MODEL_SCALE
        x = np.round(x).astype('int')
        # Map columns, accounting for the side padding added in preprocess_image.
        y = (y + img.shape[1] // 4) * IMG_WIDTH / (img.shape[1] * 1.5) / MODEL_SCALE
        y = np.round(y).astype('int')
        if x >= 0 and x < IMG_HEIGHT // MODEL_SCALE and y >= 0 and y < IMG_WIDTH // MODEL_SCALE:
            mask[x, y] = 1
            regr_dict = _regr_preprocess(regr_dict)
            regr[x, y] = [regr_dict[n] for n in sorted(regr_dict)]
    return mask, regr
# In[10]:
# Smoke-test the preprocessing pipeline on the first training image.
img0 = imread(PATH + 'train_images/' + train['ImageId'][0] + '.jpg')
img = preprocess_image(img0)
mask, regr = get_mask_and_regr(img0, train['PredictionString'][0])
print('img.shape', img.shape, 'std:', np.std(img))
print('mask.shape', mask.shape, 'std:', np.std(mask))
print('regr.shape', regr.shape, 'std:', np.std(regr))
# ## Data Aug
# In[11]:
# Photometric augmentations only — no geometric transforms, so the target
# maps remain valid without adjustment.
albu_list = [RandomBrightnessContrast(brightness_limit=(-0.3, 0.3), contrast_limit=(-0.3, 0.3), p=0.3),
             RandomGamma(p=0.2), HueSaturationValue(p=0.3), RGBShift(p=0.3), MotionBlur(p=0.1), Blur(p=0.1),
             GaussNoise(var_limit=(20,100), p=0.2),
             ChannelShuffle(p=0.2), MultiplicativeNoise(multiplier=(0.7, 1.2), p=0.2), ISONoise(p=0.2),GaussNoise(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5)]
# NOT in colab version: MultiplicativeNoise(multiplier=(0.7, 1.2), p=0.2), ISONoise(p=0.2),
# GaussNoise(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5)
p_transform_train = 0.1
albu_transform_train = Compose(albu_list, p=p_transform_train)
p_transform_val = 0.05
albu_transform_valid = Compose(albu_list, p=p_transform_val)
# # PyTorch Dataset
# In[12]:
class CarDataset(Dataset):
    """Car dataset: yields (image, mask, regr) triples.

    In training mode the target maps are built from the labels; otherwise
    mask and regr are returned as 0 placeholders.
    """

    def __init__(self, dataframe, root_dir, training=True, transform=None):
        self.df = dataframe
        self.root_dir = root_dir
        self.transform = transform  # NOTE(review): stored but never applied
        self.training = training

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Each row holds (image id, PredictionString).
        image_id, labels = self.df.values[idx]
        img_name = self.root_dir.format(image_id)
        # Read once; reuse the raw image for both input and targets.
        raw = imread(img_name, True)
        img = np.rollaxis(preprocess_image(raw), 2, 0)  # channels first
        if self.training:
            mask, regr = get_mask_and_regr(raw, labels)
            regr = np.rollaxis(regr, 2, 0)
        else:
            mask, regr = 0, 0
        return [img, mask, regr]
# In[13]:
train_images_dir = PATH + 'train_images/{}.jpg'
test_images_dir = PATH + 'test_images/{}.jpg'
# Hold out 10% of the labelled data for validation.
df_train, df_dev = train_test_split(train, test_size=0.1, random_state=42)
df_test = test
# Create dataset objects
train_dataset = CarDataset(df_train, train_images_dir,transform = albu_transform_train)
dev_dataset = CarDataset(df_dev, train_images_dir, transform = albu_transform_valid)
test_dataset = CarDataset(df_test, test_images_dir)
# Show some generated examples
# In[14]:
BATCH_SIZE = 3
# Create data generators - they will produce batches
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
dev_loader = DataLoader(dataset=dev_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
# # PyTorch Model
# In[15]:
class double_conv(nn.Module):
    """Two stacked (conv3x3 -> BatchNorm -> ReLU) stages."""

    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        stages = [
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class up(nn.Module):
    """Upsample x1 by 2, pad to match x2, concatenate and double-conv."""

    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up, self).__init__()
        # A learned transposed convolution would also work here, but
        # bilinear upsampling needs far less memory.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)

    def forward(self, x1, x2=None):
        x1 = self.up(x1)
        # Pad x1 so its spatial size matches x2 (input is CHW).
        # NOTE(review): x2.size() is read before the None check below, so
        # calling forward with x2=None raises — preserved from the original.
        # Padding rationale:
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        dy = x2.size()[2] - x1.size()[2]
        dx = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (dx // 2, dx - dx // 2, dy // 2, dy - dy // 2))
        if x2 is not None:
            merged = torch.cat([x2, x1], dim=1)
        else:
            merged = x1
        return self.conv(merged)
def get_mesh(batch_size, shape_x, shape_y):
    """Return a (batch, 2, shape_x, shape_y) tensor of [0, 1] x/y coordinates."""
    grid_x, grid_y = np.meshgrid(np.linspace(0, 1, shape_y), np.linspace(0, 1, shape_x))
    tiled_x = np.tile(grid_x[None, None, :, :], [batch_size, 1, 1, 1]).astype('float32')
    tiled_y = np.tile(grid_y[None, None, :, :], [batch_size, 1, 1, 1]).astype('float32')
    # `device` is the module-level torch device chosen at startup.
    return torch.cat([torch.tensor(tiled_x).to(device), torch.tensor(tiled_y).to(device)], 1)
# In[16]:
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
# Download URLs for the official torchvision ImageNet checkpoints, used by
# the resnetNN constructors below when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution with no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions plus identity skip.

    Despite the `bn*` attribute names, normalisation here is GroupNorm(16),
    not BatchNorm.  Attribute names must not change: they define the
    state_dict keys matched against pretrained checkpoints.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.GroupNorm(16, planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.GroupNorm(16, planes)
        # Project the identity path when the shape changes (stride or width).
        if stride != 1 or inplanes != planes:
            self.downsample = nn.Sequential(
                conv1x1(inplanes, planes, stride), nn.GroupNorm(16, planes))
        else:
            self.downsample = None
    def forward(self, x):
        identity = x
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = F.relu(out, inplace=True)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    Uses GroupNorm(16) despite the `bn*` attribute names.  Attribute names
    define state_dict keys and must stay as-is for checkpoint loading.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.GroupNorm(16, planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.GroupNorm(16, planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.GroupNorm(16, planes * self.expansion)
        # Project the identity path when the shape changes.
        if stride != 1 or inplanes != planes * self.expansion:
            self.downsample = nn.Sequential(
                conv1x1(inplanes, planes * self.expansion, stride),
                nn.GroupNorm(16, planes * self.expansion))
        else:
            self.downsample = None
    def forward(self, x):
        identity = x
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        out = F.relu(self.bn2(self.conv2(out)), inplace=True)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = F.relu(out)
        return out
class ResNetFeatures(nn.Module):
    """ResNet backbone that returns multi-scale feature maps.

    forward() yields feature maps at strides 8, 16 and 32 instead of class
    logits; `num_classes` is accepted for signature compatibility but unused.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):
        super(ResNetFeatures, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.GroupNorm(16, 64)  # GroupNorm despite the bn1 name
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                # NOTE(review): the blocks use GroupNorm, so this branch
                # never fires for them; kept from the reference code.
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        # First block may downsample; the remaining blocks keep resolution.
        layers = []
        layers.append(block(self.inplanes, planes, stride))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        conv1 = F.relu(self.bn1(self.conv1(x)), inplace=True)
        conv1 = F.max_pool2d(conv1, 3, stride=2, padding=1)
        feats4 = self.layer1(conv1)
        feats8 = self.layer2(feats4)
        feats16 = self.layer3(feats8)
        feats32 = self.layer4(feats16)
        return feats8, feats16, feats32
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18 feature extractor.

    Args:
        pretrained (bool): if True, load ImageNet weights for the layers
            whose state_dict keys match.
    """
    net = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet18'])
        _load_pretrained(net, weights)
    return net
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet-34 feature extractor.

    Args:
        pretrained (bool): if True, load ImageNet weights for the layers
            whose state_dict keys match.
    """
    net = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet34'])
        _load_pretrained(net, weights)
    return net
def resnet50(pretrained=False, root='./pretrain_models', **kwargs):
    """Build a ResNet-50 feature extractor.

    Bug fix: the original constructed an undefined ``ResNet`` class
    (NameError on call) and loaded weights via a package-relative import
    (``from ..models.model_store import ...``) that cannot resolve in this
    flat script.  It now mirrors the other ``resnetNN`` helpers.

    Args:
        pretrained (bool): if True, load ImageNet weights for the layers
            whose state_dict keys match.
        root: kept only for backward compatibility with the old signature;
            unused.
    """
    model = ResNetFeatures(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        _load_pretrained(model, model_zoo.load_url(model_urls['resnet50']))
    return model
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet-101 feature extractor.

    Args:
        pretrained (bool): if True, load ImageNet weights for the layers
            whose state_dict keys match.
    """
    net = ResNetFeatures(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet101'])
        _load_pretrained(net, weights)
    return net
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152 feature extractor.

    Args:
        pretrained (bool): if True, load ImageNet weights for the layers
            whose state_dict keys match.
    """
    net = ResNetFeatures(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet152'])
        _load_pretrained(net, weights)
    return net
def _load_pretrained(model, pretrained):
model_dict = model.state_dict()
pretrained = {k : v for k, v in pretrained.items() if k in model_dict}
model_dict.update(pretrained)
model.load_state_dict(model_dict)
# In[17]:
from pytorchcv.model_provider import get_model as ptcv_get_model
# In[18]:
class CentResnet(nn.Module):
    '''Mixture of previous classes'''
    # A ResNet-18 backbone (with FPN-style lateral 1x1 convs) fused with a
    # small U-Net-like encoder over the input image plus a coordinate mesh.
    def __init__(self, n_classes):
        super(CentResnet, self).__init__()
        self.base_model = resnet18(pretrained=False)
        #self.base_model = ptcv_get_model("dla34", pretrained=False)
        # Lateral layers convert resnet outputs to a common feature size
        self.lat8 = nn.Conv2d(128, 256, 1)
        self.lat16 = nn.Conv2d(256, 256, 1)
        self.lat32 = nn.Conv2d(512, 256, 1)
        self.bn8 = nn.GroupNorm(16, 256)
        self.bn16 = nn.GroupNorm(16, 256)
        self.bn32 = nn.GroupNorm(16, 256)
        # Encoder over 5 input channels: 3 RGB + 2 mesh coordinates.
        self.conv0 = double_conv(5, 64)
        self.conv1 = double_conv(64, 128)
        self.conv2 = double_conv(128, 512)
        self.conv3 = double_conv(512, 1024)
        self.mp = nn.MaxPool2d(2)
        # 1282 = 256 (lat32) + 2 (mesh2) + 1024 (encoder x4) channels.
        self.up1 = up(1282 , 512) #+ 1024
        self.up2 = up(512 + 512, 256)
        self.outc = nn.Conv2d(256, n_classes, 1)
    def forward(self, x):
        batch_size = x.shape[0]
        # Append a normalised coordinate mesh to the raw input.
        mesh1 = get_mesh(batch_size, x.shape[2], x.shape[3])
        x0 = torch.cat([x, mesh1], 1)
        x1 = self.mp(self.conv0(x0))
        x2 = self.mp(self.conv1(x1))
        x3 = self.mp(self.conv2(x2))
        x4 = self.mp(self.conv3(x3))
        #feats = self.base_model.extract_features(x)
        # Run frontend network
        feats8, feats16, feats32 = self.base_model(x)
        # NOTE(review): lat8/lat16 are computed but never used below.
        lat8 = F.relu(self.bn8(self.lat8(feats8)))
        lat16 = F.relu(self.bn16(self.lat16(feats16)))
        lat32 = F.relu(self.bn32(self.lat32(feats32)))
        # Add positional info
        mesh2 = get_mesh(batch_size, lat32.shape[2], lat32.shape[3])
        feats = torch.cat([lat32, mesh2], 1)
        #print(feats.shape)
        #print (x4.shape)
        x = self.up1(feats, x4)
        x = self.up2(x, x3)
        x = self.outc(x)
        return x
# In[19]:
# Gets the GPU if there is one, otherwise the cpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
n_epochs = 8
# 8 output channels: 1 centre heatmap + 7 regression targets.
model = CentResnet(8).to(device)
optimizer = optim.AdamW(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False)
# One-cycle schedule; stepped once per batch inside train().
exp_lr_scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=0.002, steps_per_epoch=len(train_loader), epochs=n_epochs)
#exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[7,10,14,17,20], gamma=0.5) #milestones=[5,7,8], gamma=0.1)
# In[20]:
#img_batch = torch.randn((1,3,512,2048))
# In[21]:
#test = model(img_batch.to(device))
# In[22]:
#model.load_state_dict(torch.load(f'../input/centernet2/model.pth'))
#model.eval();
# # Training
# In[23]:
def criterion(prediction, mask, regr, size_average=True):
# Binary mask loss
pred_mask = torch.sigmoid(prediction[:, 0])
# mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)
mask_loss = mask * torch.log(pred_mask + 1e-12) + (1 - mask) * torch.log(1 - pred_mask + 1e-12)
mask_loss = -mask_loss.mean(0).sum()
# Regression L1 loss
pred_regr = prediction[:, 1:]
regr_loss = (torch.abs(pred_regr - regr).sum(1) * mask).sum(1).sum(1) / mask.sum(1).sum(1)
regr_loss = regr_loss.mean(0)
# Sum
loss = torch.log(mask_loss) + regr_loss
if not size_average:
loss *= prediction.shape[0]
return loss
# In[24]:
## Just for checking the shapes to manage our Unet
# Pulls two batches from the loader and prints their tensor shapes.
i = 0
for batch_idx, (img_batch, mask_batch, regr_batch) in enumerate(tqdm(train_loader)):
    print(img_batch.shape)
    print(mask_batch.shape)
    print(regr_batch.shape)
    i+=1
    if i>1:
        break
# In[25]:
def train(epoch, history=None):
    """Run one training epoch over train_loader.

    Uses the module-level model/optimizer/exp_lr_scheduler/device globals.
    NOTE(review): this function shadows the `train` DataFrame defined
    earlier; the DataFrame is only used before this definition.

    Args:
        epoch: epoch number, used for logging and as the history index.
        history: optional DataFrame that receives per-step 'train_loss'.
    """
    model.train()
    t = tqdm(train_loader)
    for batch_idx, (img_batch, mask_batch, regr_batch) in enumerate(t):
        img_batch = img_batch.to(device)
        mask_batch = mask_batch.to(device)
        regr_batch = regr_batch.to(device)
        optimizer.zero_grad()
        output = model(img_batch)
        loss = criterion(output, mask_batch, regr_batch)
        t.set_description('train_loss (loss=%g)' %loss)
        if history is not None:
            history.loc[epoch + batch_idx / len(train_loader), 'train_loss'] = loss.data.cpu().numpy()
        loss.backward()
        optimizer.step()
        # OneCycleLR is stepped per batch, not per epoch.
        exp_lr_scheduler.step()
    print('Train Epoch: {} \tLR: {:.6f}\tLoss: {:.6f}'.format(
        epoch,
        optimizer.state_dict()['param_groups'][0]['lr'],
        loss.data))
def evaluate(epoch, history=None):
    """Compute the mean validation loss over dev_loader.

    Uses the module-level model/device/dev_loader globals; gradients are
    disabled.  If *history* is given, the loss is stored at row `epoch`
    under 'dev_loss'.
    """
    model.eval()
    loss = 0
    with torch.no_grad():
        for img_batch, mask_batch, regr_batch in dev_loader:
            img_batch = img_batch.to(device)
            mask_batch = mask_batch.to(device)
            regr_batch = regr_batch.to(device)
            output = model(img_batch)
            # size_average=False scales each batch loss by its size so the
            # division below yields a true per-sample mean.
            loss += criterion(output, mask_batch, regr_batch, size_average=False).data
    loss /= len(dev_loader.dataset)
    if history is not None:
        history.loc[epoch, 'dev_loss'] = loss.cpu().numpy()
    print('Dev loss: {:.4f}'.format(loss))
# In[26]:
import gc
# Train for n_epochs, validating after each epoch; CUDA memory is cleared
# between epochs to reduce fragmentation.
history = pd.DataFrame()
for epoch in range(n_epochs):
    torch.cuda.empty_cache()
    gc.collect()
    train(epoch, history)
    evaluate(epoch, history)
# In[ ]:
torch.save(model.state_dict(), './resnet34-sz-2048-1024-8-ep-8-new-data-aug_final.pth')
# In[ ]:
#history['train_loss'].iloc[100:].plot();
# In[ ]:
#series = history.dropna()['dev_loss']
#plt.scatter(series.index, series);
# In[ ]:
## Simple test of probabilities
#act = torch.nn.Sigmoid()
#logtens = torch.from_numpy(logits)
#probs = act(logtens)
#probs = probs[probs>0.03]
#print(probs)
# In[ ]:
# Minimum 3D separation (in the same units as x/y/z) below which two
# detections are treated as duplicates by clear_duplicates().
DISTANCE_THRESH_CLEAR = 2
def convert_3d_to_2d(x, y, z, fx = 2304.5479, fy = 2305.8757, cx = 1686.2379, cy = 1354.9849):
    """Pinhole projection of camera-frame (x, y, z) to pixel coordinates.

    Defaults are the competition camera intrinsics.
    (Adapted from kaggle.com/theshockwaverider/eda-visualization-baseline.)
    """
    u = x * fx / z + cx
    v = y * fy / z + cy
    return u, v
def optimize_xy(r, c, x0, y0, z0):
    """Refine (x, y) so the reprojected centre lands on heatmap cell (r, c).

    z is effectively held fixed: the inner objective always projects with
    z0 (ignoring the z value proposed by the optimizer) and z0 is returned
    unchanged.
    """
    def distance_fn(xyz):
        x, y, z = xyz
        x, y = convert_3d_to_2d(x, y, z0)  # z from the optimizer is ignored
        y, x = x, y
        # Same image -> model-grid mapping as in get_mask_and_regr.
        x = (x - IMG_SHAPE[0] // 2) * IMG_HEIGHT / (IMG_SHAPE[0] // 2) / MODEL_SCALE
        x = np.round(x).astype('int')
        y = (y + IMG_SHAPE[1] // 4) * IMG_WIDTH / (IMG_SHAPE[1] * 1.5) / MODEL_SCALE
        y = np.round(y).astype('int')
        return (x-r)**2 + (y-c)**2
    # Powell handles this non-smooth (rounded) objective without gradients.
    res = minimize(distance_fn, [x0, y0, z0], method='Powell')
    x_new, y_new, z_new = res.x
    return x_new, y_new, z0
def clear_duplicates(coords):
    """Suppress near-duplicate detections, keeping the most confident one.

    Any detection closer than DISTANCE_THRESH_CLEAR to a strictly more
    confident detection has its confidence set to -1 and is dropped.
    """
    for weak in coords:
        pos_weak = np.array([weak['x'], weak['y'], weak['z']])
        for strong in coords:
            pos_strong = np.array([strong['x'], strong['y'], strong['z']])
            separation = np.sqrt(((pos_weak - pos_strong) ** 2).sum())
            if separation < DISTANCE_THRESH_CLEAR:
                if weak['confidence'] < strong['confidence']:
                    weak['confidence'] = -1
    return [c for c in coords if c['confidence'] > 0]
def extract_coords(prediction):
    """Decode one model output map into a list of car pose dicts.

    Args:
        prediction: array whose channel 0 is the centre heatmap logit and
            whose remaining channels are the regression outputs.

    Returns:
        list of pose dicts with a 'confidence' key (sigmoid of the logit),
        with duplicate detections suppressed.
    """
    logits = prediction[0]
    regr_output = prediction[1:]
    # Cells with a positive logit (sigmoid > 0.5) count as detections.
    points = np.argwhere(logits > 0)
    # Channels were written in sorted-key order by get_mask_and_regr.
    col_names = sorted(['x', 'y', 'z', 'yaw', 'pitch_sin', 'pitch_cos', 'roll'])
    coords = []
    for r, c in points:
        regr_dict = dict(zip(col_names, regr_output[:, r, c]))
        coords.append(_regr_back(regr_dict))
        coords[-1]['confidence'] = 1 / (1 + np.exp(-logits[r, c]))
        coords[-1]['x'], coords[-1]['y'], coords[-1]['z'] = optimize_xy(r, c, coords[-1]['x'], coords[-1]['y'], coords[-1]['z'])
    coords = clear_duplicates(coords)
    return coords
def coords2str(coords, names=['yaw', 'pitch', 'roll', 'x', 'y', 'z', 'confidence']):
    """Serialise a list of pose dicts back into a PredictionString.

    Values are emitted in `names` order per car; missing keys become 0.
    """
    parts = [str(car.get(key, 0)) for car in coords for key in names]
    return ' '.join(parts)
# In[ ]:
torch.cuda.empty_cache()
gc.collect()
# # Make submission
# In[ ]:
# Run inference over the test set and decode every output map into a
# PredictionString.
predictions = []
test_loader = DataLoader(dataset=test_dataset, batch_size=4, shuffle=False, num_workers=4)
model.eval()
for img, _, _ in tqdm(test_loader):
    with torch.no_grad():
        output = model(img.to(device))
    output = output.data.cpu().numpy()
    for out in output:
        coords = extract_coords(out)
        s = coords2str(coords)
        predictions.append(s)
# In[ ]:
# Write the predictions into the sample submission and save it as CSV.
test = pd.read_csv(PATH + 'sample_submission.csv')
test['PredictionString'] = predictions
test.to_csv('resnet34-sz-2048-1024-8-ep-8-new-data-aug.csv', index=False)
test.head()
| [
"albumentations.GaussNoise",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"torch.cat",
"gc.collect",
"torch.nn.GroupNorm",
"torch.nn.init.constant_",
"numpy.tile",
"numpy.exp",
"torch.no_grad",
"albumentations.RGBShift",
"numpy.round",
"albumentations.MotionBlur",
"albume... | [((1570, 1601), 'pandas.read_csv', 'pd.read_csv', (["(PATH + 'train.csv')"], {}), "(PATH + 'train.csv')\n", (1581, 1601), True, 'import pandas as pd\n'), ((1609, 1652), 'pandas.read_csv', 'pd.read_csv', (["(PATH + 'sample_submission.csv')"], {}), "(PATH + 'sample_submission.csv')\n", (1620, 1652), True, 'import pandas as pd\n'), ((1836, 1933), 'numpy.array', 'np.array', (['[[2304.5479, 0, 1686.2379], [0, 2305.8757, 1354.9849], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[2304.5479, 0, 1686.2379], [0, 2305.8757, 1354.9849], [0, 0, 1]],\n dtype=np.float32)\n', (1844, 1933), True, 'import numpy as np\n'), ((2003, 2031), 'numpy.linalg.inv', 'np.linalg.inv', (['camera_matrix'], {}), '(camera_matrix)\n', (2016, 2031), True, 'import numpy as np\n'), ((7784, 7823), 'albumentations.Compose', 'Compose', (['albu_list'], {'p': 'p_transform_train'}), '(albu_list, p=p_transform_train)\n', (7791, 7823), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7871, 7908), 'albumentations.Compose', 'Compose', (['albu_list'], {'p': 'p_transform_val'}), '(albu_list, p=p_transform_val)\n', (7878, 7908), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((9000, 9055), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(train, test_size=0.1, random_state=42)\n', (9016, 9055), False, 'from sklearn.model_selection import train_test_split\n'), ((9452, 9541), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True,\n num_workers=4)\n', 
(9462, 9541), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((9551, 9639), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dev_dataset', 'batch_size': 'BATCH_SIZE', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset=dev_dataset, batch_size=BATCH_SIZE, shuffle=False,\n num_workers=4)\n', (9561, 9639), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((9650, 9739), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'BATCH_SIZE', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False,\n num_workers=4)\n', (9660, 9739), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((24280, 24294), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (24292, 24294), True, 'import pandas as pd\n'), ((26936, 26960), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (26958, 26960), False, 'import torch\n'), ((26961, 26973), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26971, 26973), False, 'import gc\n'), ((27039, 27115), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(4)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset=test_dataset, batch_size=4, shuffle=False, num_workers=4)\n', (27049, 27115), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((27148, 27165), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (27152, 27165), False, 'from tqdm import tqdm\n'), ((27412, 27455), 'pandas.read_csv', 'pd.read_csv', (["(PATH + 'sample_submission.csv')"], {}), "(PATH + 'sample_submission.csv')\n", (27423, 27455), True, 'import pandas as pd\n'), ((2152, 2168), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2162, 2168), False, 'import cv2\n'), ((4901, 4924), 'math.sin', 'sin', (["regr_dict['pitch']"], {}), "(regr_dict['pitch'])\n", (4904, 4924), False, 'from math import sin, cos\n'), ((4954, 4977), 'math.cos', 'cos', 
(["regr_dict['pitch']"], {}), "(regr_dict['pitch'])\n", (4957, 4977), False, 'from math import sin, cos\n'), ((5699, 5731), 'numpy.concatenate', 'np.concatenate', (['[bg, img, bg]', '(1)'], {}), '([bg, img, bg], 1)\n', (5713, 5731), True, 'import numpy as np\n'), ((5742, 5782), 'cv2.resize', 'cv2.resize', (['img', '(IMG_WIDTH, IMG_HEIGHT)'], {}), '(img, (IMG_WIDTH, IMG_HEIGHT))\n', (5752, 5782), False, 'import cv2\n'), ((5872, 5957), 'numpy.zeros', 'np.zeros', (['[IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE]'], {'dtype': '"""float32"""'}), "([IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE], dtype='float32'\n )\n", (5880, 5957), True, 'import numpy as np\n'), ((6021, 6109), 'numpy.zeros', 'np.zeros', (['[IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE, 7]'], {'dtype': '"""float32"""'}), "([IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE, 7], dtype=\n 'float32')\n", (6029, 6109), True, 'import numpy as np\n'), ((6971, 6982), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (6977, 6982), True, 'import numpy as np\n'), ((7024, 7036), 'numpy.std', 'np.std', (['mask'], {}), '(mask)\n', (7030, 7036), True, 'import numpy as np\n'), ((7078, 7090), 'numpy.std', 'np.std', (['regr'], {}), '(regr)\n', (7084, 7090), True, 'import numpy as np\n'), ((7134, 7227), 'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'brightness_limit': '(-0.3, 0.3)', 'contrast_limit': '(-0.3, 0.3)', 'p': '(0.3)'}), '(brightness_limit=(-0.3, 0.3), contrast_limit=(-0.3,\n 0.3), p=0.3)\n', (7158, 7227), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7236, 7254), 'albumentations.RandomGamma', 'RandomGamma', ([], {'p': '(0.2)'}), '(p=0.2)\n', (7247, 7254), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, 
GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7256, 7281), 'albumentations.HueSaturationValue', 'HueSaturationValue', ([], {'p': '(0.3)'}), '(p=0.3)\n', (7274, 7281), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7283, 7298), 'albumentations.RGBShift', 'RGBShift', ([], {'p': '(0.3)'}), '(p=0.3)\n', (7291, 7298), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7300, 7317), 'albumentations.MotionBlur', 'MotionBlur', ([], {'p': '(0.1)'}), '(p=0.1)\n', (7310, 7317), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7319, 7330), 'albumentations.Blur', 'Blur', ([], {'p': '(0.1)'}), '(p=0.1)\n', (7323, 7330), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7344, 7382), 'albumentations.GaussNoise', 'GaussNoise', ([], {'var_limit': '(20, 100)', 'p': '(0.2)'}), '(var_limit=(20, 100), p=0.2)\n', (7354, 7382), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7395, 7416), 'albumentations.ChannelShuffle', 'ChannelShuffle', ([], {'p': '(0.2)'}), '(p=0.2)\n', (7409, 7416), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, 
ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7418, 7467), 'albumentations.MultiplicativeNoise', 'MultiplicativeNoise', ([], {'multiplier': '(0.7, 1.2)', 'p': '(0.2)'}), '(multiplier=(0.7, 1.2), p=0.2)\n', (7437, 7467), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7469, 7484), 'albumentations.ISONoise', 'ISONoise', ([], {'p': '(0.2)'}), '(p=0.2)\n', (7477, 7484), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((7485, 7554), 'albumentations.GaussNoise', 'GaussNoise', ([], {'var_limit': '(10.0, 50.0)', 'mean': '(0)', 'always_apply': '(False)', 'p': '(0.5)'}), '(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5)\n', (7495, 7554), False, 'from albumentations import Compose, OneOf, RandomBrightnessContrast, RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur, GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise\n'), ((12521, 12610), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (12530, 12610), True, 'import torch.nn as nn\n'), ((12713, 12787), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n', (12722, 12787), True, 'import torch.nn as nn\n'), ((21858, 21889), 'torch.sigmoid', 'torch.sigmoid', (['prediction[:, 0]'], {}), '(prediction[:, 0])\n', (21871, 21889), False, 'import torch\n'), ((22628, 22646), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', 
(22632, 22646), False, 'from tqdm import tqdm\n'), ((22840, 22858), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (22844, 22858), False, 'from tqdm import tqdm\n'), ((24330, 24354), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (24352, 24354), False, 'import torch\n'), ((24359, 24371), 'gc.collect', 'gc.collect', ([], {}), '()\n', (24369, 24371), False, 'import gc\n'), ((25548, 25600), 'scipy.optimize.minimize', 'minimize', (['distance_fn', '[x0, y0, z0]'], {'method': '"""Powell"""'}), "(distance_fn, [x0, y0, z0], method='Powell')\n", (25556, 25600), False, 'from scipy.optimize import minimize\n'), ((26214, 26237), 'numpy.argwhere', 'np.argwhere', (['(logits > 0)'], {}), '(logits > 0)\n', (26225, 26237), True, 'import numpy as np\n'), ((2249, 2274), 'numpy.array', 'np.array', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (2257, 2274), True, 'import numpy as np\n'), ((3612, 3636), 'numpy.dot', 'np.dot', (['camera_matrix', 'P'], {}), '(camera_matrix, P)\n', (3618, 3636), True, 'import numpy as np\n'), ((4596, 4608), 'numpy.dot', 'np.dot', (['P', 'R'], {}), '(P, R)\n', (4602, 4608), True, 'import numpy as np\n'), ((5263, 5329), 'numpy.sqrt', 'np.sqrt', (["(regr_dict['pitch_sin'] ** 2 + regr_dict['pitch_cos'] ** 2)"], {}), "(regr_dict['pitch_sin'] ** 2 + regr_dict['pitch_cos'] ** 2)\n", (5270, 5329), True, 'import numpy as np\n'), ((5367, 5433), 'numpy.sqrt', 'np.sqrt', (["(regr_dict['pitch_sin'] ** 2 + regr_dict['pitch_cos'] ** 2)"], {}), "(regr_dict['pitch_sin'] ** 2 + regr_dict['pitch_cos'] ** 2)\n", (5374, 5433), True, 'import numpy as np\n'), ((5455, 5475), 'numpy.arccos', 'np.arccos', (['pitch_cos'], {}), '(pitch_cos)\n', (5464, 5475), True, 'import numpy as np\n'), ((5478, 5496), 'numpy.sign', 'np.sign', (['pitch_sin'], {}), '(pitch_sin)\n', (5485, 5496), True, 'import numpy as np\n'), ((5589, 5606), 'numpy.ones_like', 'np.ones_like', (['img'], {}), '(img)\n', (5601, 5606), True, 'import numpy as np\n'), ((8296, 
8316), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (8311, 8316), False, 'import torch\n'), ((8589, 8611), 'numpy.rollaxis', 'np.rollaxis', (['img', '(2)', '(0)'], {}), '(img, 2, 0)\n', (8600, 8611), True, 'import numpy as np\n'), ((10981, 11056), 'torch.nn.functional.pad', 'F.pad', (['x1', '(diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2)'], {}), '(x1, (diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2))\n', (10986, 11056), True, 'import torch.nn.functional as F\n'), ((11576, 11602), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'shape_y'], {}), '(0, 1, shape_y)\n', (11587, 11602), True, 'import numpy as np\n'), ((11604, 11630), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'shape_x'], {}), '(0, 1, shape_x)\n', (11615, 11630), True, 'import numpy as np\n'), ((13008, 13032), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', 'planes'], {}), '(16, planes)\n', (13020, 13032), True, 'import torch.nn as nn\n'), ((13098, 13122), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', 'planes'], {}), '(16, planes)\n', (13110, 13122), True, 'import torch.nn as nn\n'), ((13613, 13638), 'torch.nn.functional.relu', 'F.relu', (['out'], {'inplace': '(True)'}), '(out, inplace=True)\n', (13619, 13638), True, 'import torch.nn.functional as F\n'), ((13870, 13894), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', 'planes'], {}), '(16, planes)\n', (13882, 13894), True, 'import torch.nn as nn\n'), ((13967, 13991), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', 'planes'], {}), '(16, planes)\n', (13979, 13991), True, 'import torch.nn as nn\n'), ((14073, 14114), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', '(planes * self.expansion)'], {}), '(16, planes * self.expansion)\n', (14085, 14114), True, 'import torch.nn as nn\n'), ((14735, 14746), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (14741, 14746), True, 'import torch.nn.functional as F\n'), ((14981, 15045), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], 
{'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (14990, 15045), True, 'import torch.nn as nn\n'), ((15096, 15116), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', '(64)'], {}), '(16, 64)\n', (15108, 15116), True, 'import torch.nn as nn\n'), ((16531, 16553), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (16544, 16553), True, 'import torch.nn as nn\n'), ((16660, 16703), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['conv1', '(3)'], {'stride': '(2)', 'padding': '(1)'}), '(conv1, 3, stride=2, padding=1)\n', (16672, 16703), True, 'import torch.nn.functional as F\n'), ((19437, 19459), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(1)'], {}), '(128, 256, 1)\n', (19446, 19459), True, 'import torch.nn as nn\n'), ((19481, 19503), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(1)'], {}), '(256, 256, 1)\n', (19490, 19503), True, 'import torch.nn as nn\n'), ((19525, 19547), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)', '(1)'], {}), '(512, 256, 1)\n', (19534, 19547), True, 'import torch.nn as nn\n'), ((19567, 19588), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', '(256)'], {}), '(16, 256)\n', (19579, 19588), True, 'import torch.nn as nn\n'), ((19609, 19630), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', '(256)'], {}), '(16, 256)\n', (19621, 19630), True, 'import torch.nn as nn\n'), ((19651, 19672), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', '(256)'], {}), '(16, 256)\n', (19663, 19672), True, 'import torch.nn as nn\n'), ((19878, 19893), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (19890, 19893), True, 'import torch.nn as nn\n'), ((19994, 20022), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'n_classes', '(1)'], {}), '(256, n_classes, 1)\n', (20003, 20022), True, 'import torch.nn as nn\n'), ((20169, 20193), 'torch.cat', 'torch.cat', (['[x, mesh1]', '(1)'], {}), '([x, mesh1], 1)\n', (20178, 20193), False, 'import 
torch\n'), ((20783, 20811), 'torch.cat', 'torch.cat', (['[lat32, mesh2]', '(1)'], {}), '([lat32, mesh2], 1)\n', (20792, 20811), False, 'import torch\n'), ((21064, 21089), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21087, 21089), False, 'import torch\n'), ((22383, 22403), 'torch.log', 'torch.log', (['mask_loss'], {}), '(mask_loss)\n', (22392, 22403), False, 'import torch\n'), ((23718, 23733), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23731, 23733), False, 'import torch\n'), ((25729, 25766), 'numpy.array', 'np.array', (["[c1['x'], c1['y'], c1['z']]"], {}), "([c1['x'], c1['y'], c1['z']])\n", (25737, 25766), True, 'import numpy as np\n'), ((27176, 27191), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27189, 27191), False, 'import torch\n'), ((8762, 8785), 'numpy.rollaxis', 'np.rollaxis', (['regr', '(2)', '(0)'], {}), '(regr, 2, 0)\n', (8773, 8785), True, 'import numpy as np\n'), ((9964, 10002), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(in_ch, out_ch, 3, padding=1)\n', (9973, 10002), True, 'import torch.nn as nn\n'), ((10016, 10038), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (10030, 10038), True, 'import torch.nn as nn\n'), ((10052, 10073), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10059, 10073), True, 'import torch.nn as nn\n'), ((10087, 10126), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_ch', 'out_ch', '(3)'], {'padding': '(1)'}), '(out_ch, out_ch, 3, padding=1)\n', (10096, 10126), True, 'import torch.nn as nn\n'), ((10140, 10162), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_ch'], {}), '(out_ch)\n', (10154, 10162), True, 'import torch.nn as nn\n'), ((10176, 10197), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10183, 10197), True, 'import torch.nn as nn\n'), ((10584, 10648), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': 
'(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (10595, 10648), True, 'import torch.nn as nn\n'), ((10685, 10740), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(in_ch // 2)', '(in_ch // 2)', '(2)'], {'stride': '(2)'}), '(in_ch // 2, in_ch // 2, 2, stride=2)\n', (10703, 10740), True, 'import torch.nn as nn\n'), ((11400, 11426), 'torch.cat', 'torch.cat', (['[x2, x1]'], {'dim': '(1)'}), '([x2, x1], dim=1)\n', (11409, 11426), False, 'import torch\n'), ((11643, 11697), 'numpy.tile', 'np.tile', (['mg_x[None, None, :, :]', '[batch_size, 1, 1, 1]'], {}), '(mg_x[None, None, :, :], [batch_size, 1, 1, 1])\n', (11650, 11697), True, 'import numpy as np\n'), ((11727, 11781), 'numpy.tile', 'np.tile', (['mg_y[None, None, :, :]', '[batch_size, 1, 1, 1]'], {}), '(mg_y[None, None, :, :], [batch_size, 1, 1, 1])\n', (11734, 11781), True, 'import numpy as np\n'), ((17184, 17226), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet18']"], {}), "(model_urls['resnet18'])\n", (17202, 17226), True, 'import torch.utils.model_zoo as model_zoo\n'), ((17535, 17577), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet34']"], {}), "(model_urls['resnet34'])\n", (17553, 17577), True, 'import torch.utils.model_zoo as model_zoo\n'), ((18343, 18386), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet101']"], {}), "(model_urls['resnet101'])\n", (18361, 18386), True, 'import torch.utils.model_zoo as model_zoo\n'), ((18697, 18740), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet152']"], {}), "(model_urls['resnet152'])\n", (18715, 18740), True, 'import torch.utils.model_zoo as model_zoo\n'), ((22051, 22079), 'torch.log', 'torch.log', (['(pred_mask + 1e-12)'], {}), '(pred_mask + 1e-12)\n', (22060, 22079), False, 'import torch\n'), ((22095, 22127), 'torch.log', 'torch.log', (['(1 - pred_mask + 1e-12)'], {}), '(1 - pred_mask + 1e-12)\n', (22104, 22127), False, 'import 
torch\n'), ((25812, 25849), 'numpy.array', 'np.array', (["[c2['x'], c2['y'], c2['z']]"], {}), "([c2['x'], c2['y'], c2['z']])\n", (25820, 25849), True, 'import numpy as np\n'), ((4231, 4239), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (4234, 4239), False, 'from math import sin, cos\n'), ((4244, 4252), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (4247, 4252), False, 'from math import sin, cos\n'), ((4317, 4325), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (4320, 4325), False, 'from math import sin, cos\n'), ((4380, 4390), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (4383, 4390), False, 'from math import sin, cos\n'), ((4428, 4438), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (4431, 4438), False, 'from math import sin, cos\n'), ((4440, 4450), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (4443, 4450), False, 'from math import sin, cos\n'), ((4473, 4482), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (4476, 4482), False, 'from math import sin, cos\n'), ((4519, 4528), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (4522, 4528), False, 'from math import sin, cos\n'), ((4530, 4539), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (4533, 4539), False, 'from math import sin, cos\n'), ((6338, 6349), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (6346, 6349), True, 'import numpy as np\n'), ((6461, 6472), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (6469, 6472), True, 'import numpy as np\n'), ((13266, 13290), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', 'planes'], {}), '(16, planes)\n', (13278, 13290), True, 'import torch.nn as nn\n'), ((14309, 14350), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(16)', '(planes * self.expansion)'], {}), '(16, planes * self.expansion)\n', (14321, 14350), True, 'import torch.nn as nn\n'), ((15487, 15557), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (15510, 15557), True, 'import 
torch.nn as nn\n'), ((25349, 25360), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (25357, 25360), True, 'import numpy as np\n'), ((25472, 25483), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (25480, 25483), True, 'import numpy as np\n'), ((26511, 26532), 'numpy.exp', 'np.exp', (['(-logits[r, c])'], {}), '(-logits[r, c])\n', (26517, 26532), True, 'import numpy as np\n'), ((4304, 4312), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (4307, 4312), False, 'from math import sin, cos\n'), ((4393, 4403), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (4396, 4403), False, 'from math import sin, cos\n'), ((4485, 4494), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (4488, 4494), False, 'from math import sin, cos\n'), ((11822, 11840), 'torch.tensor', 'torch.tensor', (['mg_x'], {}), '(mg_x)\n', (11834, 11840), False, 'import torch\n'), ((11853, 11871), 'torch.tensor', 'torch.tensor', (['mg_y'], {}), '(mg_y)\n', (11865, 11871), False, 'import torch\n'), ((15622, 15652), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (15639, 15652), True, 'import torch.nn as nn\n'), ((15669, 15697), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (15686, 15697), True, 'import torch.nn as nn\n'), ((16095, 16129), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bn3.weight', '(0)'], {}), '(m.bn3.weight, 0)\n', (16112, 16129), True, 'import torch.nn as nn\n'), ((16198, 16232), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bn2.weight', '(0)'], {}), '(m.bn2.weight, 0)\n', (16215, 16232), True, 'import torch.nn as nn\n'), ((22250, 22277), 'torch.abs', 'torch.abs', (['(pred_regr - regr)'], {}), '(pred_regr - regr)\n', (22259, 22277), False, 'import torch\n')] |
import nltk
from nltk.tokenize import word_tokenize
import numpy as np
import random
import pickle
from collections import Counter
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
hm_lines = 100000
def create_lexicon(pos, neg):
    """Build a filtered vocabulary from a positive and a negative corpus file.

    Each file is read line by line (up to `hm_lines` lines), tokenized and
    lemmatized; only mid-frequency words survive the filter.

    Returns the list of kept words (insertion order = first occurrence).
    """
    def _tokenize_file(path):
        # Read up to hm_lines lines from `path` and split each into tokens.
        tokens = []
        with open(path, 'r') as f:
            for line in f.readlines()[:hm_lines]:
                tokens += list(word_tokenize(line))
        return tokens

    # The original duplicated this read/tokenize loop for each file.
    lexicon = _tokenize_file(pos) + _tokenize_file(neg)
    lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
    w_counts = Counter(lexicon)
    # Keep only mid-frequency words: very common words (>= 1000 occurrences)
    # carry little signal, very rare words (<= 50) bloat the feature vector.
    l2 = [w for w in w_counts if 1000 > w_counts[w] > 50]
    print(len(l2))
    return l2
def sample_handling(sample, lexicon, classification):
    """Convert each line of file `sample` into a bag-of-words feature vector.

    Returns a list of [features, classification] pairs, where `features` is a
    0/1 list the length of `lexicon` and `classification` is the given label
    (e.g. [1, 0] for positive).
    """
    featureset = []
    # Hoist the word -> position lookup out of the loop: list.index() is O(n)
    # per call, which made the original quadratic in the lexicon size.
    # setdefault keeps the FIRST position, matching list.index() semantics.
    word_index = {}
    for position, word in enumerate(lexicon):
        word_index.setdefault(word, position)
    with open(sample, 'r') as f:
        for l in f.readlines()[:hm_lines]:
            current_words = word_tokenize(l.lower())
            current_words = [lemmatizer.lemmatize(i) for i in current_words]
            features = np.zeros(len(lexicon))
            for word in current_words:
                index_value = word_index.get(word.lower())
                if index_value is not None:
                    features[index_value] = 1
            featureset.append([list(features), classification])
    return featureset
def create_feature_sets_and_labels(pos, neg, test_size=0.1):
    """Build shuffled train/test splits of bag-of-words features.

    pos, neg: paths to the positive and negative corpus files.
    test_size: fraction of samples reserved for the test split.
    Returns (train_x, train_y, test_x, test_y).
    """
    lexicon = create_lexicon(pos, neg)
    features = []
    # BUG FIX: the original ignored the `pos`/`neg` arguments here and
    # hard-coded 'pos.txt'/'neg.txt', breaking any caller passing real paths.
    features += sample_handling(pos, lexicon, [1, 0])
    features += sample_handling(neg, lexicon, [0, 1])
    random.shuffle(features)
    features = np.array(features)

    testing_size = int(test_size * len(features))
    # BUG FIX: slicing with [:-testing_size] breaks when testing_size == 0
    # ([: -0] is empty); an explicit split index handles that edge correctly.
    split = len(features) - testing_size

    train_x = list(features[:, 0][:split])
    train_y = list(features[:, 1][:split])
    test_x = list(features[:, 0][split:])
    test_y = list(features[:, 1][split:])

    return train_x, train_y, test_x, test_y
if __name__ == '__main__':
    # Build the train/test featuresets once and cache them with pickle so
    # training scripts can load them instead of re-tokenizing the corpora.
    train_x,train_y,test_x,test_y = create_feature_sets_and_labels('/path/to/pos.txt','/path/to/neg.txt')
    # if you want to pickle this data:
    with open('/path/to/sentiment_set.pickle','wb') as f:
        pickle.dump([train_x,train_y,test_x,test_y],f) | [
"pickle.dump",
"nltk.stem.WordNetLemmatizer",
"random.shuffle",
"numpy.array",
"collections.Counter",
"nltk.tokenize.word_tokenize"
] | [((185, 204), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (202, 204), False, 'from nltk.stem import WordNetLemmatizer\n'), ((708, 724), 'collections.Counter', 'Counter', (['lexicon'], {}), '(lexicon)\n', (715, 724), False, 'from collections import Counter\n'), ((1774, 1798), 'random.shuffle', 'random.shuffle', (['features'], {}), '(features)\n', (1788, 1798), False, 'import random\n'), ((1814, 1832), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1822, 1832), True, 'import numpy as np\n'), ((2347, 2397), 'pickle.dump', 'pickle.dump', (['[train_x, train_y, test_x, test_y]', 'f'], {}), '([train_x, train_y, test_x, test_y], f)\n', (2358, 2397), False, 'import pickle\n'), ((397, 413), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['l'], {}), '(l)\n', (410, 413), False, 'from nltk.tokenize import word_tokenize\n'), ((579, 595), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['l'], {}), '(l)\n', (592, 595), False, 'from nltk.tokenize import word_tokenize\n')] |
import numpy as np
class Metric:
    """Confusion-matrix based multi-class metrics (pixel accuracy, per-class
    ratios, F1, IoU), e.g. for semantic segmentation evaluation.

    The matrix is indexed as matrix[true_label, predicted_label], so axis-1
    sums are per-class ground-truth totals and axis-0 sums are per-class
    prediction totals.
    """

    def __init__(self, num_classes, ignore_indexes=None):
        """num_classes: number of valid class ids (0 .. num_classes - 1).
        ignore_indexes: iterable of label ids excluded from all statistics."""
        self.num_classes = num_classes
        # Avoid the shared mutable-default pitfall while keeping the original
        # call signature (passing a list still works unchanged).
        self.ignore_indexes = [] if ignore_indexes is None else ignore_indexes
        self.matrix = np.zeros((self.num_classes, self.num_classes))

    def reset(self):
        """Clear all accumulated counts."""
        self.matrix.fill(0)

    def add(self, pred, label):
        """Accumulate a batch of integer predictions/labels (numpy arrays of
        identical shape) into the confusion matrix."""
        # Keep only positions whose label is a valid, non-ignored class id.
        mask = (label >= 0) & (label < self.num_classes)
        for ignore_index in self.ignore_indexes:
            mask &= (label != ignore_index)
        # Encode each (label, pred) pair as one flat index and histogram it.
        count = np.bincount(self.num_classes * label[mask] + pred[mask], minlength=self.num_classes ** 2)
        self.matrix += count.reshape((self.num_classes, self.num_classes))

    def PA(self):
        """Overall accuracy: correct predictions / total counted pixels."""
        return np.diag(self.matrix).sum() / self.matrix.sum()

    def mPA(self):
        """Mean of the per-class Ps() ratios."""
        return self.Ps().mean()

    def Ps(self):
        """Per-class diag / row-sum ratio.

        NOTE(review): with rows indexing ground-truth labels this is
        conventionally *recall*; the P/R naming is kept for compatibility.
        """
        return np.diag(self.matrix) / self.matrix.sum(axis=1)

    def Rs(self):
        """Per-class diag / column-sum ratio (conventionally *precision*;
        see the naming note on Ps)."""
        return np.diag(self.matrix) / self.matrix.sum(axis=0)

    def F1s(self):
        """Per-class F1 score, the harmonic mean of Ps and Rs."""
        ps = self.Ps()
        # BUG FIX: the original recomputed Ps() here, which collapsed F1 to
        # Ps itself; F1 needs the second (Rs) vector.
        rs = self.Rs()
        return 2 * ps * rs / (ps + rs)

    def IoUs(self):
        """Per-class intersection-over-union."""
        intersection = np.diag(self.matrix)
        union = np.sum(self.matrix, axis=0) + np.sum(self.matrix, axis=1) - np.diag(self.matrix)
        return intersection / union

    def mIoU(self):
        """Mean IoU over all classes."""
        return self.IoUs().mean()
| [
"numpy.diag",
"numpy.bincount",
"numpy.zeros",
"numpy.sum"
] | [((197, 243), 'numpy.zeros', 'np.zeros', (['(self.num_classes, self.num_classes)'], {}), '((self.num_classes, self.num_classes))\n', (205, 243), True, 'import numpy as np\n'), ((494, 588), 'numpy.bincount', 'np.bincount', (['(self.num_classes * label[mask] + pred[mask])'], {'minlength': '(self.num_classes ** 2)'}), '(self.num_classes * label[mask] + pred[mask], minlength=self.\n num_classes ** 2)\n', (505, 588), True, 'import numpy as np\n'), ((1205, 1225), 'numpy.diag', 'np.diag', (['self.matrix'], {}), '(self.matrix)\n', (1212, 1225), True, 'import numpy as np\n'), ((876, 896), 'numpy.diag', 'np.diag', (['self.matrix'], {}), '(self.matrix)\n', (883, 896), True, 'import numpy as np\n'), ((973, 993), 'numpy.diag', 'np.diag', (['self.matrix'], {}), '(self.matrix)\n', (980, 993), True, 'import numpy as np\n'), ((1302, 1322), 'numpy.diag', 'np.diag', (['self.matrix'], {}), '(self.matrix)\n', (1309, 1322), True, 'import numpy as np\n'), ((1242, 1269), 'numpy.sum', 'np.sum', (['self.matrix'], {'axis': '(0)'}), '(self.matrix, axis=0)\n', (1248, 1269), True, 'import numpy as np\n'), ((1272, 1299), 'numpy.sum', 'np.sum', (['self.matrix'], {'axis': '(1)'}), '(self.matrix, axis=1)\n', (1278, 1299), True, 'import numpy as np\n'), ((692, 712), 'numpy.diag', 'np.diag', (['self.matrix'], {}), '(self.matrix)\n', (699, 712), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from utils import *
import numpy as np
WIDTH = 9   # pits ("houses") per player row
HEIGHT = 2  # two rows, one per player
INIT_BALLS_COUNT_IN_PIT = 9  # every pit starts with 9 balls
MAX_ARRAY_LEN_OF_ENCODED_PIT_STATE = 1 # TODO: find right value; minimum must be 9 if the encoding is one-hot
BOARD_SIZE = WIDTH * HEIGHT  # 18 pits in total
# 81.0 -- score threshold used by Board.is_win (>= WIN_SCORE wins).
# NOTE(review): true division makes this a float; comparisons still work.
WIN_SCORE = (BOARD_SIZE * WIDTH)/HEIGHT
# Pluggable encode/decode strategies, presumably provided by
# `from utils import *`; the commented alternatives are one-hot variants.
PIT_STATE_ENCODER = array_to_array_without_none # data_array_to_one_hot_with_shape
PIT_STATE_DECODER = array_without_none_to_array # one_hot_batch_to_array
SCORE_ENCODER = number_to_number_without_none
SCORE_DECODER = number_without_none_to_number
TUZ_ENCODER = number_to_number_without_none # number_to_onehot
TUZ_DECODER = number_without_none_to_number # onehot_to_number
# Encoded-state layout (presumably; verify against get_encoded_state):
# 0-9 - player 1 green
# 10-18 - player -1 red
class Board():
__additional_components_count = 4
shape = (BOARD_SIZE + __additional_components_count, MAX_ARRAY_LEN_OF_ENCODED_PIT_STATE)
action_size = BOARD_SIZE
def __init__(self):
self.__size = WIDTH * HEIGHT
self.__init_state = [INIT_BALLS_COUNT_IN_PIT] * BOARD_SIZE
self.__pieces = self.__init_state
self.__players_scores = {
1 : 0, # player 1
-1 : 0 # player -1
}
self.__players_tuz = {
1 : None, # player 1
-1 : None # player -1
}
self.__canonical_player = 1
def __getitem__(self, index):
return self.get_encoded_state()[index]
    def get_encoded_state(self): #TODO: canonical_board_for_opponent_must_be = board * -1
        """Encode the board as an array of shape `Board.shape`.

        Each half carries one player's pit counts followed by that player's
        score (slot WIDTH) and tuz position (slot WIDTH + 1).  The half
        belonging to the non-canonical player is negated, so the sign of an
        entry marks ownership.
        """
        pieces = self.__pieces
        # Split the flat pit list into the two players' halves.
        mid=int((len(pieces) + 1) / 2)
        half_shape = (int(self.shape[0]/2),self.shape[1])
        firstHalf = PIT_STATE_ENCODER(pieces[:mid],half_shape)
        # The two slots after the pits hold score and tuz for this half.
        firstHalf[WIDTH - 1 + 1] = SCORE_ENCODER(self.__players_scores[1 * self.__canonical_player], MAX_ARRAY_LEN_OF_ENCODED_PIT_STATE)
        firstHalf[WIDTH - 1 + 2] = TUZ_ENCODER(self.__players_tuz[1 * self.__canonical_player], MAX_ARRAY_LEN_OF_ENCODED_PIT_STATE)
        secondHalf = PIT_STATE_ENCODER(pieces[mid:],half_shape)
        secondHalf[WIDTH - 1 + 1] = SCORE_ENCODER(self.__players_scores[-1 * self.__canonical_player], MAX_ARRAY_LEN_OF_ENCODED_PIT_STATE)
        secondHalf[WIDTH - 1 + 2] = TUZ_ENCODER(self.__players_tuz[-1 * self.__canonical_player], MAX_ARRAY_LEN_OF_ENCODED_PIT_STATE)
        # Negate whichever half does NOT belong to the canonical player.
        if self.__canonical_player == 1:
            secondHalf *= -1
        else:
            firstHalf *= -1
        result = np.concatenate((firstHalf, secondHalf), axis=0)
        return result
    def set_encoded_state(self,state):
        """Inverse of get_encoded_state: restore pits, scores, tuz positions
        and the canonical-player orientation from an encoded array."""
        state = state.copy()
        mid=int((len(state) + 1) / 2)
        firstHalf = state[:mid]
        secondHalf = state[mid:]
        # The sign of the half sums reveals which half was negated during
        # encoding, i.e. which real player occupies the first half.
        firstSum = 0
        secondSum = 0
        for onehot in firstHalf:
            firstSum += sum(onehot)
        for onehot in secondHalf:
            secondSum += sum(onehot)
        if firstSum > 0 or secondSum < 0: # player 1's data is in firstHalf
            self.__canonical_player = 1
            secondHalf *= -1
        else: # player -1's data is in firstHalf
            self.__canonical_player = -1
            firstHalf *= -1
        HALF_BOARD_SIZE = int(BOARD_SIZE/2)
        #first half of board
        self.__pieces = PIT_STATE_DECODER(firstHalf[:HALF_BOARD_SIZE]) # pit states
        self.__players_scores[1 * self.__canonical_player] = SCORE_DECODER(firstHalf[HALF_BOARD_SIZE]) # score
        self.__players_tuz[1 * self.__canonical_player] = TUZ_DECODER(firstHalf[HALF_BOARD_SIZE + 1]) # tuz position
        #second half of board
        self.__pieces += PIT_STATE_DECODER(secondHalf[:HALF_BOARD_SIZE]) # pit states
        self.__players_scores[-1 * self.__canonical_player] = SCORE_DECODER(secondHalf[HALF_BOARD_SIZE]) # score
        self.__players_tuz[-1 * self.__canonical_player] = TUZ_DECODER(secondHalf[HALF_BOARD_SIZE + 1]) # tuz position
def get_legal_moves(self, player):
return self.__generate_valid_moves(player)
def has_legal_moves(self):
return self.get_legal_moves(1).count(1) != 0 or self.get_legal_moves(-1).count(1) != 0
    def is_win(self, player):
        """Return True iff `player` (1 or -1) has won in the current position."""
        # Victory is achieved in one of two ways:
        # 1) collecting enough balls in one's own kazan (>= WIN_SCORE) while
        #    the opponent is still below the threshold.
        if self.__players_scores[player] >= WIN_SCORE and self.__players_scores[-player] < WIN_SCORE:
            return True
        # 2) "at syroo": after my move the opponent has no legal moves left
        #    and has not yet reached the winning score himself.
        if self.__generate_valid_moves(-player).count(1) == 0 and self.__players_scores[-player] < WIN_SCORE:
            return True
        return False
    def execute_move(self, move, player): # TODO: rename move to actions
        """Sow the balls of pit `move` for `player` (1 or -1), applying the
        capture and tuz rules, then return the new encoded state.

        Mutates the pit list, the scores and possibly `player`'s tuz.
        """
        #check valid moves
        if is_debug_mode():
            valids = self.__generate_valid_moves(player)
            if valids[move] == 0: #TODO: fix missing
                print(red("WARNING - Board.execute_move invalid action"))
                print("self.__pieces")
                print(self.__pieces)
                print("self.__players_tuz")
                print(self.__players_tuz)
                print("valids")
                print(valids)
                print("move")
                print(move)
                raise ValueError('Invalid action')
        game_state = self.__pieces
        balls_in_first_pit = game_state[move]
        # Index of the pit receiving the last ball, wrapping around the board.
        last_pit = move + balls_in_first_pit
        last_pit_looped = last_pit if last_pit < len(game_state) else last_pit % len(game_state)
        last_pit_looped -= 1
        if last_pit_looped < 0:
            last_pit_looped = len(game_state) - 1
        if balls_in_first_pit == 1:
            # If the source pit holds a single ball, it simply moves to the
            # next pit.
            last_pit_looped += 1
            if last_pit_looped >= len(game_state):
                last_pit_looped = 0
            game_state[move] = 0
            game_state[last_pit_looped] += balls_in_first_pit
        else:
            # The player takes all balls from one of his pits and, starting
            # with that same pit, sows them one by one counter-clockwise into
            # his own and the opponent's pits.
            game_state[move] = 0
            for pit in range(move,last_pit):
                if pit >= len(game_state):
                    pit = pit % len(game_state)
                game_state[pit] += 1
        # If the last ball lands in an opponent's pit and makes that pit's
        # ball count even, all its balls are captured into the mover's kazan.
        if (
            self.__is_pit_dont_belongs_to_player(last_pit_looped,player) and
            game_state[last_pit_looped] % 2 == 0
        ):
            self.__players_scores[player] += game_state[last_pit_looped]
            game_state[last_pit_looped] = 0
        # NOTE(review): BOARD_SIZE / HEIGHT yields a float (8.0); the integer
        # comparisons below still behave correctly, but // would be cleaner.
        opponents_last_pit = BOARD_SIZE - 1 if player == 1 else BOARD_SIZE / HEIGHT - 1
        opponents_tuz = self.__players_tuz[-player]
        # If player A's last ball lands in a pit of player B and brings it to
        # exactly three balls, that pit becomes player A's "tuz", subject to:
        # 1) a tuz cannot be claimed in the opponent's last (ninth) pit,
        # 2) a tuz cannot be claimed in the pit whose index equals the
        #    opponent's tuz,
        # 3) each player may claim only one tuz per game.
        if (self.__is_pit_dont_belongs_to_player(last_pit_looped,player) and
            game_state[last_pit_looped] == 3 and
            last_pit_looped != opponents_last_pit and # 1)
            last_pit_looped != opponents_tuz and # 2)
            self.__players_tuz[player] == None): # 3)
            # The three balls go straight into the claiming player's kazan.
            self.__players_scores[player] += game_state[last_pit_looped]
            game_state[last_pit_looped] = 0
            self.__players_tuz[player] = last_pit_looped
        self.__pieces = game_state
        if self.__players_tuz[player] is not None and (self.__players_tuz[player] < 0 or self.__players_tuz[player] >= self.action_size):
            print("Warnning: execute_move out of bounds") # TODO: fix
            print("game_state " + str(game_state))
            print("tuz " + str(self.__players_tuz[player]))
            print("move " + str(move))
            print("player " + str(player))
            print("last_pit " + str(last_pit))
            print("last_pit_looped " + str(last_pit_looped))
            print("balls_in_first_pit" + str(balls_in_first_pit))
        # "At syroo": if after player A's move all of A's pits are empty, A is
        # stranded.  Player B moves; if his move puts no ball into A's pits,
        # A has no moves left, the game ends and B sweeps the remaining balls
        # into his kazan before counting.  Here: if the opponent has no valid
        # moves, the mover collects every ball still on the board.
        if self.__generate_valid_moves(-player).count(1) == 0:
            for i, piece in enumerate(self.__pieces):
                self.__players_scores[player] += piece
                self.__pieces[i] = 0
        return self.get_encoded_state()
    def __generate_valid_moves(self,player):
        """Return a 0/1 list of length `action_size`: 1 where `player` may
        legally sow.  A pit is playable if it lies on the player's side (or
        is his tuz) and holds at least one ball; the opponent's tuz is never
        playable."""
        # player *= self.__canonical_player
        possible_moves = [0] * self.action_size
        game_state = self.__pieces
        for i in range(0,self.action_size):
            if (
                player * self.__canonical_player == 1 and (i < BOARD_SIZE/2) or #player 1 side
                player * self.__canonical_player == -1 and (i >= BOARD_SIZE/2) #player -1 side
            ):
                possible_moves[i] = 1 if game_state[i] > 0 else 0 # a pit can be sown from only if it holds balls
        if self.__players_tuz[player] is not None and (self.__players_tuz[player] < 0 or self.__players_tuz[player] >= self.action_size) :
            print("Warnning: __generate_valid_moves out of bounds") # TODO: fix
            print("game_state " + str(game_state))
            print("tuz " + str(self.__players_tuz[player]))
        if (self.__players_tuz[player] is not None and self.__players_tuz[player] >= 0 and self.__players_tuz[player] < self.action_size): # the player's own tuz is also playable
            possible_moves[self.__players_tuz[player]] = 1 if game_state[self.__players_tuz[player]] > 0 else 0
        if (self.__players_tuz[-player] is not None and self.__players_tuz[-player] >= 0 and self.__players_tuz[-player] < self.action_size):
            possible_moves[self.__players_tuz[-player]] = 0
        return possible_moves
def __is_pit_dont_belongs_to_player(self,pit,player):
if (player * self.__canonical_player) == 1:
players_pit = list(range(0, int(self.__size / 2)))
else:
players_pit = list(range(int(self.__size / 2), self.__size))
if self.__players_tuz[-player] in players_pit:
players_pit.remove(self.__players_tuz[-player])
if self.__players_tuz[player] != None:
players_pit.append(self.__players_tuz[player])
return pit not in players_pit
    def display(self):
        """Render the board as one line: pit counts colorized by which side
        may currently play them, followed by scores and tuz positions.

        `green`/`red` are presumably terminal-colorizing helpers from utils
        (TODO confirm); pits playable by neither side stay uncolored.
        """
        str_pieces = []
        green_valids = self.__generate_valid_moves(1) #valids for green
        red_valids = self.__generate_valid_moves(-1) #valids for red
        # Swap the colors when the canonical orientation is flipped.
        canonicalPlayer1Color = green if self.__canonical_player == 1 else red
        canonicalPlayer2Color = red if self.__canonical_player == 1 else green
        for counter, value in enumerate( self.__pieces):
            if green_valids[counter] == 1:
                str_pieces.append(canonicalPlayer1Color(value))
            elif red_valids[counter] == 1:
                str_pieces.append(canonicalPlayer2Color(value))
            else:
                str_pieces.append(str(value))
        return "pieces: " + "\t".join(str_pieces) + "\tscores: " + canonicalPlayer1Color("p1 - " + str(self.__players_scores[1])) + canonicalPlayer2Color(" p-1 - " + str(self.__players_scores[-1])) + " tuz: " + canonicalPlayer1Color("p1 - " + str(self.__players_tuz[1])) + canonicalPlayer2Color(" p-1 - " + str(self.__players_tuz[-1]))
    #for tests
    def set_pieces(self,pieces):
        """Test helper: overwrite the raw pit list without any validation."""
        self.__pieces = pieces
def get_pieces(self): # for players
return copy.copy(self.__pieces)
def get_players_scores(self):
return copy.copy(self.__players_scores)
def get_players_tuz(self):
return copy.copy(self.__players_tuz)
    def set_players_tuz(self,player,value):
        """Test helper: directly assign pit index `value` (or None) as the
        tuz of `player` (1 or -1), bypassing the in-game tuz rules."""
        self.__players_tuz[player] = value
    def get_canonical_player(self):
        """Which real player (1 or -1) occupies the canonical first half of
        the encoded board orientation."""
        return self.__canonical_player | [
"numpy.concatenate"
] | [((2270, 2317), 'numpy.concatenate', 'np.concatenate', (['(firstHalf, secondHalf)'], {'axis': '(0)'}), '((firstHalf, secondHalf), axis=0)\n', (2284, 2317), True, 'import numpy as np\n')] |
def test_standard_absolute_deviation():
    """Smoke-test StandardAbsoluteDeviation: per-sample score shape for both
    supported center statistics, and rejection of unsupported ones."""
    from pysad.models import StandardAbsoluteDeviation
    import numpy as np
    from numpy.testing import assert_raises
    from pysad.utils import fix_seed

    fix_seed(61)
    X = np.random.rand(150, 1)

    # Both supported statistics must yield exactly one score per sample.
    for statistic in ("mean", "median"):
        model = StandardAbsoluteDeviation(substracted_statistic=statistic)
        model = model.fit(X)
        y_pred = model.score(X)
        assert y_pred.shape == (X.shape[0],)

    # Anything else is rejected at construction time.
    for bad_statistic in ("asd", None):
        with assert_raises(ValueError):
            StandardAbsoluteDeviation(substracted_statistic=bad_statistic)
| [
"numpy.random.rand",
"pysad.utils.fix_seed",
"numpy.testing.assert_raises",
"pysad.models.StandardAbsoluteDeviation"
] | [((205, 217), 'pysad.utils.fix_seed', 'fix_seed', (['(61)'], {}), '(61)\n', (213, 217), False, 'from pysad.utils import fix_seed\n'), ((226, 248), 'numpy.random.rand', 'np.random.rand', (['(150)', '(1)'], {}), '(150, 1)\n', (240, 248), True, 'import numpy as np\n'), ((262, 317), 'pysad.models.StandardAbsoluteDeviation', 'StandardAbsoluteDeviation', ([], {'substracted_statistic': '"""mean"""'}), "(substracted_statistic='mean')\n", (287, 317), False, 'from pysad.models import StandardAbsoluteDeviation\n'), ((425, 482), 'pysad.models.StandardAbsoluteDeviation', 'StandardAbsoluteDeviation', ([], {'substracted_statistic': '"""median"""'}), "(substracted_statistic='median')\n", (450, 482), False, 'from pysad.models import StandardAbsoluteDeviation\n'), ((587, 612), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (600, 612), False, 'from numpy.testing import assert_raises\n'), ((622, 676), 'pysad.models.StandardAbsoluteDeviation', 'StandardAbsoluteDeviation', ([], {'substracted_statistic': '"""asd"""'}), "(substracted_statistic='asd')\n", (647, 676), False, 'from pysad.models import StandardAbsoluteDeviation\n'), ((687, 712), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (700, 712), False, 'from numpy.testing import assert_raises\n'), ((722, 775), 'pysad.models.StandardAbsoluteDeviation', 'StandardAbsoluteDeviation', ([], {'substracted_statistic': 'None'}), '(substracted_statistic=None)\n', (747, 775), False, 'from pysad.models import StandardAbsoluteDeviation\n')] |
"""
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
Please note that this module is private. All functions and objects
are available in the main ``numpy`` namespace - use that instead.
"""
from numpy.version import version as __version__
import os
# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
# Temporarily force the *_MAIN_FREE environment variables to '1' while the
# C extension is imported; only keys we added ourselves are removed again
# in the finally block, so a user-provided setting is left untouched.
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
    if envkey not in os.environ:
        os.environ[envkey] = '1'
        env_added.append(envkey)
try:
    from . import multiarray
except ImportError as exc:
    import sys
    # Re-raise with a much more actionable message: importing the compiled
    # C-extension is the most common NumPy installation failure.
    msg = """
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
    https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
  * The Python version is: Python%d.%d from "%s"
  * The NumPy version is: "%s"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
       __version__, exc)
    raise ImportError(msg)
finally:
    # Undo only the environment changes made above, then drop the module
    # namespace clutter (this runs at package-import time).
    for envkey in env_added:
        del os.environ[envkey]
    del envkey
    del env_added
    del os
from . import umath
# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
        hasattr(umath, '_multiarray_umath')):
    import sys
    # A stale pre-refactor NumPy is shadowing this one on sys.path.
    path = sys.modules['numpy'].__path__
    msg = ("Something is wrong with the numpy installation. "
           "While importing we detected an older version of "
           "numpy in {}. One method of fixing this is to repeatedly uninstall "
           "numpy until none is found, then reinstall this version.")
    raise ImportError(msg.format(path))
from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from . import defchararray as char
from . import records as rec
from .records import *
from .memmap import *
from .defchararray import chararray
from . import function_base
from .function_base import *
from . import machar
from .machar import *
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt
from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods
# Public API of numpy.core: start with the submodule aliases, then
# aggregate the __all__ lists re-exported from each star-imported submodule.
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
__all__ += rec.__all__
__all__ += ['chararray']
__all__ += function_base.__all__
__all__ += machar.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
# Make it possible so that ufuncs can be pickled
# Here are the loading and unloading functions
# The name numpy.core._ufunc_reconstruct must be
# available for unpickling to work.
def _ufunc_reconstruct(module, name):
    """Return attribute *name* of *module*, importing the module first.

    The ``fromlist`` argument forces ``__import__`` to hand back the
    inner-most module instead of the top-level package, which makes it
    possible to unpickle non-toplevel ufuncs such as
    ``scipy.special.expit``.
    """
    return getattr(__import__(module, fromlist=[name]), name)


def _ufunc_reduce(func):
    """Pickle helper: reduce *func* to (reconstructor, (module, name))."""
    from pickle import whichmodule
    func_name = func.__name__
    origin_module = whichmodule(func, func_name)
    return _ufunc_reconstruct, (origin_module, func_name)
import copyreg
# Register the pickle protocol for ufunc objects; `ufunc` is presumably
# brought into scope by the earlier `from .numeric import *` — verify.
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
del copyreg
del _ufunc_reduce
from numpy._pytesttester import PytestTester
# Expose `numpy.core.test()` for running the package's test suite.
test = PytestTester(__name__)
del PytestTester
| [
"pickle.whichmodule",
"copyreg.pickle",
"numpy._pytesttester.PytestTester"
] | [((4087, 4143), 'copyreg.pickle', 'copyreg.pickle', (['ufunc', '_ufunc_reduce', '_ufunc_reconstruct'], {}), '(ufunc, _ufunc_reduce, _ufunc_reconstruct)\n', (4101, 4143), False, 'import copyreg\n'), ((4295, 4317), 'numpy._pytesttester.PytestTester', 'PytestTester', (['__name__'], {}), '(__name__)\n', (4307, 4317), False, 'from numpy._pytesttester import PytestTester\n'), ((4038, 4061), 'pickle.whichmodule', 'whichmodule', (['func', 'name'], {}), '(func, name)\n', (4049, 4061), False, 'from pickle import whichmodule\n')] |
import numpy as np
import sys, json
import numpy.random as rnd
from timeit import default_timer as now
######################################################
# GLOBAL DECLARATIONS THAT WILL BE USED IN ALL FILES #
######################################################
# When non-zero, benchmark implementations may print their data (debug aid).
PRINT_DATA = 0
# make xrange available in python 3
try:
    xrange
except NameError:
    xrange = range
# Random data range [LOW, HIGH) and the fixed RNG seed for reproducibility.
LOW = 0
HIGH = 10.0
SEED = 9
# Benchmark tuning parameters — presumably halo width, string buffer size,
# device index and local work-group size for the device backends (confirm
# against the kernel implementations).
HALO = 1
STR_SIZE = 256
DEVICE = 0
LWS = 2 ** 10
###############################################
def gen_data(rows, cols):
    """Build a random integer field of shape (rows, cols) in [LOW, HIGH)
    together with an uninitialized result buffer of length *cols*."""
    field = rnd.randint(LOW, HIGH, (rows, cols))
    result = np.empty(cols)
    return field, result
##############################################
def run(name, alg, sizes=5, step=2, rows=2 ** 10, cols=2 ** 6, pyramid_height=20):
    """Benchmark driver: run *alg* on progressively larger inputs.

    Parses command-line overrides for the defaults, times `repeat`
    invocations of ``alg(data, rows, cols, pyramid_height, result)`` per
    size step, writes per-size timings to ``runtimes.csv`` and a metrics
    summary to a JSON file.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--steps", required=False, default=sizes, help="Number of steps"
    )
    parser.add_argument(
        "--step", required=False, default=step, help="Factor for each step"
    )
    parser.add_argument("--rows", required=False, default=rows, help="Initial row size")
    parser.add_argument(
        "--cols", required=False, default=cols, help="Initial column size"
    )
    parser.add_argument(
        "--pyht", required=False, default=pyramid_height, help="Initial pyramid height"
    )
    parser.add_argument(
        "--repeat", required=False, default=1, help="Iterations inside measured region"
    )
    parser.add_argument(
        "--json",
        required=False,
        default=__file__.replace("py", "json"),
        help="output json data filename",
    )
    args = parser.parse_args()
    # Command-line values win over the function arguments.
    sizes = int(args.steps)
    step = int(args.step)
    rows = int(args.rows)
    cols = int(args.cols)
    pyramid_height = int(args.pyht)
    repeat = int(args.repeat)
    kwargs = {}  # NOTE(review): unused — kept for interface parity with sibling drivers?
    output = {}
    output["name"] = name
    output["sizes"] = sizes
    output["step"] = step
    output["repeat"] = repeat
    output["randseed"] = SEED
    output["metrics"] = []
    rnd.seed(SEED)
    # Line-buffered (buffering=1) so timings survive a crash mid-run.
    f2 = open("runtimes.csv", "w", 1)
    for i in xrange(sizes):
        data, result = gen_data(rows, cols)
        iterations = xrange(repeat)
        t0 = now()
        for _ in iterations:
            alg(data, rows, cols, pyramid_height, result)
        # Wall-clock time for all `repeat` iterations at this size.
        time = now() - t0
        f2.write(str(rows) + "," + str(time) + "\n")
        rows *= step
        # mops/nopt are reported as zero by this harness.
        mops = 0.0
        nopt = 0
        print(
            "ERF: {:15s} | Size: {:10d} | MOPS: {:15.2f} | TIME: {:10.6f}".format(
                name, nopt, mops, time
            ),
            flush=True,
        )
        output["metrics"].append((nopt, mops, time))
        # Shrink the repeat count as sizes grow, never below one.
        repeat -= step
        if repeat < 1:
            repeat = 1
    json.dump(output, open(args.json, "w"), indent=2, sort_keys=True)
    f2.close()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.empty",
"timeit.default_timer",
"numpy.random.randint"
] | [((766, 791), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (789, 791), False, 'import argparse\n'), ((2008, 2022), 'numpy.random.seed', 'rnd.seed', (['SEED'], {}), '(SEED)\n', (2016, 2022), True, 'import numpy.random as rnd\n'), ((544, 580), 'numpy.random.randint', 'rnd.randint', (['LOW', 'HIGH', '(rows, cols)'], {}), '(LOW, HIGH, (rows, cols))\n', (555, 580), True, 'import numpy.random as rnd\n'), ((582, 596), 'numpy.empty', 'np.empty', (['cols'], {}), '(cols)\n', (590, 596), True, 'import numpy as np\n'), ((2183, 2188), 'timeit.default_timer', 'now', ([], {}), '()\n', (2186, 2188), True, 'from timeit import default_timer as now\n'), ((2291, 2296), 'timeit.default_timer', 'now', ([], {}), '()\n', (2294, 2296), True, 'from timeit import default_timer as now\n')] |
import numpy as np
import random
from ugd.model.digraph import RstDiGraph
import inspect
'''Random function'''
def rand_choise(q):
    """Single Bernoulli trial: return True with probability *q*."""
    return np.random.binomial(1, q) == 1
def rand_element_of_set(set):
    """Return one element of *set* chosen uniformly at random.

    ``random.sample`` stopped accepting ``set`` objects in Python 3.11
    (they were deprecated in 3.9), so the collection is materialized as a
    tuple before sampling.  Accepts any finite iterable collection.
    """
    return random.sample(tuple(set), 1)[0]
''' path algorithms, for construction marking and unmarking'''
def get_path(startnode, active_start, graph, pathnumber):
    """Follow the marks of *pathnumber* from *startnode*, alternating
    between active and passive steps, and return the visited nodes.

    All visited-flags are cleared before returning.  Per the original
    author's note, this is only allowed on "schlaufen" of type 1.
    """
    visited = [startnode]
    is_active = active_start
    keep_going, current = get_next_node(graph, startnode, is_active, pathnumber)
    while keep_going:
        is_active = not is_active
        visited.append(current)
        keep_going, current = get_next_node(graph, current, is_active, pathnumber)
    del_nodes_mark(graph)
    return visited
def get_next_node(graph, working_node, is_aktive, pathnumber):
    """Advance one step along the marking *pathnumber*.

    Looks up the first neighbour of *working_node* whose (active or
    passive, depending on *is_aktive*) mark equals *pathnumber*.  The
    first time a node is traversed its visited-flag is set and
    ``(True, neighbour)`` is returned; on a repeat visit
    ``(False, neighbour)`` is returned.  ``(False, None)`` means no
    matching mark exists.
    """
    node = graph.nodes[working_node]
    marks = node.active_marked if is_aktive else node.passive_marked
    for neighbour, mark in marks.items():
        if mark != pathnumber:
            continue
        if is_aktive:
            if node.active_visited:
                return False, neighbour
            node.active_visited = True
        else:
            if node.passive_visited:
                return False, neighbour
            node.passive_visited = True
        return True, neighbour
    return False, None
def del_nodes_mark(graph, startnode=None):
    """Clear the visited-flags of every node in *graph*.

    *startnode* is accepted for signature compatibility but is unused.
    """
    for idx in range(graph.node_number):
        del_node_mark(graph, idx)
def del_node_mark(graph, node):
    """Reset both the active and the passive visited-flag of *node*."""
    target = graph.nodes[node]
    target.active_visited = False
    target.passive_visited = False
def form_to_set_index(graph, edge):
    """Map both endpoints of *edge* to the index of the restriction set
    containing them (the last containing set wins, matching the original
    scan order; raises UnboundLocalError when an endpoint is in no set).
    """
    source, target = edge[0], edge[1]
    for idx, members in enumerate(graph.restriction_set_list):
        if source in members:
            from_ind = idx
    for idx, members in enumerate(graph.restriction_set_list):
        if target in members:
            to_ind = idx
    return from_ind, to_ind
def all_edges(graph):
    """Collect every directed edge of *graph* as a deduplicated list of
    (from_node, to_node) pairs (order unspecified, as before)."""
    edge_set = set()
    for src in range(graph.node_number):
        edge_set.update((src, dst) for dst in graph.nodes[src].outnodes)
    return list(edge_set)
''' Help function for debugging and controll'''
def check_symmetric(a, tol=1e-8):
    """Return True when matrix *a* equals its transpose within *tol*."""
    return np.allclose(a, np.transpose(a), atol=tol)
def isdebugging():
    """Return True when the PyDev debugger (pydevd) is on the call stack."""
    return any(frame[1].endswith("pydevd.py") for frame in inspect.stack())
| [
"random.sample",
"numpy.allclose",
"inspect.stack",
"numpy.random.binomial"
] | [((191, 215), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'q'], {}), '(1, q)\n', (209, 215), True, 'import numpy as np\n'), ((2472, 2501), 'numpy.allclose', 'np.allclose', (['a', 'a.T'], {'atol': 'tol'}), '(a, a.T, atol=tol)\n', (2483, 2501), True, 'import numpy as np\n'), ((2540, 2555), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (2553, 2555), False, 'import inspect\n'), ((281, 302), 'random.sample', 'random.sample', (['set', '(1)'], {}), '(set, 1)\n', (294, 302), False, 'import random\n')] |
#Program to find the inverse of a matrix.
#Developed by: <NAME>
#RegisterNumber: 21004191
import numpy as np

# Build the fixed 3x3 matrix, invert it, and print the inverse.
a = np.array([[2, 1, 1],
              [1, 1, 1],
              [1, -1, 2]])
values = np.linalg.inv(a)
print(values)
"numpy.linalg.inv",
"numpy.array"
] | [((115, 159), 'numpy.array', 'np.array', (['[[2, 1, 1], [1, 1, 1], [1, -1, 2]]'], {}), '([[2, 1, 1], [1, 1, 1], [1, -1, 2]])\n', (123, 159), True, 'import numpy as np\n'), ((161, 177), 'numpy.linalg.inv', 'np.linalg.inv', (['a'], {}), '(a)\n', (174, 177), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "<NAME>"
""" JQM_CV - Python implementations of Dunn and Davis Bouldin clustering validity indices
dunn(k_list):
Slow implementation of Dunn index that depends on numpy
-- basec.pyx Cython implementation is much faster but flower than dunn_fast()
dunn_fast(points, labels):
Fast implementation of Dunn index that depends on numpy and sklearn.pairwise
-- No Cython implementation
davisbouldin(k_list, k_centers):
Implementation of Davis Boulding index that depends on numpy
-- basec.pyx Cython implementation is much faster
"""
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
def delta(ck, cl):
values = np.ones([len(ck), len(cl)])*10000
for i in range(0, len(ck)):
for j in range(0, len(cl)):
values[i, j] = np.linalg.norm(ck[i]-cl[j])
return np.min(values)
def big_delta(ci):
    """Cluster diameter for the Dunn index.

    Returns the maximum Euclidean distance between any two points of
    cluster *ci* (shape [N, p]).  Replaces the original O(N^2) Python
    double loop with a single broadcasted distance computation.
    """
    ci = np.asarray(ci)
    pair_dists = np.linalg.norm(ci[:, None, :] - ci[None, :, :], axis=-1)
    return np.max(pair_dists)
def dunn(k_list):
    """ Dunn index [CVI]
    Parameters
    ----------
    k_list : list of np.arrays
        A list containing a numpy array for each cluster |c| = number of clusters
        c[K] is np.array([N, p]) (N : number of samples in cluster K, p : sample dimension)
    """
    n_clusters = len(k_list)
    # Off-diagonal entries hold pairwise inter-cluster distances; the large
    # sentinel keeps the (never-written) diagonal out of the minimum.
    deltas = np.ones([n_clusters, n_clusters]) * 1000000
    big_deltas = np.zeros([n_clusters, 1])
    for k in range(n_clusters):
        for l in range(n_clusters):
            if l == k:
                continue
            deltas[k, l] = delta(k_list[k], k_list[l])
        big_deltas[k] = big_delta(k_list[k])
    return np.min(deltas) / np.max(big_deltas)
def delta_fast(ck, cl, distances):
    """Minimum non-zero distance between the points selected by the
    boolean masks *ck* and *cl*, looked up in the precomputed *distances*
    matrix."""
    selected = distances[np.where(ck)][:, np.where(cl)]
    nonzero = selected[selected != 0]
    return np.min(nonzero)
def big_delta_fast(ci, distances):
    """Maximum distance between any two points selected by the boolean
    mask *ci*, looked up in the precomputed *distances* matrix."""
    idx = np.where(ci)
    return np.max(distances[idx][:, idx])
def dunn_fast(points, labels):
    """ Dunn index - FAST (using sklearn pairwise euclidean_distance function)
    Parameters
    ----------
    points : np.array
        np.array([N, p]) of all points
    labels: np.array
        np.array([N]) labels of all points
    """
    distances = euclidean_distances(points)
    ks = np.sort(np.unique(labels))
    n_clusters = len(ks)
    deltas = np.ones([n_clusters, n_clusters]) * 1000000
    big_deltas = np.zeros([n_clusters, 1])
    for k in range(n_clusters):
        for l in range(n_clusters):
            if l == k:
                continue
            deltas[k, l] = delta_fast(labels == ks[k], labels == ks[l], distances)
        big_deltas[k] = big_delta_fast(labels == ks[k], distances)
    return np.min(deltas) / np.max(big_deltas)
def big_s(x, center):
    """Cluster scatter for the Davies-Bouldin index.

    Returns the mean Euclidean distance from the points *x* (shape
    [N, p]) to *center* (shape [p]).  Replaces the original per-point
    Python loop with a single broadcasted norm over axis 1.
    """
    x = np.asarray(x)
    return np.mean(np.linalg.norm(x - center, axis=1))
def davisbouldin(k_list, k_centers):
    """ Davis Bouldin Index
    Parameters
    ----------
    k_list : list of np.arrays
        A list containing a numpy array for each cluster |c| = number of clusters
        c[K] is np.array([N, p]) (N : number of samples in cluster K, p : sample dimension)
    k_centers : np.array
        The array of the cluster centers (prototypes) of type np.array([K, p])
    """
    len_k_list = len(k_list)
    # big_ss[k]: scatter of cluster k; d_eucs[k, l]: distance between centers.
    big_ss = np.zeros([len_k_list], dtype=np.float64)
    d_eucs = np.zeros([len_k_list, len_k_list], dtype=np.float64)
    db = 0
    for k in range(len_k_list):
        big_ss[k] = big_s(k_list[k], k_centers[k])
    for k in range(len_k_list):
        for l in range(0, len_k_list):
            d_eucs[k, l] = np.linalg.norm(k_centers[k]-k_centers[l])
    for k in range(len_k_list):
        # Similarity ratios of cluster k with every other cluster; `values`
        # has len_k_list-1 slots because k itself is skipped, hence the
        # l-1 offset in the second loop.
        values = np.zeros([len_k_list-1], dtype=np.float64)
        for l in range(0, k):
            values[l] = (big_ss[k] + big_ss[l])/d_eucs[k, l]
        for l in range(k+1, len_k_list):
            values[l-1] = (big_ss[k] + big_ss[l])/d_eucs[k, l]
        # DB accumulates the worst-case (maximum) similarity per cluster.
        db += np.max(values)
    res = db/len_k_list
    return res
| [
"numpy.zeros",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.nonzero",
"numpy.min",
"numpy.max",
"numpy.linalg.norm",
"numpy.where",
"numpy.unique"
] | [((1445, 1459), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (1451, 1459), True, 'import numpy as np\n'), ((1687, 1701), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (1693, 1701), True, 'import numpy as np\n'), ((2552, 2566), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (2558, 2566), True, 'import numpy as np\n'), ((2732, 2746), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (2738, 2746), True, 'import numpy as np\n'), ((3054, 3081), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['points'], {}), '(points)\n', (3073, 3081), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4230, 4270), 'numpy.zeros', 'np.zeros', (['[len_k_list]'], {'dtype': 'np.float64'}), '([len_k_list], dtype=np.float64)\n', (4238, 4270), True, 'import numpy as np\n'), ((4285, 4337), 'numpy.zeros', 'np.zeros', (['[len_k_list, len_k_list]'], {'dtype': 'np.float64'}), '([len_k_list, len_k_list], dtype=np.float64)\n', (4293, 4337), True, 'import numpy as np\n'), ((2355, 2369), 'numpy.min', 'np.min', (['deltas'], {}), '(deltas)\n', (2361, 2369), True, 'import numpy as np\n'), ((2370, 2388), 'numpy.max', 'np.max', (['big_deltas'], {}), '(big_deltas)\n', (2376, 2388), True, 'import numpy as np\n'), ((2518, 2536), 'numpy.nonzero', 'np.nonzero', (['values'], {}), '(values)\n', (2528, 2536), True, 'import numpy as np\n'), ((3100, 3117), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (3109, 3117), True, 'import numpy as np\n'), ((3518, 3532), 'numpy.min', 'np.min', (['deltas'], {}), '(deltas)\n', (3524, 3532), True, 'import numpy as np\n'), ((3533, 3551), 'numpy.max', 'np.max', (['big_deltas'], {}), '(big_deltas)\n', (3539, 3551), True, 'import numpy as np\n'), ((3694, 3723), 'numpy.linalg.norm', 'np.linalg.norm', (['(x[i] - center)'], {}), '(x[i] - center)\n', (3708, 3723), True, 'import numpy as np\n'), ((4639, 4683), 'numpy.zeros', 'np.zeros', (['[len_k_list - 1]'], {'dtype': 'np.float64'}), 
'([len_k_list - 1], dtype=np.float64)\n', (4647, 4683), True, 'import numpy as np\n'), ((4898, 4912), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (4904, 4912), True, 'import numpy as np\n'), ((1391, 1420), 'numpy.linalg.norm', 'np.linalg.norm', (['(ck[i] - cl[j])'], {}), '(ck[i] - cl[j])\n', (1405, 1420), True, 'import numpy as np\n'), ((1633, 1662), 'numpy.linalg.norm', 'np.linalg.norm', (['(ci[i] - ci[j])'], {}), '(ci[i] - ci[j])\n', (1647, 1662), True, 'import numpy as np\n'), ((2466, 2478), 'numpy.where', 'np.where', (['ck'], {}), '(ck)\n', (2474, 2478), True, 'import numpy as np\n'), ((2483, 2495), 'numpy.where', 'np.where', (['cl'], {}), '(cl)\n', (2491, 2495), True, 'import numpy as np\n'), ((2633, 2645), 'numpy.where', 'np.where', (['ci'], {}), '(ci)\n', (2641, 2645), True, 'import numpy as np\n'), ((2650, 2662), 'numpy.where', 'np.where', (['ci'], {}), '(ci)\n', (2658, 2662), True, 'import numpy as np\n'), ((4544, 4587), 'numpy.linalg.norm', 'np.linalg.norm', (['(k_centers[k] - k_centers[l])'], {}), '(k_centers[k] - k_centers[l])\n', (4558, 4587), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Contains functions for reading flow fields in various formats
"""
import numpy as np
import xarray as xr
from glob import glob
import os, re
def create_sample_field(frame=0):
    """ creates a sample dataset for the tests """
    x = np.arange(32., 128., 32.)
    y = np.arange(16., 128., 16.)
    xm, ym = np.meshgrid(x, y)
    u = np.ones_like(xm.T) + np.arange(0.0, 7.0)
    v = np.zeros_like(ym.T) + np.random.rand(3, 1) - .5
    u = u[:, :, np.newaxis]
    v = v[:, :, np.newaxis]
    chc = np.ones_like(u)
    # plt.quiver(xm.T,ym.T,u,v)
    dims = ('x', 'y', 't')
    coords = {'x': x, 'y': y, 't': [frame]}
    data = xr.Dataset({
        'u': xr.DataArray(u, dims=dims, coords=coords),
        'v': xr.DataArray(v, dims=dims, coords=coords),
        'chc': xr.DataArray(chc, dims=dims, coords=coords),
    })
    data.attrs['variables'] = ['x', 'y', 'u', 'v']
    data.attrs['units'] = ['pix', 'pix', 'pix/dt', 'pix/dt']
    data.attrs['dt'] = 1.0
    data.attrs['files'] = ''
    return data
def create_sample_dataset(n=5):
    """ using create_sample_field that has random part in it, create
    a sample dataset of length 'n' """
    frames = [create_sample_field(frame=i) for i in range(n)]
    combined = xr.concat(frames, dim='t')
    combined.attrs['variables'] = ['x', 'y', 'u', 'v']
    combined.attrs['units'] = ['pix', 'pix', 'pix/dt', 'pix/dt']
    combined.attrs['dt'] = 1.0
    combined.attrs['files'] = ''
    return combined
def loadvec(filename, rows=None, cols=None, variables=None, units=None, dt=None, frame=0):
    """
    loadvec(filename,rows=rows,cols=cols)
    Loads the VEC file (TECPLOT format by TSI Inc.), OpenPIV VEC or TXT formats
    Arguments:
        filename : file name, expected to have a header and 5 columns
        rows, cols : number of rows and columns of a vector field,
        if None, None, then parse_header is called to infer the number
        written in the header
        dt : time interval (default is None)
        frame : frame or time marker (default is None)
    Output:
        data is a xAarray Dataset, see xarray for help
    """
    if rows is None or cols is None:
        variables, units, rows, cols, dt, frame = parse_header(filename)
    if rows is None:  # means no headers
        # Header-less file: grid shape must be inferred from the unique
        # x and y coordinate values in the first two columns.
        d = np.loadtxt(filename,usecols=(0,1,2,3,4))
        x = np.unique(d[:,0])
        y = np.unique(d[:,1])
        d = d.reshape(len(y),len(x),5).transpose(1,0,2)
    else:
        # TECPLOT-style file: skip the header line, comma-separated.
        d = np.loadtxt(filename,skiprows=1,delimiter=',',usecols=(0,1,2,3,4)).reshape(rows,cols,5)
        x = d[:,:,0][0,:]
        y = d[:,:,1][:,0]
    u = d[:,:,2]
    v = d[:,:,3]
    chc = d[:,:,4]
    # extend dimensions
    # Add a singleton time axis so single frames concatenate along 't'.
    u = u[:,:,np.newaxis]
    v = v[:,:,np.newaxis]
    chc = chc[:,:,np.newaxis]
    u = xr.DataArray(u,dims=('x','y','t'),coords={'x':x,'y':y,'t':[frame]})
    v = xr.DataArray(v,dims=('x','y','t'),coords={'x':x,'y':y,'t':[frame]})
    chc = xr.DataArray(chc,dims=('x','y','t'),coords={'x':x,'y':y,'t':[frame]})
    data = xr.Dataset({'u': u, 'v': v,'chc':chc})
    data.attrs['variables'] = variables
    data.attrs['units'] = units
    data.attrs['dt'] = dt
    data.attrs['files'] = filename
    return data
def load_directory(path, basename=''):
    """
    load_directory (path)
    Loads all the .VEC files in the directory into a single
    xarray dataset with variables and units added as attributes
    Input:
        directory : path to the directory with .vec files
    Output:
        data : xarray DataSet with dimensions: x,y,t and
        data arrays of u,v,
        attributes of variables and units
    See more: loadvec
    """
    files = sorted(glob(os.path.join(path, basename + '*.vec')))
    # Header metadata is taken from the first file and reused for all.
    variables, units, rows, cols, dt, frame = parse_header(files[0])
    fields = [
        loadvec(f, rows, cols, variables, units, dt, frame + i - 1)
        for i, f in enumerate(files)
    ]
    combined = xr.concat(fields, dim='t')
    combined.attrs['variables'] = variables
    combined.attrs['units'] = units
    combined.attrs['dt'] = dt
    combined.attrs['files'] = files
    return combined
def parse_header(filename):
    """
    parse_header ( filename)
    Parses header of the file (.vec) to get the variables (typically X,Y,U,V)
    and units (can be m,mm, pix/dt or mm/sec, etc.), and the size of the dataset
    by the number of rows and columns.
    Input:
        filename : complete path of the file to read
    Returns:
        variables : list of strings
        units : list of strings
        rows : number of rows of the dataset
        cols : number of columns of the dataset
        dt   : time interval between the two PIV frames in microseconds
    """
    # split path from the filename
    fname = os.path.basename(filename)
    # get the number in a filename if it's a .vec file from Insight
    # NOTE(review): if the basename contains neither '.' nor '_' before the
    # extension, `frame` is never bound and the returns below raise
    # UnboundLocalError — confirm whether such filenames can occur.
    if '.' in fname[:-4]:  # day2a005003.T000.D000.P003.H001.L.vec
        frame = int(re.findall('\d+',fname.split('.')[0])[-1])
    elif '_' in filename[:-4]:
        frame = int(re.findall('\d+',fname.split('_')[1])[-1])  # exp1_001_b.vec, .txt
    with open(filename) as fid:
        header = fid.readline()
    # if the file does not have a header, can be from OpenPIV or elsewhere
    # return None
    if header[:5] != 'TITLE':
        return (None,None,None,None,None,frame)
    # Normalize the TECPLOT header into whitespace-separated tokens.
    header_list = header.replace(',',' ').replace('=',' ').replace('"',' ').split()
    # get variable names, typically X,Y,U,V
    variables = header_list[3:12][::2]
    # get units - this is important if it's mm or m/s
    units = header_list[4:12][::2]
    # get the size of the PIV grid in rows x cols
    rows = int(header_list[-5])
    cols = int(header_list[-3])
    # this is also important to know the time interval, dt
    ind1 = header.find('MicrosecondsPerDeltaT')
    dt = float(header[ind1:].split('"')[1])
    return (variables, units, rows, cols, dt, frame)
def get_units(filename):
    """
    get_units(filename)
    given a full path name to the .vec file will return the names
    of length and velocity units fallback option is all None. Uses
    parse_header function, see below.
    """
    _, units, _, _, _, _ = parse_header(filename)
    if units is None:
        # Header-less file: no unit information available.
        return None, None, None
    length_units = units[0]
    vel_units = units[2]
    if vel_units == 'pixel':
        vel_units = vel_units + '/dt'  # make it similar to m/s
    time_units = vel_units.split('/')[1]  # 's' or 'dt'
    return length_units, vel_units, time_units
"numpy.meshgrid",
"numpy.ones_like",
"numpy.zeros_like",
"os.path.basename",
"xarray.Dataset",
"xarray.concat",
"numpy.arange",
"xarray.DataArray",
"numpy.loadtxt",
"numpy.random.rand",
"os.path.join",
"numpy.unique"
] | [((272, 300), 'numpy.arange', 'np.arange', (['(32.0)', '(128.0)', '(32.0)'], {}), '(32.0, 128.0, 32.0)\n', (281, 300), True, 'import numpy as np\n'), ((304, 332), 'numpy.arange', 'np.arange', (['(16.0)', '(128.0)', '(16.0)'], {}), '(16.0, 128.0, 16.0)\n', (313, 332), True, 'import numpy as np\n'), ((341, 358), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (352, 358), True, 'import numpy as np\n'), ((521, 536), 'numpy.ones_like', 'np.ones_like', (['u'], {}), '(u)\n', (533, 536), True, 'import numpy as np\n'), ((581, 657), 'xarray.DataArray', 'xr.DataArray', (['u'], {'dims': "('x', 'y', 't')", 'coords': "{'x': x, 'y': y, 't': [frame]}"}), "(u, dims=('x', 'y', 't'), coords={'x': x, 'y': y, 't': [frame]})\n", (593, 657), True, 'import xarray as xr\n'), ((657, 733), 'xarray.DataArray', 'xr.DataArray', (['v'], {'dims': "('x', 'y', 't')", 'coords': "{'x': x, 'y': y, 't': [frame]}"}), "(v, dims=('x', 'y', 't'), coords={'x': x, 'y': y, 't': [frame]})\n", (669, 733), True, 'import xarray as xr\n'), ((735, 813), 'xarray.DataArray', 'xr.DataArray', (['chc'], {'dims': "('x', 'y', 't')", 'coords': "{'x': x, 'y': y, 't': [frame]}"}), "(chc, dims=('x', 'y', 't'), coords={'x': x, 'y': y, 't': [frame]})\n", (747, 813), True, 'import xarray as xr\n'), ((821, 861), 'xarray.Dataset', 'xr.Dataset', (["{'u': u, 'v': v, 'chc': chc}"], {}), "({'u': u, 'v': v, 'chc': chc})\n", (831, 861), True, 'import xarray as xr\n'), ((1307, 1331), 'xarray.concat', 'xr.concat', (['data'], {'dim': '"""t"""'}), "(data, dim='t')\n", (1316, 1331), True, 'import xarray as xr\n'), ((2902, 2978), 'xarray.DataArray', 'xr.DataArray', (['u'], {'dims': "('x', 'y', 't')", 'coords': "{'x': x, 'y': y, 't': [frame]}"}), "(u, dims=('x', 'y', 't'), coords={'x': x, 'y': y, 't': [frame]})\n", (2914, 2978), True, 'import xarray as xr\n'), ((2978, 3054), 'xarray.DataArray', 'xr.DataArray', (['v'], {'dims': "('x', 'y', 't')", 'coords': "{'x': x, 'y': y, 't': [frame]}"}), "(v, dims=('x', 'y', 't'), 
coords={'x': x, 'y': y, 't': [frame]})\n", (2990, 3054), True, 'import xarray as xr\n'), ((3056, 3134), 'xarray.DataArray', 'xr.DataArray', (['chc'], {'dims': "('x', 'y', 't')", 'coords': "{'x': x, 'y': y, 't': [frame]}"}), "(chc, dims=('x', 'y', 't'), coords={'x': x, 'y': y, 't': [frame]})\n", (3068, 3134), True, 'import xarray as xr\n'), ((3142, 3182), 'xarray.Dataset', 'xr.Dataset', (["{'u': u, 'v': v, 'chc': chc}"], {}), "({'u': u, 'v': v, 'chc': chc})\n", (3152, 3182), True, 'import xarray as xr\n'), ((4077, 4101), 'xarray.concat', 'xr.concat', (['data'], {'dim': '"""t"""'}), "(data, dim='t')\n", (4086, 4101), True, 'import xarray as xr\n'), ((4905, 4931), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4921, 4931), False, 'import os, re\n'), ((367, 385), 'numpy.ones_like', 'np.ones_like', (['xm.T'], {}), '(xm.T)\n', (379, 385), True, 'import numpy as np\n'), ((388, 407), 'numpy.arange', 'np.arange', (['(0.0)', '(7.0)'], {}), '(0.0, 7.0)\n', (397, 407), True, 'import numpy as np\n'), ((2402, 2447), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'usecols': '(0, 1, 2, 3, 4)'}), '(filename, usecols=(0, 1, 2, 3, 4))\n', (2412, 2447), True, 'import numpy as np\n'), ((2455, 2473), 'numpy.unique', 'np.unique', (['d[:, 0]'], {}), '(d[:, 0])\n', (2464, 2473), True, 'import numpy as np\n'), ((2485, 2503), 'numpy.unique', 'np.unique', (['d[:, 1]'], {}), '(d[:, 1])\n', (2494, 2503), True, 'import numpy as np\n'), ((415, 434), 'numpy.zeros_like', 'np.zeros_like', (['ym.T'], {}), '(ym.T)\n', (428, 434), True, 'import numpy as np\n'), ((435, 455), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (449, 455), True, 'import numpy as np\n'), ((3820, 3858), 'os.path.join', 'os.path.join', (['path', "(basename + '*.vec')"], {}), "(path, basename + '*.vec')\n", (3832, 3858), False, 'import os, re\n'), ((2581, 2653), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(1)', 'delimiter': '""","""', 'usecols': '(0, 1, 2, 3, 
4)'}), "(filename, skiprows=1, delimiter=',', usecols=(0, 1, 2, 3, 4))\n", (2591, 2653), True, 'import numpy as np\n')] |
import pytest
from fastapi import BackgroundTasks
from typing import List, Tuple
import numpy as np
from tests.utils import floats_almost_equal, nested_floats_almost_equal
from src.constants import PLATFORM_ENUM
from src.app.ml.base_predictor import BaseData, BaseDataInterface, BaseDataConverter, BasePredictor
from src.app.ml.active_predictor import DataConverter
import src.app
from src.app.api._predict import (
__predict,
__predict_label,
_predict_from_redis_cache,
_labels,
_test,
_test_label,
_predict,
_predict_label,
_predict_async_post,
_predict_async_get,
_predict_async_get_label,
)
labels = ["a", "b", "c"]
test_uuid = "550e8400-e29b-41d4-a716-446655440000"
job_id = f"{test_uuid}_0"
mock_BackgroundTasks = BackgroundTasks()
f_proba = [0.7, 0.2, 0.1]
f_data = [[5.1, 3.5, 1.4, 0.2]]
class MockPredictor(BasePredictor):
    """Minimal predictor stub: loads nothing and always predicts None."""
    def load_model(self):
        pass
    def predict(self, data):
        return None
class MockData(BaseData):
    """Data payload stub pre-filled with the module-level sample values."""
    input_data: List[List[float]] = f_data
    test_data: List[List[float]] = f_data
    labels: List[str] = labels
class MockDataInterface(BaseDataInterface):
    pass
# Shapes/dtypes matching the (1, 4) -> (1, 3) sample problem above.
MockDataInterface.input_shape = (1, 4)
MockDataInterface.input_type = "float32"
MockDataInterface.output_shape = (1, 3)
MockDataInterface.output_type = "float32"
class MockDataConverter(BaseDataConverter):
    pass
MockDataConverter.meta_data = MockDataInterface
class MockJob:
    """Callable job stub that reports success."""
    def __call__(self):
        return True
# __predict should write the model output onto the data object in place.
@pytest.mark.parametrize(
    ("prediction", "expected"),
    [(np.array([[0.8, 0.1, 0.1]]), {"prediction": [[0.8, 0.1, 0.1]]}), (np.array([[0.2, 0.1, 0.7]]), {"prediction": [[0.2, 0.1, 0.7]]})],
)
def test__predict(mocker, prediction, expected):
    mock_data = MockData()
    mocker.patch(
        "src.app.ml.active_predictor.DataConverter.convert_input_data_to_np",
        return_value=np.array(mock_data.input_data).astype(np.float32).reshape(MockDataInterface.input_shape),
    )
    mocker.patch("src.app.ml.active_predictor.DataConverter.reshape_output", return_value=prediction)
    mocker.patch("src.app.ml.active_predictor.active_predictor.predict", return_value=prediction)
    __predict(data=mock_data)
    assert nested_floats_almost_equal(mock_data.prediction, expected["prediction"])
# __predict_label should return only the argmax label with its score.
@pytest.mark.parametrize(("prediction", "expected"), [(np.array([[0.1, 0.1, 0.8]]), {"c": 0.8}), (np.array([[0.2, 0.1, 0.7]]), {"c": 0.7})])
def test__predict_label(mocker, prediction, expected):
    mock_data = MockData()
    mocker.patch("src.app.ml.active_predictor.DataConverter.reshape_output", return_value=prediction)
    mocker.patch("src.app.ml.active_predictor.active_predictor.predict", return_value=prediction)
    result = __predict_label(data=mock_data)
    assert result == expected
# Cache path: input is read from (mocked) Redis, then predicted.
@pytest.mark.parametrize(("job_id", "data", "expected"), [(job_id, {"input_data": f_data}, {"input_data": f_data, "prediction": [f_proba]})])
def test_predict_from_redis_cache(mocker, job_id, data, expected):
    mock_data = MockData(input_data=data["input_data"], prediction=expected["prediction"])
    mocker.patch("src.jobs.store_data_job.load_data_redis", return_value=data)
    mocker.patch("src.app.ml.active_predictor.active_predictor.predict", return_value=np.array(expected["prediction"]))
    result = _predict_from_redis_cache(job_id, MockData)
    assert expected["input_data"] == result.input_data
    assert nested_floats_almost_equal(mock_data.prediction, expected["prediction"])
# _labels must expose the label list in its response payload.
def test_labels(mocker):
    result = _labels(MockData)
    assert "labels" in result
@pytest.mark.parametrize(
    ("output", "expected"),
    [
        (np.array([[0.8, 0.1, 0.1]]), {"prediction": [[0.8, 0.1, 0.1]]}),
        (np.array([[0.2, 0.1, 0.7]]), {"prediction": [[0.2, 0.1, 0.7]]}),
    ],
)
def test_test(mocker, output, expected):
    """_test should return the raw prediction for the sample data."""
    mocker.patch("src.app.ml.active_predictor.active_predictor.predict", return_value=output)
    response = _test(data=MockData())
    assert nested_floats_almost_equal(response["prediction"], expected["prediction"])
@pytest.mark.parametrize(
    ("output", "expected"),
    [
        (np.array([[0.8, 0.1, 0.1]]), {"prediction": {"a": 0.8}}),
        (np.array([[0.2, 0.1, 0.7]]), {"prediction": {"c": 0.7}}),
    ],
)
def test_test_label(mocker, output, expected):
    """_test_label should return the prediction keyed by its label."""
    mocker.patch("src.app.ml.active_predictor.active_predictor.predict", return_value=output)
    mocker.patch("src.app.ml.active_predictor.DataConverter.reshape_output", return_value=output)
    assert _test_label(data=MockData()) == expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("output", "expected"),
    [
        (np.array([[0.8, 0.1, 0.1]]), {"prediction": [[0.8, 0.1, 0.1]]}),
        (np.array([[0.2, 0.1, 0.7]]), {"prediction": [[0.2, 0.1, 0.7]]}),
    ],
)
async def test_predict(mocker, output, expected):
    """_predict should return the prediction together with the job id."""
    mocker.patch("src.jobs.store_data_job._save_data_job", return_value=job_id)
    mocker.patch("src.app.ml.active_predictor.active_predictor.predict", return_value=output)
    response = await _predict(MockData(), test_uuid, mock_BackgroundTasks)
    assert response["job_id"] == test_uuid
    assert nested_floats_almost_equal(response["prediction"], expected["prediction"])
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("output", "expected"),
    [
        (np.array([[0.8, 0.1, 0.1]]), {"prediction": {"a": 0.8}}),
        (np.array([[0.7, 0.1, 0.2]]), {"prediction": {"a": 0.7}}),
    ],
)
async def test_predict_label(mocker, output, expected):
    """_predict_label should return the labelled prediction and the job id."""
    mocker.patch("src.jobs.store_data_job._save_data_job", return_value=job_id)
    mocker.patch("src.app.ml.active_predictor.active_predictor.predict", return_value=output)
    response = await _predict_label(MockData(), test_uuid, mock_BackgroundTasks)
    assert response["job_id"] == test_uuid
    assert response["prediction"]["a"] == pytest.approx(expected["prediction"]["a"])
@pytest.mark.asyncio
@pytest.mark.parametrize("job_id", [job_id])
async def test_predict_async_post(mocker, job_id):
    """_predict_async_post should hand back the id of the queued job."""
    mocker.patch("src.jobs.store_data_job._save_data_job", return_value=job_id)
    response = await _predict_async_post(MockData(), job_id, mock_BackgroundTasks)
    assert response["job_id"] == job_id
@pytest.mark.parametrize(
    ("job_id", "data_dict", "expected"),
    [
        (
            job_id,
            {"input_data": [[5.1, 3.5, 1.4, 0.2]], "prediction": [[0.8, 0.1, 0.1]]},
            {job_id: {"prediction": [[0.8, 0.1, 0.1]]}},
        )
    ],
)
def test_predict_async_get(mocker, job_id, data_dict, expected):
    """_predict_async_get should fetch the cached prediction for a job id."""
    src.app.api._predict.PLATFORM = PLATFORM_ENUM.DOCKER_COMPOSE.value
    mocker.patch("src.jobs.store_data_job.load_data_redis", return_value=data_dict)
    assert _predict_async_get(job_id) == expected
@pytest.mark.parametrize(
    ("job_id", "data_dict", "expected"),
    [
        (
            job_id,
            {"input_data": [[5.1, 3.5, 1.4, 0.2]], "prediction": [[0.8, 0.1, 0.1]], "labels": labels},
            {job_id: {"prediction": {"a": 0.8}}},
        )
    ],
)
def test_predict_async_get_label(mocker, job_id, data_dict, expected):
    """_predict_async_get_label should fetch the cached labelled prediction."""
    src.app.api._predict.PLATFORM = PLATFORM_ENUM.DOCKER_COMPOSE.value
    mocker.patch("src.jobs.store_data_job.load_data_redis", return_value=data_dict)
    assert _predict_async_get_label(job_id) == expected
| [
"tests.utils.nested_floats_almost_equal",
"src.app.api._predict._predict_from_redis_cache",
"src.app.api._predict.__predict",
"src.app.api._predict._predict_async_get",
"fastapi.BackgroundTasks",
"src.app.api._predict._predict_async_get_label",
"numpy.array",
"src.app.api._predict._labels",
"pytest.... | [((767, 784), 'fastapi.BackgroundTasks', 'BackgroundTasks', ([], {}), '()\n', (782, 784), False, 'from fastapi import BackgroundTasks\n'), ((2805, 2950), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('job_id', 'data', 'expected')", "[(job_id, {'input_data': f_data}, {'input_data': f_data, 'prediction': [\n f_proba]})]"], {}), "(('job_id', 'data', 'expected'), [(job_id, {\n 'input_data': f_data}, {'input_data': f_data, 'prediction': [f_proba]})])\n", (2828, 2950), False, 'import pytest\n'), ((5812, 5855), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""job_id"""', '[job_id]'], {}), "('job_id', [job_id])\n", (5835, 5855), False, 'import pytest\n'), ((6113, 6312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('job_id', 'data_dict', 'expected')", "[(job_id, {'input_data': [[5.1, 3.5, 1.4, 0.2]], 'prediction': [[0.8, 0.1, \n 0.1]]}, {job_id: {'prediction': [[0.8, 0.1, 0.1]]}})]"], {}), "(('job_id', 'data_dict', 'expected'), [(job_id, {\n 'input_data': [[5.1, 3.5, 1.4, 0.2]], 'prediction': [[0.8, 0.1, 0.1]]},\n {job_id: {'prediction': [[0.8, 0.1, 0.1]]}})])\n", (6136, 6312), False, 'import pytest\n'), ((6608, 6818), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('job_id', 'data_dict', 'expected')", "[(job_id, {'input_data': [[5.1, 3.5, 1.4, 0.2]], 'prediction': [[0.8, 0.1, \n 0.1]], 'labels': labels}, {job_id: {'prediction': {'a': 0.8}}})]"], {}), "(('job_id', 'data_dict', 'expected'), [(job_id, {\n 'input_data': [[5.1, 3.5, 1.4, 0.2]], 'prediction': [[0.8, 0.1, 0.1]],\n 'labels': labels}, {job_id: {'prediction': {'a': 0.8}}})])\n", (6631, 6818), False, 'import pytest\n'), ((2192, 2217), 'src.app.api._predict.__predict', '__predict', ([], {'data': 'mock_data'}), '(data=mock_data)\n', (2201, 2217), False, 'from src.app.api._predict import __predict, __predict_label, _predict_from_redis_cache, _labels, _test, _test_label, _predict, _predict_label, _predict_async_post, _predict_async_get, 
_predict_async_get_label\n'), ((2229, 2301), 'tests.utils.nested_floats_almost_equal', 'nested_floats_almost_equal', (['mock_data.prediction', "expected['prediction']"], {}), "(mock_data.prediction, expected['prediction'])\n", (2255, 2301), False, 'from tests.utils import floats_almost_equal, nested_floats_almost_equal\n'), ((2740, 2771), 'src.app.api._predict.__predict_label', '__predict_label', ([], {'data': 'mock_data'}), '(data=mock_data)\n', (2755, 2771), False, 'from src.app.api._predict import __predict, __predict_label, _predict_from_redis_cache, _labels, _test, _test_label, _predict, _predict_label, _predict_async_post, _predict_async_get, _predict_async_get_label\n'), ((3318, 3361), 'src.app.api._predict._predict_from_redis_cache', '_predict_from_redis_cache', (['job_id', 'MockData'], {}), '(job_id, MockData)\n', (3343, 3361), False, 'from src.app.api._predict import __predict, __predict_label, _predict_from_redis_cache, _labels, _test, _test_label, _predict, _predict_label, _predict_async_post, _predict_async_get, _predict_async_get_label\n'), ((3428, 3500), 'tests.utils.nested_floats_almost_equal', 'nested_floats_almost_equal', (['mock_data.prediction', "expected['prediction']"], {}), "(mock_data.prediction, expected['prediction'])\n", (3454, 3500), False, 'from tests.utils import floats_almost_equal, nested_floats_almost_equal\n'), ((3541, 3558), 'src.app.api._predict._labels', '_labels', (['MockData'], {}), '(MockData)\n', (3548, 3558), False, 'from src.app.api._predict import __predict, __predict_label, _predict_from_redis_cache, _labels, _test, _test_label, _predict, _predict_label, _predict_async_post, _predict_async_get, _predict_async_get_label\n'), ((3962, 4034), 'tests.utils.nested_floats_almost_equal', 'nested_floats_almost_equal', (["result['prediction']", "expected['prediction']"], {}), "(result['prediction'], expected['prediction'])\n", (3988, 4034), False, 'from tests.utils import floats_almost_equal, nested_floats_almost_equal\n'), 
((5043, 5115), 'tests.utils.nested_floats_almost_equal', 'nested_floats_almost_equal', (["result['prediction']", "expected['prediction']"], {}), "(result['prediction'], expected['prediction'])\n", (5069, 5115), False, 'from tests.utils import floats_almost_equal, nested_floats_almost_equal\n'), ((6548, 6574), 'src.app.api._predict._predict_async_get', '_predict_async_get', (['job_id'], {}), '(job_id)\n', (6566, 6574), False, 'from src.app.api._predict import __predict, __predict_label, _predict_from_redis_cache, _labels, _test, _test_label, _predict, _predict_label, _predict_async_post, _predict_async_get, _predict_async_get_label\n'), ((7060, 7092), 'src.app.api._predict._predict_async_get_label', '_predict_async_get_label', (['job_id'], {}), '(job_id)\n', (7084, 7092), False, 'from src.app.api._predict import __predict, __predict_label, _predict_from_redis_cache, _labels, _test, _test_label, _predict, _predict_label, _predict_async_post, _predict_async_get, _predict_async_get_label\n'), ((5704, 5746), 'pytest.approx', 'pytest.approx', (["expected['prediction']['a']"], {}), "(expected['prediction']['a'])\n", (5717, 5746), False, 'import pytest\n'), ((1565, 1592), 'numpy.array', 'np.array', (['[[0.8, 0.1, 0.1]]'], {}), '([[0.8, 0.1, 0.1]])\n', (1573, 1592), True, 'import numpy as np\n'), ((1631, 1658), 'numpy.array', 'np.array', (['[[0.2, 0.1, 0.7]]'], {}), '([[0.2, 0.1, 0.7]])\n', (1639, 1658), True, 'import numpy as np\n'), ((2359, 2386), 'numpy.array', 'np.array', (['[[0.1, 0.1, 0.8]]'], {}), '([[0.1, 0.1, 0.8]])\n', (2367, 2386), True, 'import numpy as np\n'), ((2402, 2429), 'numpy.array', 'np.array', (['[[0.2, 0.1, 0.7]]'], {}), '([[0.2, 0.1, 0.7]])\n', (2410, 2429), True, 'import numpy as np\n'), ((3271, 3303), 'numpy.array', 'np.array', (["expected['prediction']"], {}), "(expected['prediction'])\n", (3279, 3303), True, 'import numpy as np\n'), ((3647, 3674), 'numpy.array', 'np.array', (['[[0.8, 0.1, 0.1]]'], {}), '([[0.8, 0.1, 0.1]])\n', (3655, 3674), True, 
'import numpy as np\n'), ((3713, 3740), 'numpy.array', 'np.array', (['[[0.2, 0.1, 0.7]]'], {}), '([[0.2, 0.1, 0.7]])\n', (3721, 3740), True, 'import numpy as np\n'), ((4093, 4120), 'numpy.array', 'np.array', (['[[0.8, 0.1, 0.1]]'], {}), '([[0.8, 0.1, 0.1]])\n', (4101, 4120), True, 'import numpy as np\n'), ((4152, 4179), 'numpy.array', 'np.array', (['[[0.2, 0.1, 0.7]]'], {}), '([[0.2, 0.1, 0.7]])\n', (4160, 4179), True, 'import numpy as np\n'), ((4602, 4629), 'numpy.array', 'np.array', (['[[0.8, 0.1, 0.1]]'], {}), '([[0.8, 0.1, 0.1]])\n', (4610, 4629), True, 'import numpy as np\n'), ((4668, 4695), 'numpy.array', 'np.array', (['[[0.2, 0.1, 0.7]]'], {}), '([[0.2, 0.1, 0.7]])\n', (4676, 4695), True, 'import numpy as np\n'), ((5236, 5263), 'numpy.array', 'np.array', (['[[0.8, 0.1, 0.1]]'], {}), '([[0.8, 0.1, 0.1]])\n', (5244, 5263), True, 'import numpy as np\n'), ((5295, 5322), 'numpy.array', 'np.array', (['[[0.7, 0.1, 0.2]]'], {}), '([[0.7, 0.1, 0.2]])\n', (5303, 5322), True, 'import numpy as np\n'), ((1892, 1922), 'numpy.array', 'np.array', (['mock_data.input_data'], {}), '(mock_data.input_data)\n', (1900, 1922), True, 'import numpy as np\n')] |
import numpy as np
from mchap.assemble import arraymap
def test_get__nan():
    """Lookups in a freshly created map should all miss (return NaN)."""
    amap = arraymap.new(5, 2)
    for _ in range(10):
        key = np.random.randint(0, 2, 5)
        assert np.isnan(arraymap.get(amap, key))
def test_get_set_get():
    """Stored values survive a set/get round trip without clobbering other keys."""
    amap = arraymap.new(5, 3)
    key_a = np.array([0, 1, 2, 0, 1])
    key_b = np.array([0, 1, 2, 0, 2])
    for key in (key_a, key_b):
        assert np.isnan(arraymap.get(amap, key))
    amap = arraymap.set(amap, key_a, 0.5)
    assert arraymap.get(amap, key_a) == 0.5
    assert np.isnan(arraymap.get(amap, key_b))
    amap = arraymap.set(amap, key_b, 0.1)
    assert arraymap.get(amap, key_a) == 0.5
    assert arraymap.get(amap, key_b) == 0.1
def test_set__grow_tree():
    """The tree array should double in capacity as entries are inserted."""
    amap = arraymap.new(5, 3, initial_size=4)
    key_a = np.array([0, 1, 2, 0, 1])
    key_b = np.array([0, 1, 2, 0, 2])

    def shapes():
        # Reads the *current* amap binding from the enclosing scope.
        return amap[0].shape, amap[1].shape

    assert np.isnan(arraymap.get(amap, key_a))
    assert np.isnan(arraymap.get(amap, key_b))
    assert shapes() == ((4, 3), (4,))
    amap = arraymap.set(amap, key_a, 0.5)
    assert arraymap.get(amap, key_a) == 0.5
    assert np.isnan(arraymap.get(amap, key_b))
    assert shapes() == ((8, 3), (4,))
    amap = arraymap.set(amap, key_b, 0.1)
    assert arraymap.get(amap, key_a) == 0.5
    assert arraymap.get(amap, key_b) == 0.1
    assert shapes() == ((16, 3), (4,))
def test_set__grow_values():
    """Both backing arrays should grow from a small initial size on insert."""
    amap = arraymap.new(5, 3, initial_size=2)
    key_a = np.array([0, 1, 2, 0, 1])
    key_b = np.array([0, 1, 2, 0, 2])

    def shapes():
        # Reads the *current* amap binding from the enclosing scope.
        return amap[0].shape, amap[1].shape

    assert np.isnan(arraymap.get(amap, key_a))
    assert np.isnan(arraymap.get(amap, key_b))
    assert shapes() == ((2, 3), (2,))
    amap = arraymap.set(amap, key_a, 0.5)
    assert arraymap.get(amap, key_a) == 0.5
    assert np.isnan(arraymap.get(amap, key_b))
    assert shapes() == ((8, 3), (4,))
    amap = arraymap.set(amap, key_b, 0.1)
    assert arraymap.get(amap, key_a) == 0.5
    assert arraymap.get(amap, key_b) == 0.1
    assert shapes() == ((16, 3), (4,))
def test_set__full():
    """Inserting beyond max_size must raise ValueError."""
    amap = arraymap.new(5, 3, initial_size=4, max_size=8)
    key_a = np.array([0, 1, 2, 0, 1])
    key_b = np.array([1, 1, 2, 0, 2])
    assert np.isnan(arraymap.get(amap, key_a))
    assert np.isnan(arraymap.get(amap, key_b))
    assert amap[0].shape == (4, 3)
    assert amap[1].shape == (4,)
    amap = arraymap.set(amap, key_a, 0.5)
    assert arraymap.get(amap, key_a) == 0.5
    assert np.isnan(arraymap.get(amap, key_b))
    assert amap[0].shape == (8, 3)
    assert amap[1].shape == (4,)
    raised = False
    try:
        amap = arraymap.set(amap, key_b, 0.1)
    except ValueError:
        raised = True
    assert raised, "expected ValueError when the map is full"
def test_set__empty_if_full():
    """With empty_if_full=True a full map is flushed instead of raising."""
    amap = arraymap.new(5, 3, initial_size=4, max_size=8)
    key_a = np.array([0, 1, 2, 0, 1])
    key_b = np.array([1, 1, 2, 0, 2])
    assert np.isnan(arraymap.get(amap, key_a))
    assert np.isnan(arraymap.get(amap, key_b))
    assert amap[0].shape == (4, 3)
    assert amap[1].shape == (4,)
    amap = arraymap.set(amap, key_a, 0.5, empty_if_full=True)
    assert arraymap.get(amap, key_a) == 0.5
    assert np.isnan(arraymap.get(amap, key_b))
    assert amap[0].shape == (8, 3)
    assert amap[1].shape == (4,)
    amap = arraymap.set(amap, key_b, 0.1, empty_if_full=True)
    # The map was flushed: same capacity as before, but no stored entries.
    assert np.all(amap[0] == -1)
    assert np.all(np.isnan(amap[1]))
    assert amap[2] == 5  # key (array) length
    assert amap[3] == 1  # initial offset
    assert amap[4] == 0  # initial offset
    assert amap[5] == 8  # max size
    assert np.isnan(arraymap.get(amap, key_a))
    assert np.isnan(arraymap.get(amap, key_b))
    assert amap[0].shape == (8, 3)
    assert amap[1].shape == (4,)
| [
"mchap.assemble.arraymap.set",
"numpy.isnan",
"numpy.random.randint",
"numpy.array",
"mchap.assemble.arraymap.new",
"mchap.assemble.arraymap.get",
"numpy.all"
] | [((90, 108), 'mchap.assemble.arraymap.new', 'arraymap.new', (['(5)', '(2)'], {}), '(5, 2)\n', (102, 108), False, 'from mchap.assemble import arraymap\n'), ((280, 298), 'mchap.assemble.arraymap.new', 'arraymap.new', (['(5)', '(3)'], {}), '(5, 3)\n', (292, 298), False, 'from mchap.assemble import arraymap\n'), ((307, 332), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1]'], {}), '([0, 1, 2, 0, 1])\n', (315, 332), True, 'import numpy as np\n'), ((341, 366), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 2]'], {}), '([0, 1, 2, 0, 2])\n', (349, 366), True, 'import numpy as np\n'), ((464, 490), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'a', '(0.5)'], {}), '(amap, a, 0.5)\n', (476, 490), False, 'from mchap.assemble import arraymap\n'), ((585, 611), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'b', '(0.1)'], {}), '(amap, b, 0.1)\n', (597, 611), False, 'from mchap.assemble import arraymap\n'), ((732, 766), 'mchap.assemble.arraymap.new', 'arraymap.new', (['(5)', '(3)'], {'initial_size': '(4)'}), '(5, 3, initial_size=4)\n', (744, 766), False, 'from mchap.assemble import arraymap\n'), ((775, 800), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1]'], {}), '([0, 1, 2, 0, 1])\n', (783, 800), True, 'import numpy as np\n'), ((809, 834), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 2]'], {}), '([0, 1, 2, 0, 2])\n', (817, 834), True, 'import numpy as np\n'), ((1000, 1026), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'a', '(0.5)'], {}), '(amap, a, 0.5)\n', (1012, 1026), False, 'from mchap.assemble import arraymap\n'), ((1189, 1215), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'b', '(0.1)'], {}), '(amap, b, 0.1)\n', (1201, 1215), False, 'from mchap.assemble import arraymap\n'), ((1407, 1441), 'mchap.assemble.arraymap.new', 'arraymap.new', (['(5)', '(3)'], {'initial_size': '(2)'}), '(5, 3, initial_size=2)\n', (1419, 1441), False, 'from mchap.assemble import arraymap\n'), ((1450, 1475), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1]'], {}), 
'([0, 1, 2, 0, 1])\n', (1458, 1475), True, 'import numpy as np\n'), ((1484, 1509), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 2]'], {}), '([0, 1, 2, 0, 2])\n', (1492, 1509), True, 'import numpy as np\n'), ((1675, 1701), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'a', '(0.5)'], {}), '(amap, a, 0.5)\n', (1687, 1701), False, 'from mchap.assemble import arraymap\n'), ((1864, 1890), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'b', '(0.1)'], {}), '(amap, b, 0.1)\n', (1876, 1890), False, 'from mchap.assemble import arraymap\n'), ((2075, 2121), 'mchap.assemble.arraymap.new', 'arraymap.new', (['(5)', '(3)'], {'initial_size': '(4)', 'max_size': '(8)'}), '(5, 3, initial_size=4, max_size=8)\n', (2087, 2121), False, 'from mchap.assemble import arraymap\n'), ((2130, 2155), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1]'], {}), '([0, 1, 2, 0, 1])\n', (2138, 2155), True, 'import numpy as np\n'), ((2164, 2189), 'numpy.array', 'np.array', (['[1, 1, 2, 0, 2]'], {}), '([1, 1, 2, 0, 2])\n', (2172, 2189), True, 'import numpy as np\n'), ((2355, 2381), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'a', '(0.5)'], {}), '(amap, a, 0.5)\n', (2367, 2381), False, 'from mchap.assemble import arraymap\n'), ((2695, 2741), 'mchap.assemble.arraymap.new', 'arraymap.new', (['(5)', '(3)'], {'initial_size': '(4)', 'max_size': '(8)'}), '(5, 3, initial_size=4, max_size=8)\n', (2707, 2741), False, 'from mchap.assemble import arraymap\n'), ((2750, 2775), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1]'], {}), '([0, 1, 2, 0, 1])\n', (2758, 2775), True, 'import numpy as np\n'), ((2784, 2809), 'numpy.array', 'np.array', (['[1, 1, 2, 0, 2]'], {}), '([1, 1, 2, 0, 2])\n', (2792, 2809), True, 'import numpy as np\n'), ((2975, 3021), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'a', '(0.5)'], {'empty_if_full': '(True)'}), '(amap, a, 0.5, empty_if_full=True)\n', (2987, 3021), False, 'from mchap.assemble import arraymap\n'), ((3184, 3230), 'mchap.assemble.arraymap.set', 
'arraymap.set', (['amap', 'b', '(0.1)'], {'empty_if_full': '(True)'}), '(amap, b, 0.1, empty_if_full=True)\n', (3196, 3230), False, 'from mchap.assemble import arraymap\n'), ((3291, 3312), 'numpy.all', 'np.all', (['(amap[0] == -1)'], {}), '(amap[0] == -1)\n', (3297, 3312), True, 'import numpy as np\n'), ((145, 171), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(5)'], {}), '(0, 2, 5)\n', (162, 171), True, 'import numpy as np\n'), ((189, 210), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (201, 210), False, 'from mchap.assemble import arraymap\n'), ((226, 242), 'numpy.isnan', 'np.isnan', (['actual'], {}), '(actual)\n', (234, 242), True, 'import numpy as np\n'), ((387, 408), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (399, 408), False, 'from mchap.assemble import arraymap\n'), ((430, 451), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (442, 451), False, 'from mchap.assemble import arraymap\n'), ((502, 523), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (514, 523), False, 'from mchap.assemble import arraymap\n'), ((551, 572), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (563, 572), False, 'from mchap.assemble import arraymap\n'), ((623, 644), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (635, 644), False, 'from mchap.assemble import arraymap\n'), ((663, 684), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (675, 684), False, 'from mchap.assemble import arraymap\n'), ((855, 876), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (867, 876), False, 'from mchap.assemble import arraymap\n'), ((898, 919), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (910, 919), False, 'from mchap.assemble import arraymap\n'), ((1038, 1059), 
'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (1050, 1059), False, 'from mchap.assemble import arraymap\n'), ((1087, 1108), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (1099, 1108), False, 'from mchap.assemble import arraymap\n'), ((1227, 1248), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (1239, 1248), False, 'from mchap.assemble import arraymap\n'), ((1267, 1288), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (1279, 1288), False, 'from mchap.assemble import arraymap\n'), ((1530, 1551), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (1542, 1551), False, 'from mchap.assemble import arraymap\n'), ((1573, 1594), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (1585, 1594), False, 'from mchap.assemble import arraymap\n'), ((1713, 1734), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (1725, 1734), False, 'from mchap.assemble import arraymap\n'), ((1762, 1783), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (1774, 1783), False, 'from mchap.assemble import arraymap\n'), ((1902, 1923), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (1914, 1923), False, 'from mchap.assemble import arraymap\n'), ((1942, 1963), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (1954, 1963), False, 'from mchap.assemble import arraymap\n'), ((2210, 2231), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (2222, 2231), False, 'from mchap.assemble import arraymap\n'), ((2253, 2274), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (2265, 2274), False, 'from mchap.assemble import arraymap\n'), ((2393, 2414), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), 
'(amap, a)\n', (2405, 2414), False, 'from mchap.assemble import arraymap\n'), ((2442, 2463), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (2454, 2463), False, 'from mchap.assemble import arraymap\n'), ((2557, 2583), 'mchap.assemble.arraymap.set', 'arraymap.set', (['amap', 'b', '(0.1)'], {}), '(amap, b, 0.1)\n', (2569, 2583), False, 'from mchap.assemble import arraymap\n'), ((2830, 2851), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (2842, 2851), False, 'from mchap.assemble import arraymap\n'), ((2873, 2894), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (2885, 2894), False, 'from mchap.assemble import arraymap\n'), ((3033, 3054), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (3045, 3054), False, 'from mchap.assemble import arraymap\n'), ((3082, 3103), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (3094, 3103), False, 'from mchap.assemble import arraymap\n'), ((3331, 3348), 'numpy.isnan', 'np.isnan', (['amap[1]'], {}), '(amap[1])\n', (3339, 3348), True, 'import numpy as np\n'), ((3526, 3547), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'a'], {}), '(amap, a)\n', (3538, 3547), False, 'from mchap.assemble import arraymap\n'), ((3569, 3590), 'mchap.assemble.arraymap.get', 'arraymap.get', (['amap', 'b'], {}), '(amap, b)\n', (3581, 3590), False, 'from mchap.assemble import arraymap\n')] |
# libraries
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simps
import scipy.constants as cte
from scipy.sparse import diags
from scipy.linalg import inv
from scipy.fftpack import fft, ifft, fftfreq
import scipy.special as sp
from scipy.signal import gaussian
# matplotlib defaults setup
plt.rcParams['savefig.dpi'] = 75
plt.rcParams['figure.autolayout'] = False
plt.rcParams['figure.figsize'] = 14, 8
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['font.size'] = 16
plt.rcParams['lines.linewidth'] = 2.0
plt.rcParams['lines.markersize'] = 8
plt.rcParams['legend.fontsize'] = 14
plt.rcParams['font.family'] = "serif"
plt.rcParams['font.serif'] = "computer modern sans serif"
plt.rcParams['text.usetex'] = True
# 'text.latex.unicode' was deprecated in matplotlib 2.2 and removed in 3.0;
# an unconditional assignment raises KeyError on modern matplotlib, so only
# set it when the running version still supports it.
if 'text.latex.unicode' in plt.rcParams:
    plt.rcParams['text.latex.unicode'] = True
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
tableau20 = [(r / 255., g / 255., b / 255.) for r, g, b in tableau20]
# quantities of interest in atomic units
au_l = cte.value('atomic unit of length')
au_t = cte.value('atomic unit of time')
au_e = cte.value('atomic unit of energy')
# other useful physical constants
ev = cte.value('electron volt')
c = cte.value('speed of light in vacuum')
hbar_si = cte.value('Planck constant over 2 pi')
me = cte.value('electron mass')
# conversion factors: atomic units -> angstroms / electron-volts
au2ang = au_l / 1e-10
au2ev = au_e / ev
# grid and time-step parameters
L = 10.0 # angstrom
N = 2048
hN = int(N/2)
dt = 1e-20 # s
# converted to atomic units; dt is multiplied by -1j (imaginary-time evolution)
L_au = L / au2ang
dt_au = -1j * dt / au_t
# direct (position) and reciprocal (momentum) grids
x_au = np.linspace(-L_au/2.0, L_au/2.0, N)
dx_au = np.abs(x_au[1] - x_au[0])
k_au = fftfreq(N, d=dx_au)
# material properties and asymmetric double-well potential coefficients
me_eff = 0.5
adw_k0 = 0.0#-132.7074997
k2 = 7.0
k3 = 0.5
k4 = 1.0
v_adw = lambda x: adw_k0-k2*x**2+k3*x**3+k4*x**4
v_au = np.vectorize(v_adw)(x_au)
# split-step operators: half-step potential factor and kinetic factor in k-space
exp_v2 = np.exp(- 0.5j * v_au * dt_au)
exp_t = np.exp(- 0.5j * (2 * np.pi * k_au) ** 2 * dt_au / me_eff)
propagador = lambda p: exp_v2 * ifft(exp_t * fft(exp_v2 * p))
propagador_titulo = "Split-Step"
# initial guesses: a symmetric pair of gaussians centered at +/- a, normalized
n = 9
a = 1.9
sigma = 0.87
g = np.vectorize(lambda x: np.exp(-(x-a)**2/(2*sigma))+np.exp(-(x+a)**2/(2*sigma)))(x_au)
g /= np.sqrt(simps(np.abs(g)**2, x_au))
estados = np.array([g for _ in range(n)],dtype=np.complex_)
valores = np.zeros(n)
contadores = np.zeros(n)
# reference eigenvalues (eV) used for comparison, shifted by the well offset
valores_analiticos_ev = [-12.258438, -6.045418, -5.286089, -0.646627, 0.691204, 4.053229, 7.368937, 11.235521, 15.431918]
valores_analiticos_ev = np.array(valores_analiticos_ev) + adw_k0
# x positions for the energy / iteration-count labels on the plot
texto_x_l = -10/2
texto_x_r = 0.7 * 10/2
# Converge each of the n lowest states by imaginary-time split-step evolution:
# propagate, orthogonalize against already-converged states, renormalize,
# and every 1000 iterations estimate the energy and plot the progress.
for s in range(n):
    v_ant = 1.0
    while True:
        contadores[s] += 1
        estados[s] = propagador(estados[s])
        # Gram-Schmidt: project out the previously converged states
        for m in range(s):
            proj = simps(estados[s] * np.conjugate(estados[m]), x_au)
            estados[s] -= proj * estados[m]
        # renormalize
        estados[s] /= np.sqrt(simps(np.abs(estados[s])**2, x_au))
        if contadores[s] % 1000 == 0:
            # estimate the eigenvalue of the current state
            # second derivative (3-point finite difference on the interior points)
            derivada2 = (estados[s][:-2] - 2 * estados[s][1:-1] + estados[s][2:]) / dx_au**2
            psi = estados[s][1:-1]
            psi_conj = np.conjugate(psi)
            # <Psi|H|Psi>
            p_h_p = simps(psi_conj * (-0.5 * derivada2 / me_eff + v_au[1:-1] * psi), x_au[1:-1])
            # divide by <Psi|Psi>
            #p_h_p /= A
            print(p_h_p)
            valores[s] = p_h_p.real
            # plot-specific setup
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            ax.set_ylim([-20 + adw_k0,20 + adw_k0])
            ax.set_xlim([-6, 6])
            ax.spines["top"].set_visible(False)
            ax.spines["right"].set_visible(False)
            #plt.title("Autoestados/Autovalores Poço Suplo Assimétrico (%s)" % (propagador_titulo), fontsize=18)
            plt.xlabel("x (u. a.)", fontsize=16)
            plt.ylabel(r'$E \, (u. a.)$', fontsize=16)
            # rescale each state for display and offset it by its eigenvalue
            psif = [estados[m] for m in range(s+1)]
            psif = [2 * p / np.ptp(p) + valores[m] for m, p in enumerate(psif)]
            lines = []
            for i, p in enumerate(psif):
                line, = plt.plot(x_au, p, lw=1.0, color=tableau20[i], label=r'$|\Psi_{%d} (x,t)|^2$' % i)
                lines.append(line)
                # shift the labels down when two consecutive levels are close
                if i < len(psif) - 1 and valores[i+1]-valores[i] < 2.0:
                    ax.text(texto_x_l, valores[i] - 1, r"$E_{%d} = %.5f$ eV" % (i, valores[i]))
                    ax.text(texto_x_r, valores[i] - 1, r"$%d k$ iterações" % int(contadores[i]/1000))
                else:
                    ax.text(texto_x_l, valores[i] + 0.3, r"$E_{%d} = %.5f$ eV" % (i, valores[i]))
                    ax.text(texto_x_r, valores[i] + 0.3, r"$%d k$ iterações" % int(contadores[i]/1000))
            linev, = plt.plot(x_au, v_au, lw=1.0, color=tableau20[n], label='$V(x)$')
            lines.append(linev)
            plt.legend(handles=lines, loc=9, bbox_to_anchor=(0.5, -0.1), ncol=4)
            plt.show()
            #print("%d >>> %.4e / %.4e" % (contadores[s], valores[s], valores_analiticos_ev[s]))
            print("%d >>> %.8e" % (contadores[s], valores[s]))
            # stop when the energy's relative change since the last check is tiny
            #if np.abs(valores[s] - valores_analiticos_ev[s]) < 0.000001:
            if np.abs(1-valores[s]/v_ant) < 0.0000001:
                break
            else:
v_ant = valores[s] | [
"numpy.abs",
"numpy.vectorize",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.ptp",
"matplotlib.pyplot.legend",
"numpy.zeros",
"scipy.constants.value",
"scipy.fftpack.fft",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
... | [((1508, 1542), 'scipy.constants.value', 'cte.value', (['"""atomic unit of length"""'], {}), "('atomic unit of length')\n", (1517, 1542), True, 'import scipy.constants as cte\n'), ((1550, 1582), 'scipy.constants.value', 'cte.value', (['"""atomic unit of time"""'], {}), "('atomic unit of time')\n", (1559, 1582), True, 'import scipy.constants as cte\n'), ((1590, 1624), 'scipy.constants.value', 'cte.value', (['"""atomic unit of energy"""'], {}), "('atomic unit of energy')\n", (1599, 1624), True, 'import scipy.constants as cte\n'), ((1662, 1688), 'scipy.constants.value', 'cte.value', (['"""electron volt"""'], {}), "('electron volt')\n", (1671, 1688), True, 'import scipy.constants as cte\n'), ((1693, 1730), 'scipy.constants.value', 'cte.value', (['"""speed of light in vacuum"""'], {}), "('speed of light in vacuum')\n", (1702, 1730), True, 'import scipy.constants as cte\n'), ((1741, 1779), 'scipy.constants.value', 'cte.value', (['"""Planck constant over 2 pi"""'], {}), "('Planck constant over 2 pi')\n", (1750, 1779), True, 'import scipy.constants as cte\n'), ((1785, 1811), 'scipy.constants.value', 'cte.value', (['"""electron mass"""'], {}), "('electron mass')\n", (1794, 1811), True, 'import scipy.constants as cte\n'), ((2023, 2062), 'numpy.linspace', 'np.linspace', (['(-L_au / 2.0)', '(L_au / 2.0)', 'N'], {}), '(-L_au / 2.0, L_au / 2.0, N)\n', (2034, 2062), True, 'import numpy as np\n'), ((2067, 2092), 'numpy.abs', 'np.abs', (['(x_au[1] - x_au[0])'], {}), '(x_au[1] - x_au[0])\n', (2073, 2092), True, 'import numpy as np\n'), ((2100, 2119), 'scipy.fftpack.fftfreq', 'fftfreq', (['N'], {'d': 'dx_au'}), '(N, d=dx_au)\n', (2107, 2119), False, 'from scipy.fftpack import fft, ifft, fftfreq\n'), ((2312, 2340), 'numpy.exp', 'np.exp', (['(-0.5j * v_au * dt_au)'], {}), '(-0.5j * v_au * dt_au)\n', (2318, 2340), True, 'import numpy as np\n'), ((2350, 2406), 'numpy.exp', 'np.exp', (['(-0.5j * (2 * np.pi * k_au) ** 2 * dt_au / me_eff)'], {}), '(-0.5j * (2 * np.pi * k_au) ** 2 * 
dt_au / me_eff)\n', (2356, 2406), True, 'import numpy as np\n'), ((2749, 2760), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2757, 2760), True, 'import numpy as np\n'), ((2774, 2785), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2782, 2785), True, 'import numpy as np\n'), ((2263, 2282), 'numpy.vectorize', 'np.vectorize', (['v_adw'], {}), '(v_adw)\n', (2275, 2282), True, 'import numpy as np\n'), ((2933, 2964), 'numpy.array', 'np.array', (['valores_analiticos_ev'], {}), '(valores_analiticos_ev)\n', (2941, 2964), True, 'import numpy as np\n'), ((2658, 2667), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (2664, 2667), True, 'import numpy as np\n'), ((3673, 3690), 'numpy.conjugate', 'np.conjugate', (['psi'], {}), '(psi)\n', (3685, 3690), True, 'import numpy as np\n'), ((3737, 3813), 'scipy.integrate.simps', 'simps', (['(psi_conj * (-0.5 * derivada2 / me_eff + v_au[1:-1] * psi))', 'x_au[1:-1]'], {}), '(psi_conj * (-0.5 * derivada2 / me_eff + v_au[1:-1] * psi), x_au[1:-1])\n', (3742, 3813), False, 'from scipy.integrate import simps\n'), ((4003, 4015), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4013, 4015), True, 'import matplotlib.pyplot as plt\n'), ((4366, 4402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (u. a.)"""'], {'fontsize': '(16)'}), "('x (u. a.)', fontsize=16)\n", (4376, 4402), True, 'import matplotlib.pyplot as plt\n'), ((4415, 4457), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E \\\\, (u. a.)$"""'], {'fontsize': '(16)'}), "('$E \\\\, (u. 
a.)$', fontsize=16)\n", (4425, 4457), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5421), 'matplotlib.pyplot.plot', 'plt.plot', (['x_au', 'v_au'], {'lw': '(1.0)', 'color': 'tableau20[n]', 'label': '"""$V(x)$"""'}), "(x_au, v_au, lw=1.0, color=tableau20[n], label='$V(x)$')\n", (5365, 5421), True, 'import matplotlib.pyplot as plt\n'), ((5466, 5534), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'lines', 'loc': '(9)', 'bbox_to_anchor': '(0.5, -0.1)', 'ncol': '(4)'}), '(handles=lines, loc=9, bbox_to_anchor=(0.5, -0.1), ncol=4)\n', (5476, 5534), True, 'import matplotlib.pyplot as plt\n'), ((5547, 5557), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5555, 5557), True, 'import matplotlib.pyplot as plt\n'), ((2453, 2468), 'scipy.fftpack.fft', 'fft', (['(exp_v2 * p)'], {}), '(exp_v2 * p)\n', (2456, 2468), False, 'from scipy.fftpack import fft, ifft, fftfreq\n'), ((2576, 2611), 'numpy.exp', 'np.exp', (['(-(x - a) ** 2 / (2 * sigma))'], {}), '(-(x - a) ** 2 / (2 * sigma))\n', (2582, 2611), True, 'import numpy as np\n'), ((2604, 2639), 'numpy.exp', 'np.exp', (['(-(x + a) ** 2 / (2 * sigma))'], {}), '(-(x + a) ** 2 / (2 * sigma))\n', (2610, 2639), True, 'import numpy as np\n'), ((4691, 4777), 'matplotlib.pyplot.plot', 'plt.plot', (['x_au', 'p'], {'lw': '(1.0)', 'color': 'tableau20[i]', 'label': "('$|\\\\Psi_{%d} (x,t)|^2$' % i)"}), "(x_au, p, lw=1.0, color=tableau20[i], label=\n '$|\\\\Psi_{%d} (x,t)|^2$' % i)\n", (4699, 4777), True, 'import matplotlib.pyplot as plt\n'), ((5820, 5850), 'numpy.abs', 'np.abs', (['(1 - valores[s] / v_ant)'], {}), '(1 - valores[s] / v_ant)\n', (5826, 5850), True, 'import numpy as np\n'), ((3235, 3259), 'numpy.conjugate', 'np.conjugate', (['estados[m]'], {}), '(estados[m])\n', (3247, 3259), True, 'import numpy as np\n'), ((3380, 3398), 'numpy.abs', 'np.abs', (['estados[s]'], {}), '(estados[s])\n', (3386, 3398), True, 'import numpy as np\n'), ((4551, 4560), 'numpy.ptp', 'np.ptp', (['p'], {}), '(p)\n', (4557, 4560), 
True, 'import numpy as np\n')] |
from typing import List, Tuple
import array
import numpy as np
from scipy.sparse import coo_matrix, dok_matrix
from ..state import StateVar, StateVector
from . import CostTerm
class LazyCOOBuilder:
    """Incrementally collects COO triplets for cheap sparse-matrix assembly.

    Supports the slice-assignment idiom ``builder[r0:r1, c0:c1] += block``:
    ``__getitem__`` hands out a :class:`SlicingHandler` that records the dense
    block, and ``__setitem__`` harvests its triplets into flat typed buffers.
    """

    class SlicingHandler:
        """Records one dense block destined for a rectangular slice."""

        def __init__(self, key: Tuple[slice, slice]):
            self.key = key
            row_slice, col_slice = key
            # Normalize each slice into an explicit index range.
            row_range = range(row_slice.stop)[row_slice]
            col_range = range(col_slice.stop)[col_slice]
            # One (row, col) pair per entry of the dense block, row-major.
            self.rows = np.repeat(row_range, len(col_range))
            self.cols = np.tile(col_range, len(row_range))
            self.data = None

        def __iadd__(self, data: np.ndarray):
            self.data = data.reshape(-1)
            return self

    def __init__(self, shape: Tuple[int, int]):
        self.shape = shape
        # Compact C-typed buffers keep repeated extension cheap.
        self.rows = array.array('i')
        self.cols = array.array('i')
        self.data = array.array('d')

    def tocoo(self):
        """Materialize all accumulated triplets as a scipy COO matrix."""
        return coo_matrix((self.data, (self.rows, self.cols)), self.shape)

    def __getitem__(self, key):
        # Returns a handler; the subsequent __setitem__ completes the `+=`.
        return LazyCOOBuilder.SlicingHandler(key)

    def __setitem__(self, key: Tuple[slice, slice], value: SlicingHandler):
        assert key == value.key
        if value.data is None:
            return
        self.rows.extend(value.rows)
        self.cols.extend(value.cols)
        self.data.extend(value.data)
class OptimizationProblem:
    """Container for state variables and cost terms associated with the optimization problem to be solved."""
    def __init__(self) -> None:
        # Registered cost terms and state variables, in insertion order.
        self._cost_terms: List[CostTerm] = []
        self._state_vars: List[StateVar] = []
    def add_state_var(self, *state_vars: StateVar) -> None:
        """Adds state variables (either locked or unlocked)."""
        self._state_vars.extend(state_vars)
    def add_cost_term(self, *cost_terms: CostTerm) -> None:
        """Adds cost terms."""
        self._cost_terms.extend(cost_terms)
    def cost(self) -> float:
        """Computes the cost from the collection of cost terms."""
        # Total cost is the plain sum over all registered terms.
        return sum([x.cost() for x in self._cost_terms])
    def get_state_vars(self) -> List[StateVar]:
        """Gets reference to state variables."""
        return self._state_vars
    def get_num_of_cost_terms(self) -> int:
        """Gets the total number of cost terms."""
        return len(self._cost_terms)
    def build_gauss_newton_terms(self, state_vector: StateVector, sparse: bool) -> Tuple[np.ndarray, np.ndarray]:
        """Computes the left-hand approximated Hessian (A) and right-hand gradient vector (b).

        When ``sparse`` is True, A is accumulated through LazyCOOBuilder and
        returned as CSR; otherwise A is a dense (state_size, state_size) array.
        b is always a dense (state_size, 1) column vector.
        """
        state_size = state_vector.get_state_size()
        # equivalent to
        # A = dok_matrix((state_size, state_size)) if sparse else np.zeros((state_size, state_size))
        # ...
        # return A.tocsr()
        # but faster
        A = LazyCOOBuilder((state_size, state_size)) if sparse else np.zeros((state_size, state_size))
        b = np.zeros((state_size, 1))
        # Each cost term scatters its own contribution into A and b in place.
        for cost_term in self._cost_terms:
            cost_term.build_gauss_newton_terms(state_vector, A, b)
        return A.tocoo().tocsr() if sparse else A, b | [
"scipy.sparse.coo_matrix",
"numpy.zeros",
"array.array"
] | [((688, 704), 'array.array', 'array.array', (['"""i"""'], {}), "('i')\n", (699, 704), False, 'import array\n'), ((721, 737), 'array.array', 'array.array', (['"""i"""'], {}), "('i')\n", (732, 737), False, 'import array\n'), ((754, 770), 'array.array', 'array.array', (['"""d"""'], {}), "('d')\n", (765, 770), False, 'import array\n'), ((802, 861), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(self.data, (self.rows, self.cols))', 'self.shape'], {}), '((self.data, (self.rows, self.cols)), self.shape)\n', (812, 861), False, 'from scipy.sparse import coo_matrix, dok_matrix\n'), ((2636, 2661), 'numpy.zeros', 'np.zeros', (['(state_size, 1)'], {}), '((state_size, 1))\n', (2644, 2661), True, 'import numpy as np\n'), ((2593, 2627), 'numpy.zeros', 'np.zeros', (['(state_size, state_size)'], {}), '((state_size, state_size))\n', (2601, 2627), True, 'import numpy as np\n')] |
from threading import Thread, Lock
from Queue import Empty
from time import sleep
import json
import numpy as np
from typing import Optional, List, Dict
class Scheduler(Thread):
    """
    Runs Threaded Task Continuously with certain interval

    This is useful for long running Real-Time tasks:
    When there are many of these tasks, they start to conflict with each other.
    By specifying an interval in which the CPU on this thread is told to sleep,
    breathing room is realized for the other threads to execute their commands.

    Parameters
    ----------
    target: Callable
        Function to Run
    interval: float
        Interval between function calls
    name: str or None
        Name of Thread (for identification in debug mode)
    args: tuple
        Target Arguments
    kwargs: dict or None
        Target Keyword Arguments (None means a fresh empty dict)
    """
    def __init__(self, target, interval=1E-1, name=None, args=(), kwargs=None):
        Thread.__init__(self, name=name)
        self._target = target
        self._interval = interval
        self._args = args
        # Bug fix: the original default `kwargs={}` was one dict object shared
        # by every Scheduler instance, so a mutation through one instance leaked
        # into all others. Map the None sentinel to a fresh dict per instance.
        self._kwargs = {} if kwargs is None else kwargs
        self._running = False
        # Daemon thread: never keeps the interpreter alive on shutdown.
        self.daemon = True

    def run(self):
        """Invoke the target repeatedly, sleeping `interval` seconds between calls."""
        self._running = True
        while self._running:
            self._target(*self._args, **self._kwargs)
            sleep(self._interval)

    def join(self, timeout=None):
        """Signal the loop to stop, then wait for the thread to finish."""
        self._running = False
        Thread.join(self, timeout)
class Mailbox(object):
    """
    Mailbox Object: Single-Item Queue with Override on 'put'
    """
    # Polling interval (seconds) used while a blocking `get` waits for mail.
    EPSILON = 1E-1

    def __init__(self):
        self._mutex = Lock()
        self._mail = None

    def put(self, mail):
        """
        Drop new Mail into the Mailbox, replacing whatever was there before

        Parameters
        ----------
        mail: Any
        """
        self._mail = mail

    def get(self, block=True):
        """
        Take the latest Mail out of the Mailbox

        Parameters
        ----------
        block: bool
            If True: Wait for Mail until it arrives in Mailbox
            If False: Return Empty Exception when Mailbox is Empty

        Returns
        -------
        mail: Any
        """
        with self._mutex:
            if not block:
                if self._mail is None:
                    raise Empty
                return self._get()
            # Poll until a producer drops mail in (put() does not take the
            # mutex, so this loop cannot deadlock against it).
            while self._mail is None:
                sleep(Mailbox.EPSILON)
            return self._get()

    def _get(self):
        """
        Remove the Mail and leave the Mailbox empty

        Returns
        -------
        mail: Any
        """
        mail, self._mail = self._mail, None
        return mail
class Bounds(object):
    """
    Rectangle Bounds Object

    The rectangle spans (x0, y0) — its lower corner — to (x1, y1) — its
    upper corner; the constructor rejects inverted corners.

    Parameters
    ----------
    x0: float
    y0: float
    x1: float
    y1: float
    """
    def __init__(self, x0, y0, x1, y1):
        # type: (float, float, float, float) -> None
        # NOTE(review): raising a Warning subclass as an exception is unusual;
        # kept as-is because callers may already catch RuntimeWarning.
        if x0 > x1 or y0 > y1:
            raise RuntimeWarning("Rectangle Error: Point (x1,y1) should be bigger than point (x0, y0)")
        self._x0 = x0
        self._y0 = y0
        self._x1 = x1
        self._y1 = y1
    @classmethod
    def from_json(cls, data):
        # type: (dict) -> Bounds
        """
        Create Bounds Object from Dictionary

        Parameters
        ----------
        data: dict
            Dictionary containing x0, y0, x1, y1 keys

        Returns
        -------
        bounds: Bounds
        """
        return cls(data["x0"], data["y0"], data["x1"], data["y1"])
    @property
    def x0(self):
        # type: () -> float
        """
        X0

        Returns
        -------
        x0: float
        """
        return self._x0
    @property
    def y0(self):
        # type: () -> float
        """
        Y0

        Returns
        -------
        y0: float
        """
        return self._y0
    @property
    def x1(self):
        # type: () -> float
        """
        X1

        Returns
        -------
        x1: float
        """
        return self._x1
    @property
    def y1(self):
        # type: () -> float
        """
        Y1

        Returns
        -------
        y1: float
        """
        return self._y1
    @property
    def width(self):
        # type: () -> float
        """
        Bounds Width

        Returns
        -------
        width: float
        """
        return self.x1 - self.x0
    @property
    def height(self):
        # type: () -> float
        """
        Bounds Height

        Returns
        -------
        height: float
        """
        return self.y1 - self.y0
    @property
    def center(self):
        # type: () -> (float, float)
        """
        Bounds Center

        Returns
        -------
        center: tuple
        """
        return (self.x0 + self.width / 2, self.y0 + self.height / 2)
    @property
    def area(self):
        # type: () -> float
        """
        Bounds Area

        Returns
        -------
        area: float
        """
        return self.width * self.height
    def intersection(self, bounds):
        # type: (Bounds) -> Optional[Bounds]
        """
        Bounds Intersection with another Bounds

        Parameters
        ----------
        bounds: Bounds

        Returns
        -------
        intersection: Bounds or None
            None when the two rectangles do not overlap (or touch only at an edge)
        """
        x0 = max(self.x0, bounds.x0)
        y0 = max(self.y0, bounds.y0)
        x1 = min(self.x1, bounds.x1)
        y1 = min(self.y1, bounds.y1)
        return None if x0 >= x1 or y0 >= y1 else Bounds(x0, y0, x1, y1)
    def overlap(self, other):
        # type: (Bounds) -> float
        """
        Bounds Overlap Ratio

        Parameters
        ----------
        other: Bounds

        Returns
        -------
        overlap: float
            intersection area divided by this rectangle's area; 0.0 when disjoint
        """
        intersection = self.intersection(other)
        if intersection:
            # NOTE(review): intersection.area <= self.area always holds, so this
            # min() reduces to intersection.area / self.area; possibly one of the
            # terms was meant to use other.area — confirm intent before changing.
            return min(intersection.area / self.area, self.area / intersection.area)
        else:
            return 0.0
    def is_subset_of(self, other):
        # type: (Bounds) -> bool
        """
        Whether 'this' Bounds is a subset of 'other' Bounds
        (i.e. this rectangle lies entirely inside 'other')

        Parameters
        ----------
        other: Bounds

        Returns
        -------
        is_subset_of: bool
            Whether 'this' Bounds is a subset of 'other' Bounds
        """
        return self.x0 >= other.x0 and self.y0 >= other.y0 and self.x1 <= other.x1 and self.y1 <= other.y1
    def is_superset_of(self, other):
        # type: (Bounds) -> bool
        """
        Whether 'this' Bounds is a superset of 'other' Bounds
        (i.e. this rectangle entirely contains 'other')

        Parameters
        ----------
        other: Bounds

        Returns
        -------
        is_superset_of: bool
            Whether 'this' Bounds is a superset of 'other' Bounds
        """
        return self.x0 <= other.x0 and self.y0 <= other.y0 and self.x1 >= other.x1 and self.y1 >= other.y1
    def contains(self, point):
        # type: ((float, float)) -> bool
        """
        Whether Point lies strictly inside Bounds (boundary excluded)

        Parameters
        ----------
        point: Tuple[float, float]

        Returns
        -------
        is_in: bool
            Whether Point lies in Bounds
        """
        x, y = point
        return self.x0 < x < self.x1 and self.y0 < y < self.y1
    def equals(self, other):
        # type: (Bounds) -> bool
        """
        Whether 'other' bounds equals 'this' bounds

        Parameters
        ----------
        other: Bounds

        Returns
        -------
        equals: bool
            Whether 'other' bounds equals 'this' bounds
        """
        return self.x0 == other.x0 and self.y0 == other.y0 and self.x1 == other.x1 and self.y1 == other.y1
    def scaled(self, x_scale, y_scale):
        # type: (float, float) -> Bounds
        """
        Return Scaled Bounds Object

        Parameters
        ----------
        x_scale: float
        y_scale: float

        Returns
        -------
        bounds: Bounds
            Scaled Bounds object
        """
        return Bounds(self.x0 * x_scale, self.y0 * y_scale, self.x1 * x_scale, self.y1 * y_scale)
    def to_list(self):
        # type: () -> List[float]
        """
        Export Bounds as List

        Returns
        -------
        bounds: List[float]
        """
        return [self.x0, self.y0, self.x1, self.y1]
    def dict(self):
        # type: () -> Dict[str, float]
        """
        Export Bounds as Dict

        Returns
        -------
        dict: Dict[str, float]
        """
        return {
            "x0": self.x0,
            "y0": self.y0,
            "x1": self.x1,
            "y1": self.y1
        }
    @property
    def json(self):
        # type: () -> str
        """
        Export Bounds as JSON

        Returns
        -------
        json: str
        """
        return json.dumps(self.dict())
    def __repr__(self):
        return "Bounds[({:3f}, {:3f}), ({:3f}, {:3f})]".format(self.x0, self.y0, self.x1, self.y1)
def spherical2cartesian(phi, theta, depth):
    """
    Spherical Coordinates to Cartesian Coordinates
    Phi: Left to Right, Theta: Down to Up, Depth: Distance
    x: Left to Right, y: down to up, z: close to far

    Given the formulas below, theta acts as the polar angle measured from
    the +y axis and phi as the azimuth within the x-z plane.

    Parameters
    ----------
    phi: float
    theta: float
    depth: float

    Returns
    -------
    x,y,z: float, float, float
    """
    # Project the radial distance onto the horizontal plane, then split by azimuth.
    x = depth * np.sin(theta) * np.cos(phi)
    # Vertical component comes straight from the polar angle.
    y = depth * np.cos(theta)
    z = depth * np.sin(theta) * np.sin(phi)
    return x, y, z | [
"threading.Thread.__init__",
"threading.Thread.join",
"time.sleep",
"threading.Lock",
"numpy.sin",
"numpy.cos"
] | [((955, 987), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {'name': 'name'}), '(self, name=name)\n', (970, 987), False, 'from threading import Thread, Lock\n'), ((1405, 1431), 'threading.Thread.join', 'Thread.join', (['self', 'timeout'], {}), '(self, timeout)\n', (1416, 1431), False, 'from threading import Thread, Lock\n'), ((1601, 1607), 'threading.Lock', 'Lock', ([], {}), '()\n', (1605, 1607), False, 'from threading import Thread, Lock\n'), ((9366, 9377), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (9372, 9377), True, 'import numpy as np\n'), ((9394, 9407), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9400, 9407), True, 'import numpy as np\n'), ((9440, 9451), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (9446, 9451), True, 'import numpy as np\n'), ((1310, 1331), 'time.sleep', 'sleep', (['self._interval'], {}), '(self._interval)\n', (1315, 1331), False, 'from time import sleep\n'), ((9350, 9363), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9356, 9363), True, 'import numpy as np\n'), ((9424, 9437), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9430, 9437), True, 'import numpy as np\n'), ((2291, 2313), 'time.sleep', 'sleep', (['Mailbox.EPSILON'], {}), '(Mailbox.EPSILON)\n', (2296, 2313), False, 'from time import sleep\n')] |
import os
import pdb
import random
import madmom
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import f1_score
from mir_eval.onset import f_measure
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from networks import *
from utils import set_seeds
# Pin the job to the second physical GPU and keep normal process priority.
os.environ["CUDA_VISIBLE_DEVICES"]="1"
os.nice(0)
gpu_name = '/GPU:0'
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Allocate GPU memory on demand instead of grabbing the whole device.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # set_memory_growth raises RuntimeError if the device was already
        # initialized; log and continue with default allocation.
        print(e)
def flatten_sequence(sequence, factor):
    """Overlap-add a stack of hop-sampled sliding windows back into one 1-D signal.

    ``sequence`` holds windows of ``seq_length`` frames taken every
    ``seq_length // factor`` frames, so consecutive windows overlap by
    ``factor - 1`` hops.  Overlapping frames are summed and then divided by
    the number of windows covering them, i.e. averaged.

    Parameters
    ----------
    sequence: np.ndarray
        Array of shape (num_windows, seq_length) with the windowed values.
    factor: int
        Overlap factor; the hop size is ``seq_length // factor``.

    Returns
    -------
    np.ndarray
        1-D array of length ``(num_windows - 1) * hop + seq_length``
        (empty when ``sequence`` has no windows).
    """
    seq_length = sequence.shape[-1]
    length = seq_length//factor          # hop size between consecutive windows
    seq_length_diff = seq_length - length  # overlapping head of each window
    if len(sequence) == 0:
        return np.zeros(0)
    # Bug fix: the buffer was previously sized from the *global* Dataset_Test
    # tensor (np.zeros(tf.size(Dataset_Test).numpy()*factor)), which silently
    # under-allocates whenever the input is larger than that unrelated tensor.
    # Size it exactly from the input instead; the result is unchanged because
    # the oversized buffer was truncated to this same length anyway.
    sequence_flat = np.zeros((len(sequence) - 1)*length + seq_length)
    for n in range(len(sequence)):
        point = n*length
        if n==0:
            sequence_flat[:seq_length] = sequence[n]
        else:
            # Sum the overlapping head of this window ...
            sequence_flat[point:point+seq_length_diff] = sequence_flat[point:point+seq_length_diff] + sequence[n][:-length]
            # ... and copy its non-overlapping tail.
            sequence_flat[point+seq_length_diff:point+seq_length_diff+length] = sequence[n][-length:]
    # Normalise the edges: the first/last hops are covered by fewer windows.
    for n in range(factor-1):
        point = n*length
        sequence_flat[point:point+length] = sequence_flat[point:point+length]/(n+1)
        if n==0:
            sequence_flat[-point-length:] = sequence_flat[-point-length:]/(n+1)
        else:
            sequence_flat[-point-length:-point] = sequence_flat[-point-length:-point]/(n+1)
    # Interior frames are covered by exactly `factor` windows.
    sequence_flat[(factor-1)*length:-(factor-1)*length] = sequence_flat[(factor-1)*length:-(factor-1)*length]/factor
    return sequence_flat
# --- Experiment configuration ---------------------------------------------
mode = 'BRNN_1S'            # model/experiment tag used for output folders
num_crossval = 7            # number of K-fold cross-validation splits
epochs = 10000              # upper bound; early stopping ends training sooner
patience_lr = 10            # epochs without val_loss improvement before LR drop
patience_early = 20         # epochs without val_loss improvement before stop
sequence_length = 16        # frames per training window
hop = 4                     # hop (in frames) between consecutive windows
# Peak-picking grids (seconds) searched for the onset detector.
pre_avg = np.array([0])*0.01
post_avg = np.array([0])*0.01
pre_max = np.array([0,1,2,4,7])*0.01
post_max = np.array([0,1,2,4,7])*0.01
#pre_avg = np.array([0])
#post_avg = np.array([0])
#pre_max = np.array([0])
#post_max = np.array([0])
num_thresholds_F1_score = 100.
lr = 1e-3
batch_size = 1024
hop_size = 128              # audio hop size in samples (at 22050 Hz)
#dropouts = [0,0.05,0.1,0.15,0.2,0.25,0.3]
dropouts = [0.1]
#dropouts = [0.3]
# Create output directories for model checkpoints and result arrays.
if not os.path.isdir('../../models/' + mode):
    os.mkdir('../../models/' + mode)
if not os.path.isdir('../../results/' + mode):
    os.mkdir('../../results/' + mode)
# Per-dropout result accumulators.
frame_dev_absmeans = np.zeros(len(dropouts))
frame_dev_absstds = np.zeros(len(dropouts))
frame_dev_means = np.zeros(len(dropouts))
frame_dev_stds = np.zeros(len(dropouts))
accuracies = np.zeros(len(dropouts))
precisions = np.zeros(len(dropouts))
recalls = np.zeros(len(dropouts))
all_thresholds_crossval = np.zeros((len(dropouts),num_crossval))
# Main experiment: for each dropout value, run 5 repetitions of K-fold
# cross-validated training, pick peak-picking parameters on train+val data,
# then evaluate onset detection on the held-out test set.
for a in range(len(dropouts)):
    dropout = dropouts[a]
    set_seeds(0)
    # Load pre-extracted spectrogram features (frames x bins) and frame labels.
    Tensor_TrainVal_Raw = np.load('../../data/interim/Dataset_TrainVal.npy').T
    Classes_TrainVal_Raw = np.load('../../data/interim/Classes_TrainVal.npy')
    Tensor_Test_Raw = np.load('../../data/interim/Dataset_Test.npy').T
    Classes_Test_Raw = np.load('../../data/interim/Classes_Test.npy')
    # Soft-label smearing around each annotated onset frame (value 1).
    for i in range(len(Classes_TrainVal_Raw)):
        if Classes_TrainVal_Raw[i]==1:
            Classes_TrainVal_Raw[i-1] = 0.2
            Classes_TrainVal_Raw[i+1] = 0.5
            Classes_TrainVal_Raw[i+2] = 0.1
    set_seeds(0)
    #Tensor_TrainVal = np.lib.stride_tricks.sliding_window_view(Tensor_TrainVal,(sequence_length,Tensor_TrainVal.shape[1]))[:,0,:,:]
    #Tensor_Test = np.lib.stride_tricks.sliding_window_view(Tensor_Test,(sequence_length,Tensor_Test.shape[1]))[:,0,:,:]
    # Cut the frame stream into overlapping windows of `sequence_length`,
    # then subsample every `hop`-th window.
    length = Tensor_TrainVal_Raw.shape[0]-sequence_length+1
    Tensor_TrainVal = np.zeros(shape=(length,sequence_length,Tensor_TrainVal_Raw.shape[1]))
    Classes_TrainVal = np.zeros(shape=(length,sequence_length))
    for n in range(sequence_length):
        Tensor_TrainVal[:,n] = Tensor_TrainVal_Raw[n:length+n]
        Classes_TrainVal[:,n] = Classes_TrainVal_Raw[n:length+n]
    Tensor_TrainVal = Tensor_TrainVal[::hop]
    Classes_TrainVal = Classes_TrainVal[::hop]
    length = Tensor_Test_Raw.shape[0]-sequence_length+1
    Tensor_Test = np.zeros(shape=(length,sequence_length,Tensor_Test_Raw.shape[1]))
    Classes_Test = np.zeros(shape=(length,sequence_length))
    for n in range(sequence_length):
        Tensor_Test[:,n] = Tensor_Test_Raw[n:length+n]
        Classes_Test[:,n] = Classes_Test_Raw[n:length+n]
    Tensor_Test = Tensor_Test[::hop]
    Classes_Test = Classes_Test[::hop]
    # Log-compress and min-max normalise; test data uses train statistics.
    Tensor_TrainVal = np.log(Tensor_TrainVal+1e-4)
    min_norm = np.min(Tensor_TrainVal)
    max_norm = np.max(Tensor_TrainVal)
    Tensor_TrainVal = (Tensor_TrainVal-min_norm)/(max_norm-min_norm+1e-16)
    Tensor_Test = np.log(Tensor_Test+1e-4)
    Tensor_Test = (Tensor_Test-min_norm)/(max_norm-min_norm+1e-16)
    # Reduced (per-window) views used only to drive the KFold split.
    Tensor_TrainVal_Reduced = np.sum(Tensor_TrainVal, axis=1)
    Classes_TrainVal_Reduced = np.clip(np.sum(Classes_TrainVal, axis=1), 0, 1)
    Tensor_Test_Reduced = np.sum(Tensor_Test, axis=1)
    Classes_Test_Reduced = np.clip(np.sum(Classes_Test, axis=1), 0, 1)
    Dataset_Test = Tensor_Test.copy()
    Dataset_Test = Dataset_Test.astype('float32')
    Classes_Test = Classes_Test.astype('float32')
    # Five repetitions with different model-init seeds (`it`).
    for it in range(5):
        skf = KFold(n_splits=num_crossval)
        # Per-fold peak-picking parameters and prediction normalisation stats.
        thresholds_crossval = np.zeros(num_crossval)
        pre_avg_crossval = np.zeros(num_crossval)
        post_avg_crossval = np.zeros(num_crossval)
        pre_max_crossval = np.zeros(num_crossval)
        post_max_crossval = np.zeros(num_crossval)
        min_norm_crossval = np.zeros(num_crossval)
        max_norm_crossval = np.zeros(num_crossval)
        pred_norm = []
        validation_accuracy = 0
        test_accuracy = 0
        min_val_loss = np.inf
        set_seeds(0)
        models = []
        g = 0
        for train_index, test_index in skf.split(Tensor_TrainVal_Reduced, Classes_TrainVal_Reduced):
            Dataset_Train, Dataset_Val = Tensor_TrainVal[train_index], Tensor_TrainVal[test_index]
            Classes_Train, Classes_Val = Classes_TrainVal[train_index], Classes_TrainVal[test_index]
            Dataset_Train = Dataset_Train.astype('float32')
            Dataset_Val = Dataset_Val.astype('float32')
            Classes_Train = Classes_Train.astype('float32')
            Classes_Val = Classes_Val.astype('float32')
            early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=patience_early, verbose=2, mode='auto', baseline=None, restore_best_weights=True)
            lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=patience_lr, verbose=2)
            with tf.device(gpu_name):
                set_seeds(it)
                model = BRNN_1(sequence_length, dropout)
                set_seeds(it)
                model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr), loss=tf.keras.losses.BinaryCrossentropy(from_logits=True)) # , metrics=['accuracy']
                set_seeds(it)
                history = model.fit(Dataset_Train, Classes_Train, batch_size=batch_size, epochs=epochs, validation_data=(Dataset_Val, Classes_Val), callbacks=[early_stopping,lr_scheduler], shuffle=True)
            # Track the fold whose best val_loss is globally lowest.
            if min(history.history['val_loss'])<min_val_loss:
                idx_best_model = g
                min_val_loss = min(history.history['val_loss'])
            print('Val Loss for fold ' + str(g+1) + ' of ' + str(num_crossval) + ': ' + str(min(history.history['val_loss'])))
            models.append(model)
            # Calculate threshold parameter with validation set
            print('Processing validation data...')
            #pred_train = flatten_sequence(model.predict(Dataset_Train.astype('float32')),hop)
            #pred_val = flatten_sequence(model.predict(Dataset_Val.astype('float32')),hop)
            #pred_all = np.concatenate((pred_train,pred_val))
            #pred_norm.append([np.max(pred_all),np.min(pred_all)])
            #Classes_Val[Classes_Val!=1] = 0
            #hop_size_ms = hop_size/22050
            #Prediction = (pred_val-pred_norm[g][1])/(pred_norm[g][0]-pred_norm[g][1])
            #Target = flatten_sequence(Classes_Val,hop)
            #predictions = model.predict(Dataset_Val.astype('float32'))
            #Prediction = tf.math.sigmoid(flatten_sequence(predictions, hop))
            #Prediction = flatten_sequence(predictions, hop)
            #Target = flatten_sequence(Classes_Val, hop)
            # Binarise the soft labels and flatten windows back to frame streams.
            Classes_TrainVal_CV = Classes_TrainVal.copy()
            Classes_TrainVal_CV[Classes_TrainVal_CV!=1] = 0
            hop_size_ms = hop_size/22050
            predictions_all = model.predict(Tensor_TrainVal.astype('float32'))
            Prediction = flatten_sequence(predictions_all, hop)
            Target = flatten_sequence(Classes_TrainVal_CV, hop)
            min_norm_crossval[g] = np.min(Prediction)
            max_norm_crossval[g] = np.max(Prediction)
            Prediction = (Prediction-min_norm_crossval[g])/(max_norm_crossval[g]-min_norm_crossval[g])
            # Convert target frame indices to onset times in seconds.
            factor = np.arange(len(Target))*hop_size_ms
            Target = factor*Target
            j = np.where(Target!=0)
            Target = Target[j]
            Target = Target[:Target.argmax()]
            for s in range(len(Target)-1):
                if Target[s+1]<Target[s]:
                    print('Ensuring Monotonic Target')
                    Target[s+1] = Target[s]
            num_thresholds = num_thresholds_F1_score
            Threshold = np.arange(int(num_thresholds+2))/(num_thresholds+2)
            Threshold = Threshold[1:-1]
            print('Calculating threshold...')
            # Grid-search peak-picking parameters against the annotated onsets.
            f1_score = np.zeros((len(Threshold),len(pre_avg),len(post_avg),len(pre_max),len(post_max)))
            precision = np.zeros((len(Threshold),len(pre_avg),len(post_avg),len(pre_max),len(post_max)))
            recall = np.zeros((len(Threshold),len(pre_avg),len(post_avg),len(pre_max),len(post_max)))
            for i in range(len(Threshold)):
                for c in range(len(pre_avg)):
                    for d in range(len(post_avg)):
                        for e in range(len(pre_max)):
                            for f in range(len(post_max)):
                                pick_picker = madmom.features.onsets.OnsetPeakPickingProcessor(fps=172.265625,pre_avg=pre_avg[c],post_avg=post_avg[d],pre_max=pre_max[e],post_max=post_max[f],threshold=Threshold[i])
                                Pred = pick_picker(Prediction)
                                #Pred = np.argwhere(Prediction>=Threshold[i])[:,0]*hop_size_ms
                                #ind_delete = [i+1 for (x,y,i) in zip(Pred,Pred[1:],range(len(Pred))) if 0.015>abs(x-y)]
                                #Pred = np.delete(Pred, ind_delete)
                                f1_score[i,c,d,e,f], precision[i,c,d,e,f], recall[i,c,d,e,f] = f_measure(Target, Pred, window=0.03)
                print(str(i+1) + '/' + str(len(Threshold)))
            # Keep the parameter combination with the best F1 score.
            max_f1 = 0
            idx_max = np.zeros(5)
            for i in range(len(Threshold)):
                for c in range(len(pre_avg)):
                    for d in range(len(post_avg)):
                        for e in range(len(pre_max)):
                            for f in range(len(post_max)):
                                if np.mean(f1_score[i,c,d,e,f])>max_f1:
                                    max_f1 = np.mean(f1_score[i,c,d,e,f])
                                    scores_max = f1_score[i,c,d,e,f]
                                    idx_max = np.array([i,c,d,e,f])
            print(idx_max)
            print('Val Accuracy for fold ' + str(g+1) + ' of ' + str(num_crossval) + ': ' + str(np.max(f1_score)))
            thresholds_crossval[g] = Threshold[idx_max[0]]
            pre_avg_crossval[g] = pre_avg[idx_max[1]]
            post_avg_crossval[g] = post_avg[idx_max[2]]
            pre_max_crossval[g] = pre_max[idx_max[3]]
            post_max_crossval[g] = post_max[idx_max[4]]
            g += 1
            tf.keras.backend.clear_session()
        # Test
        print('Evaluating...')
        #idx_model = (np.abs((thresholds_crossval-thresholds_crossval.mean()))).argmin()
        # Evaluate with the fold model that achieved the lowest val_loss.
        idx_model = idx_best_model
        model = models[idx_model]
        pre_avg_eval = pre_avg_crossval[idx_model]
        post_avg_eval = post_avg_crossval[idx_model]
        pre_max_eval = pre_max_crossval[idx_model]
        post_max_eval = post_max_crossval[idx_model]
        predictions = model.predict(Dataset_Test.astype('float32'))
        hop_size_ms = hop_size/22050
        #Prediction = (flatten_sequence(predictions,hop)-pred_norm[idx_best_model][1])/(pred_norm[idx_best_model][0]-pred_norm[idx_best_model][1])
        #Target = flatten_sequence(Classes_Test, hop)
        #Prediction = tf.math.sigmoid(flatten_sequence(predictions, hop))
        Prediction = flatten_sequence(predictions, hop)
        Target = flatten_sequence(Classes_Test, hop)
        factor = np.arange(len(Target))*hop_size_ms
        Target = factor*Target
        j = np.where(Target!=0)
        Target = Target[j]
        Target = Target[:Target.argmax()]
        for s in range(len(Target)-1):
            if Target[s+1]<Target[s]:
                print('Ensuring Monotonic Target')
                Target[s+1] = Target[s]
        Prediction = (Prediction-min_norm_crossval[idx_model])/(max_norm_crossval[idx_model]-min_norm_crossval[idx_model])
        # Peak-pick with the cross-validated average threshold.
        threshold = np.mean(thresholds_crossval)
        pick_picker = madmom.features.onsets.OnsetPeakPickingProcessor(fps=172.265625,pre_avg=pre_avg_eval,post_avg=post_avg_eval,pre_max=pre_max_eval,post_max=post_max_eval,threshold=threshold)
        Pred = pick_picker(Prediction)
        #Pred = np.argwhere(Prediction>=threshold)[:,0]*hop_size_ms
        #ind_delete = [i+1 for (x,y,i) in zip(Pred,Pred[1:],range(len(Pred))) if 0.015>abs(x-y)]
        #Pred = np.delete(Pred, ind_delete)
        test_accuracy, test_precision, test_recall = f_measure(Target, Pred, window=0.03)
        print('Test Accuracy: {:.4f}'.format(test_accuracy))
        # Collect the signed timing deviation of each matched onset (<=30 ms).
        min_values = []
        min_indices = []
        for k in range(len(Pred)):
            abs_diff = Target-Pred[k]
            diff = np.abs(abs_diff)
            if diff.argmin() not in min_indices:
                min_indices.append(diff.argmin())
            else:
                continue
            min_value = abs_diff[diff.argmin()]
            if abs(min_value)<=0.03:
                min_values.append(min_value)
        min_values = np.array(min_values)
        frame_dev_absmeans[a] = np.mean(np.abs(min_values))
        frame_dev_absstds[a] = np.std(np.abs(min_values))
        frame_dev_means[a] = np.mean(min_values)
        frame_dev_stds[a] = np.std(min_values)
        accuracies[a] = test_accuracy
        precisions[a] = test_precision
        recalls[a] = test_recall
        all_thresholds_crossval[a] = thresholds_crossval
        print('')
        print('Dropout: ' + str(dropout))
        print('Mean Absolute Onset Deviation: ' + str(frame_dev_absmeans[a]))
        print('STD Absolute Onset Deviation: ' + str(frame_dev_absstds[a]))
        print('Mean Deviation: ' + str(frame_dev_means[a]))
        print('STD Deviation: ' + str(frame_dev_stds[a]))
        print('Accuracy: ' + str(accuracies[a]))
        print('Precision: ' + str(precisions[a]))
        print('Recall: ' + str(recalls[a]))
        print('Cross-Validated Thresholds: ' + str(all_thresholds_crossval[a]))
        print('')
        # Persist all per-dropout metric arrays after every repetition.
        np.save('../../results/' + mode + '/frame_dev_absstds', frame_dev_absstds)
        np.save('../../results/' + mode + '/frame_dev_absmeans', frame_dev_absmeans)
        np.save('../../results/' + mode + '/frame_dev_means', frame_dev_means)
        np.save('../../results/' + mode + '/frame_dev_stds', frame_dev_stds)
        np.save('../../results/' + mode + '/accuracies', accuracies)
        np.save('../../results/' + mode + '/precisions', precisions)
        np.save('../../results/' + mode + '/recalls', recalls)
        np.save('../../results/' + mode + '/thresholds', all_thresholds_crossval)
        '''if accuracies[a]==np.max(accuracies):
            for g in range(num_crossval):
                models[idx_best_model].save_weights('../../models/' + mode + '/model_dropout_' + str(dropout) + '_crossval_' + str(idx_best_model) + '.h5')'''
    tf.keras.backend.clear_session() | [
"os.mkdir",
"numpy.load",
"numpy.sum",
"numpy.abs",
"numpy.mean",
"os.nice",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.size",
"madmom.features.onsets.OnsetPeakPickingProcessor",
"numpy.std",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.max",
"tensorflow.keras.optimize... | [((408, 418), 'os.nice', 'os.nice', (['(0)'], {}), '(0)\n', (415, 418), False, 'import os\n'), ((447, 498), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (491, 498), True, 'import tensorflow as tf\n'), ((1941, 1954), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1949, 1954), True, 'import numpy as np\n'), ((1971, 1984), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1979, 1984), True, 'import numpy as np\n'), ((2000, 2025), 'numpy.array', 'np.array', (['[0, 1, 2, 4, 7]'], {}), '([0, 1, 2, 4, 7])\n', (2008, 2025), True, 'import numpy as np\n'), ((2038, 2063), 'numpy.array', 'np.array', (['[0, 1, 2, 4, 7]'], {}), '([0, 1, 2, 4, 7])\n', (2046, 2063), True, 'import numpy as np\n'), ((2331, 2368), 'os.path.isdir', 'os.path.isdir', (["('../../models/' + mode)"], {}), "('../../models/' + mode)\n", (2344, 2368), False, 'import os\n'), ((2374, 2406), 'os.mkdir', 'os.mkdir', (["('../../models/' + mode)"], {}), "('../../models/' + mode)\n", (2382, 2406), False, 'import os\n'), ((2415, 2453), 'os.path.isdir', 'os.path.isdir', (["('../../results/' + mode)"], {}), "('../../results/' + mode)\n", (2428, 2453), False, 'import os\n'), ((2459, 2492), 'os.mkdir', 'os.mkdir', (["('../../results/' + mode)"], {}), "('../../results/' + mode)\n", (2467, 2492), False, 'import os\n'), ((2905, 2917), 'utils.set_seeds', 'set_seeds', (['(0)'], {}), '(0)\n', (2914, 2917), False, 'from utils import set_seeds\n'), ((3025, 3075), 'numpy.load', 'np.load', (['"""../../data/interim/Classes_TrainVal.npy"""'], {}), "('../../data/interim/Classes_TrainVal.npy')\n", (3032, 3075), True, 'import numpy as np\n'), ((3170, 3216), 'numpy.load', 'np.load', (['"""../../data/interim/Classes_Test.npy"""'], {}), "('../../data/interim/Classes_Test.npy')\n", (3177, 3216), True, 'import numpy as np\n'), ((3445, 3457), 'utils.set_seeds', 'set_seeds', (['(0)'], {}), '(0)\n', (3454, 
3457), False, 'from utils import set_seeds\n'), ((3796, 3867), 'numpy.zeros', 'np.zeros', ([], {'shape': '(length, sequence_length, Tensor_TrainVal_Raw.shape[1])'}), '(shape=(length, sequence_length, Tensor_TrainVal_Raw.shape[1]))\n', (3804, 3867), True, 'import numpy as np\n'), ((3889, 3930), 'numpy.zeros', 'np.zeros', ([], {'shape': '(length, sequence_length)'}), '(shape=(length, sequence_length))\n', (3897, 3930), True, 'import numpy as np\n'), ((4262, 4329), 'numpy.zeros', 'np.zeros', ([], {'shape': '(length, sequence_length, Tensor_Test_Raw.shape[1])'}), '(shape=(length, sequence_length, Tensor_Test_Raw.shape[1]))\n', (4270, 4329), True, 'import numpy as np\n'), ((4347, 4388), 'numpy.zeros', 'np.zeros', ([], {'shape': '(length, sequence_length)'}), '(shape=(length, sequence_length))\n', (4355, 4388), True, 'import numpy as np\n'), ((4636, 4668), 'numpy.log', 'np.log', (['(Tensor_TrainVal + 0.0001)'], {}), '(Tensor_TrainVal + 0.0001)\n', (4642, 4668), True, 'import numpy as np\n'), ((4680, 4703), 'numpy.min', 'np.min', (['Tensor_TrainVal'], {}), '(Tensor_TrainVal)\n', (4686, 4703), True, 'import numpy as np\n'), ((4719, 4742), 'numpy.max', 'np.max', (['Tensor_TrainVal'], {}), '(Tensor_TrainVal)\n', (4725, 4742), True, 'import numpy as np\n'), ((4836, 4864), 'numpy.log', 'np.log', (['(Tensor_Test + 0.0001)'], {}), '(Tensor_Test + 0.0001)\n', (4842, 4864), True, 'import numpy as np\n'), ((4959, 4990), 'numpy.sum', 'np.sum', (['Tensor_TrainVal'], {'axis': '(1)'}), '(Tensor_TrainVal, axis=1)\n', (4965, 4990), True, 'import numpy as np\n'), ((5096, 5123), 'numpy.sum', 'np.sum', (['Tensor_Test'], {'axis': '(1)'}), '(Tensor_Test, axis=1)\n', (5102, 5123), True, 'import numpy as np\n'), ((2945, 2995), 'numpy.load', 'np.load', (['"""../../data/interim/Dataset_TrainVal.npy"""'], {}), "('../../data/interim/Dataset_TrainVal.npy')\n", (2952, 2995), True, 'import numpy as np\n'), ((3098, 3144), 'numpy.load', 'np.load', (['"""../../data/interim/Dataset_Test.npy"""'], {}), 
"('../../data/interim/Dataset_Test.npy')\n", (3105, 3144), True, 'import numpy as np\n'), ((5030, 5062), 'numpy.sum', 'np.sum', (['Classes_TrainVal'], {'axis': '(1)'}), '(Classes_TrainVal, axis=1)\n', (5036, 5062), True, 'import numpy as np\n'), ((5159, 5187), 'numpy.sum', 'np.sum', (['Classes_Test'], {'axis': '(1)'}), '(Classes_Test, axis=1)\n', (5165, 5187), True, 'import numpy as np\n'), ((5375, 5403), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'num_crossval'}), '(n_splits=num_crossval)\n', (5380, 5403), False, 'from sklearn.model_selection import KFold\n'), ((5435, 5457), 'numpy.zeros', 'np.zeros', (['num_crossval'], {}), '(num_crossval)\n', (5443, 5457), True, 'import numpy as np\n'), ((5485, 5507), 'numpy.zeros', 'np.zeros', (['num_crossval'], {}), '(num_crossval)\n', (5493, 5507), True, 'import numpy as np\n'), ((5536, 5558), 'numpy.zeros', 'np.zeros', (['num_crossval'], {}), '(num_crossval)\n', (5544, 5558), True, 'import numpy as np\n'), ((5586, 5608), 'numpy.zeros', 'np.zeros', (['num_crossval'], {}), '(num_crossval)\n', (5594, 5608), True, 'import numpy as np\n'), ((5637, 5659), 'numpy.zeros', 'np.zeros', (['num_crossval'], {}), '(num_crossval)\n', (5645, 5659), True, 'import numpy as np\n'), ((5689, 5711), 'numpy.zeros', 'np.zeros', (['num_crossval'], {}), '(num_crossval)\n', (5697, 5711), True, 'import numpy as np\n'), ((5740, 5762), 'numpy.zeros', 'np.zeros', (['num_crossval'], {}), '(num_crossval)\n', (5748, 5762), True, 'import numpy as np\n'), ((5886, 5898), 'utils.set_seeds', 'set_seeds', (['(0)'], {}), '(0)\n', (5895, 5898), False, 'from utils import set_seeds\n'), ((13202, 13223), 'numpy.where', 'np.where', (['(Target != 0)'], {}), '(Target != 0)\n', (13210, 13223), True, 'import numpy as np\n'), ((13613, 13641), 'numpy.mean', 'np.mean', (['thresholds_crossval'], {}), '(thresholds_crossval)\n', (13620, 13641), True, 'import numpy as np\n'), ((13665, 13852), 'madmom.features.onsets.OnsetPeakPickingProcessor', 
'madmom.features.onsets.OnsetPeakPickingProcessor', ([], {'fps': '(172.265625)', 'pre_avg': 'pre_avg_eval', 'post_avg': 'post_avg_eval', 'pre_max': 'pre_max_eval', 'post_max': 'post_max_eval', 'threshold': 'threshold'}), '(fps=172.265625, pre_avg=\n pre_avg_eval, post_avg=post_avg_eval, pre_max=pre_max_eval, post_max=\n post_max_eval, threshold=threshold)\n', (13713, 13852), False, 'import madmom\n'), ((14139, 14175), 'mir_eval.onset.f_measure', 'f_measure', (['Target', 'Pred'], {'window': '(0.03)'}), '(Target, Pred, window=0.03)\n', (14148, 14175), False, 'from mir_eval.onset import f_measure\n'), ((14703, 14723), 'numpy.array', 'np.array', (['min_values'], {}), '(min_values)\n', (14711, 14723), True, 'import numpy as np\n'), ((14880, 14899), 'numpy.mean', 'np.mean', (['min_values'], {}), '(min_values)\n', (14887, 14899), True, 'import numpy as np\n'), ((14928, 14946), 'numpy.std', 'np.std', (['min_values'], {}), '(min_values)\n', (14934, 14946), True, 'import numpy as np\n'), ((15726, 15800), 'numpy.save', 'np.save', (["('../../results/' + mode + '/frame_dev_absstds')", 'frame_dev_absstds'], {}), "('../../results/' + mode + '/frame_dev_absstds', frame_dev_absstds)\n", (15733, 15800), True, 'import numpy as np\n'), ((15809, 15885), 'numpy.save', 'np.save', (["('../../results/' + mode + '/frame_dev_absmeans')", 'frame_dev_absmeans'], {}), "('../../results/' + mode + '/frame_dev_absmeans', frame_dev_absmeans)\n", (15816, 15885), True, 'import numpy as np\n'), ((15894, 15964), 'numpy.save', 'np.save', (["('../../results/' + mode + '/frame_dev_means')", 'frame_dev_means'], {}), "('../../results/' + mode + '/frame_dev_means', frame_dev_means)\n", (15901, 15964), True, 'import numpy as np\n'), ((15973, 16041), 'numpy.save', 'np.save', (["('../../results/' + mode + '/frame_dev_stds')", 'frame_dev_stds'], {}), "('../../results/' + mode + '/frame_dev_stds', frame_dev_stds)\n", (15980, 16041), True, 'import numpy as np\n'), ((16051, 16111), 'numpy.save', 'np.save', 
(["('../../results/' + mode + '/accuracies')", 'accuracies'], {}), "('../../results/' + mode + '/accuracies', accuracies)\n", (16058, 16111), True, 'import numpy as np\n'), ((16120, 16180), 'numpy.save', 'np.save', (["('../../results/' + mode + '/precisions')", 'precisions'], {}), "('../../results/' + mode + '/precisions', precisions)\n", (16127, 16180), True, 'import numpy as np\n'), ((16189, 16243), 'numpy.save', 'np.save', (["('../../results/' + mode + '/recalls')", 'recalls'], {}), "('../../results/' + mode + '/recalls', recalls)\n", (16196, 16243), True, 'import numpy as np\n'), ((16252, 16325), 'numpy.save', 'np.save', (["('../../results/' + mode + '/thresholds')", 'all_thresholds_crossval'], {}), "('../../results/' + mode + '/thresholds', all_thresholds_crossval)\n", (16259, 16325), True, 'import numpy as np\n'), ((16586, 16618), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (16616, 16618), True, 'import tensorflow as tf\n'), ((554, 605), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (594, 605), True, 'import tensorflow as tf\n'), ((6501, 6666), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': 'patience_early', 'verbose': '(2)', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=0, patience=\n patience_early, verbose=2, mode='auto', baseline=None,\n restore_best_weights=True)\n", (6533, 6666), True, 'import tensorflow as tf\n'), ((6685, 6779), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'tf.keras.callbacks.ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'patience': 'patience_lr', 'verbose': '(2)'}), "(monitor='val_loss', patience=\n patience_lr, verbose=2)\n", (6721, 6779), True, 'import tensorflow as tf\n'), ((8996, 9014), 'numpy.min', 'np.min', 
(['Prediction'], {}), '(Prediction)\n', (9002, 9014), True, 'import numpy as np\n'), ((9050, 9068), 'numpy.max', 'np.max', (['Prediction'], {}), '(Prediction)\n', (9056, 9068), True, 'import numpy as np\n'), ((9281, 9302), 'numpy.where', 'np.where', (['(Target != 0)'], {}), '(Target != 0)\n', (9289, 9302), True, 'import numpy as np\n'), ((11158, 11169), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (11166, 11169), True, 'import numpy as np\n'), ((12163, 12195), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (12193, 12195), True, 'import tensorflow as tf\n'), ((14380, 14396), 'numpy.abs', 'np.abs', (['abs_diff'], {}), '(abs_diff)\n', (14386, 14396), True, 'import numpy as np\n'), ((14773, 14791), 'numpy.abs', 'np.abs', (['min_values'], {}), '(min_values)\n', (14779, 14791), True, 'import numpy as np\n'), ((14831, 14849), 'numpy.abs', 'np.abs', (['min_values'], {}), '(min_values)\n', (14837, 14849), True, 'import numpy as np\n'), ((6793, 6812), 'tensorflow.device', 'tf.device', (['gpu_name'], {}), '(gpu_name)\n', (6802, 6812), True, 'import tensorflow as tf\n'), ((6830, 6843), 'utils.set_seeds', 'set_seeds', (['it'], {}), '(it)\n', (6839, 6843), False, 'from utils import set_seeds\n'), ((6917, 6930), 'utils.set_seeds', 'set_seeds', (['it'], {}), '(it)\n', (6926, 6930), False, 'from utils import set_seeds\n'), ((7115, 7128), 'utils.set_seeds', 'set_seeds', (['it'], {}), '(it)\n', (7124, 7128), False, 'from utils import set_seeds\n'), ((840, 861), 'tensorflow.size', 'tf.size', (['Dataset_Test'], {}), '(Dataset_Test)\n', (847, 861), True, 'import tensorflow as tf\n'), ((6971, 7013), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (6995, 7013), True, 'import tensorflow as tf\n'), ((7020, 7072), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (7054, 7072), 
True, 'import tensorflow as tf\n'), ((11831, 11847), 'numpy.max', 'np.max', (['f1_score'], {}), '(f1_score)\n', (11837, 11847), True, 'import numpy as np\n'), ((10405, 10587), 'madmom.features.onsets.OnsetPeakPickingProcessor', 'madmom.features.onsets.OnsetPeakPickingProcessor', ([], {'fps': '(172.265625)', 'pre_avg': 'pre_avg[c]', 'post_avg': 'post_avg[d]', 'pre_max': 'pre_max[e]', 'post_max': 'post_max[f]', 'threshold': 'Threshold[i]'}), '(fps=172.265625, pre_avg=\n pre_avg[c], post_avg=post_avg[d], pre_max=pre_max[e], post_max=post_max\n [f], threshold=Threshold[i])\n', (10453, 10587), False, 'import madmom\n'), ((11015, 11051), 'mir_eval.onset.f_measure', 'f_measure', (['Target', 'Pred'], {'window': '(0.03)'}), '(Target, Pred, window=0.03)\n', (11024, 11051), False, 'from mir_eval.onset import f_measure\n'), ((11459, 11491), 'numpy.mean', 'np.mean', (['f1_score[i, c, d, e, f]'], {}), '(f1_score[i, c, d, e, f])\n', (11466, 11491), True, 'import numpy as np\n'), ((11541, 11573), 'numpy.mean', 'np.mean', (['f1_score[i, c, d, e, f]'], {}), '(f1_score[i, c, d, e, f])\n', (11548, 11573), True, 'import numpy as np\n'), ((11685, 11710), 'numpy.array', 'np.array', (['[i, c, d, e, f]'], {}), '([i, c, d, e, f])\n', (11693, 11710), True, 'import numpy as np\n')] |
"""
Plot spectra.
"""
from __future__ import print_function, division
import os
import numpy as np
import astropy.units as u
import astropy.constants as const
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from tqdm import tqdm
import context
def plot_ngc3311(field=None, targetSN=250):
    """Plot binned spectra of NGC 3311 with Lick-index bandpasses overlaid.

    For every Voronoi bin listed in the pPXF kinematics table, plots the
    observed spectrum and its best fit, highlighting the blue/central/red
    bandpasses of each spectral index (redshifted to the bin's velocity).
    Figures are written as PNGs under ``<wdir>/plots/spec_with_indices``.

    Parameters
    ----------
    field : str, optional
        MUSE field identifier (e.g. ``"fieldA"``). Defaults to ``"fieldA"``.
    targetSN : int, optional
        Target S/N of the binning, used to locate the working directory
        (default 250).
    """
    # Spectral indices to display; "_muse" suffixes mark bands adapted to
    # the MUSE wavelength coverage (rendered with a "*" in panel titles).
    indnames = ['bTiO_muse', 'H_beta', 'Fe5015', 'Mg_1', 'Mg_2', 'Mg_b',
                'Fe5270', 'Fe5335', 'Fe5406', 'Fe5709', 'Fe5782', 'aTiO',
                'Na_D', 'TiO_1', 'TiO_2_muse', 'CaH_1',
                'CaH_2_muse', 'TiO_3', 'TiO_4', 'NaI', 'CaT1', 'CaT2',
                'CaT3']
    ylabels = [name.replace("_", "").replace("muse", "*") for name in indnames]
    # Band definition table shipped alongside the context package.
    bandsfile = os.path.join(
        os.path.split(os.path.abspath(context.__file__))[0],
        "tables/spindex_CS.dat")
    names = np.loadtxt(bandsfile, usecols=(8,), dtype=str).tolist()
    idx = [names.index(index) for index in indnames]
    # Rest-frame bandpass edges: blue, central and red passband limits.
    bandsz0 = np.loadtxt(bandsfile, usecols=(3, 4, 1, 2, 5, 6))[idx]
    # Locate working directory for the requested field and target S/N.
    field = "fieldA" if field is None else field
    _imgname, cubename = context.get_field_files(field)
    wdir = os.path.join(os.path.split(cubename)[0], "sn{}".format(targetSN))
    outdir = os.path.join(wdir, "plots", "spec_with_indices")
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    # Kinematics table supplies the velocity used to redshift the bands.
    ktable = Table.read(os.path.join(wdir,
                                     "ppxf_vel50_w4500_10000_kinematics.fits"))
    for spec in tqdm(ktable, desc="Producing figures:"):
        vel = spec["V"]
        c = const.c.to("km/s").value
        # Relativistic Doppler factor applied to the rest-frame bands.
        dwave = np.sqrt((1 + vel / c) / (1 - vel / c))
        data = Table.read(os.path.join(wdir, "ppxf_vel50_w4500_10000_kinematics",
                                       "{}_bestfit.fits".format(spec["spec"])))
        wave = data["lam"]
        norm = 1e19  # arbitrary flux scaling for readable axis labels
        flux = (data["galaxy"] - data["gas_bestfit"]) * norm
        fluxerr = data["noise"] * norm
        bestfit = (data["bestfit"] - data["gas_bestfit"]) * norm
        plt.figure(figsize=(15, 8))
        gs = GridSpec(5, 6)
        # Top row: full spectrum; remaining 4x6 grid: one panel per index.
        ax0 = plt.subplot(gs[0, :])
        ax0.plot(wave, flux)
        ax0.plot(wave, bestfit)
        ax0.set_ylabel(r"$f_{\lambda}$")
        ax0.set_xlabel(r"$\lambda$ (\r{A})")
        for i, w0 in enumerate(bandsz0):
            w = w0 * dwave
            axn = plt.subplot(gs[i + 6])
            dw = 20  # Angstrom margin around the index window
            sel = np.where((wave >= w[0] - dw) & (wave <= w[5] + dw))
            axn.plot(wave[sel], flux[sel])
            axn.plot(wave[sel], bestfit[sel])
            axn.fill_between(wave[sel], flux[sel] - fluxerr[sel],
                             flux[sel] + fluxerr[sel])
            axn.set_xlabel(r"$\lambda$ (\r{A})")
            axn.set_ylabel(r"$f_{\lambda}$")
            # Shade the blue/central/red passbands in both panels.
            for ax in [ax0, axn]:
                ax.axvspan(w[0], w[1], alpha=.3, color="b")
                ax.axvspan(w[2], w[3], alpha=.3, color="g")
                ax.axvspan(w[4], w[5], alpha=.3, color="r")
            axn.set_title(ylabels[i])
        plt.tight_layout()
        plt.savefig(os.path.join(outdir, "{}.png".format(spec["spec"])),
                    dpi=250)
        plt.close()
# Entry point: produce figures for the default field (A) and S/N target.
if __name__ == "__main__":
    plot_ngc3311()
| [
"os.mkdir",
"tqdm.tqdm",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.tight_layout",
"astropy.constants.c.to",
"os.path.abspath",
"os.path.split",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.loadtxt",
"context.get_field_files",
"matplot... | [((1365, 1395), 'context.get_field_files', 'context.get_field_files', (['field'], {}), '(field)\n', (1388, 1395), False, 'import context\n'), ((1486, 1534), 'os.path.join', 'os.path.join', (['wdir', '"""plots"""', '"""spec_with_indices"""'], {}), "(wdir, 'plots', 'spec_with_indices')\n", (1498, 1534), False, 'import os\n'), ((1765, 1804), 'tqdm.tqdm', 'tqdm', (['ktable'], {'desc': '"""Producing figures:"""'}), "(ktable, desc='Producing figures:')\n", (1769, 1804), False, 'from tqdm import tqdm\n'), ((1096, 1145), 'numpy.loadtxt', 'np.loadtxt', (['bandsfile'], {'usecols': '(3, 4, 1, 2, 5, 6)'}), '(bandsfile, usecols=(3, 4, 1, 2, 5, 6))\n', (1106, 1145), True, 'import numpy as np\n'), ((1162, 1197), 'numpy.loadtxt', 'np.loadtxt', (['bandsfile'], {'usecols': '(7,)'}), '(bandsfile, usecols=(7,))\n', (1172, 1197), True, 'import numpy as np\n'), ((1235, 1248), 'astropy.units.Unit', 'u.Unit', (['"""mag"""'], {}), "('mag')\n", (1241, 1248), True, 'import astropy.units as u\n'), ((1250, 1268), 'astropy.units.Unit', 'u.Unit', (['"""angstrom"""'], {}), "('angstrom')\n", (1256, 1268), True, 'import astropy.units as u\n'), ((1546, 1568), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (1560, 1568), False, 'import os\n'), ((1578, 1594), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (1586, 1594), False, 'import os\n'), ((1653, 1713), 'os.path.join', 'os.path.join', (['wdir', '"""ppxf_vel50_w4500_10000_kinematics.fits"""'], {}), "(wdir, 'ppxf_vel50_w4500_10000_kinematics.fits')\n", (1665, 1713), False, 'import os\n'), ((1883, 1921), 'numpy.sqrt', 'np.sqrt', (['((1 + vel / c) / (1 - vel / c))'], {}), '((1 + vel / c) / (1 - vel / c))\n', (1890, 1921), True, 'import numpy as np\n'), ((2308, 2335), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (2318, 2335), True, 'import matplotlib.pyplot as plt\n'), ((2349, 2363), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(5)', '(6)'], {}), '(5, 6)\n', 
(2357, 2363), False, 'from matplotlib.gridspec import GridSpec\n'), ((2378, 2399), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, :]'], {}), '(gs[0, :])\n', (2389, 2399), True, 'import matplotlib.pyplot as plt\n'), ((3304, 3322), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3320, 3322), True, 'import matplotlib.pyplot as plt\n'), ((3433, 3444), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3442, 3444), True, 'import matplotlib.pyplot as plt\n'), ((973, 1019), 'numpy.loadtxt', 'np.loadtxt', (['bandsfile'], {'usecols': '(8,)', 'dtype': 'str'}), '(bandsfile, usecols=(8,), dtype=str)\n', (983, 1019), True, 'import numpy as np\n'), ((1420, 1443), 'os.path.split', 'os.path.split', (['cubename'], {}), '(cubename)\n', (1433, 1443), False, 'import os\n'), ((1842, 1860), 'astropy.constants.c.to', 'const.c.to', (['"""km/s"""'], {}), "('km/s')\n", (1852, 1860), True, 'import astropy.constants as const\n'), ((2630, 2652), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i + 6]'], {}), '(gs[i + 6])\n', (2641, 2652), True, 'import matplotlib.pyplot as plt\n'), ((2689, 2740), 'numpy.where', 'np.where', (['((wave >= w[0] - dw) & (wave <= w[5] + dw))'], {}), '((wave >= w[0] - dw) & (wave <= w[5] + dw))\n', (2697, 2740), True, 'import numpy as np\n'), ((838, 871), 'os.path.abspath', 'os.path.abspath', (['context.__file__'], {}), '(context.__file__)\n', (853, 871), False, 'import os\n')] |
from itertools import cycle
import numpy as np
import torch
import torch.nn as nn
import models
from layers import masked_cross_entropy
from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric
import os
from tqdm import tqdm
from math import isnan
import re
import math
import pickle
import gensim
# Pretrained Google News word2vec binary, loaded lazily by
# Solver.embedding_metric() for embedding-based response metrics.
word2vec_path = "../datasets/GoogleNews-vectors-negative300.bin"
class Solver(object):
    """Training/evaluation driver for the baseline (non-variational) model.

    Owns the model, optimizer, data loaders, vocabulary and TensorBoard
    writer, and implements the train / evaluate / test loops plus
    word2vec embedding-based response metrics.
    """
    def __init__(self, config, train_data_loader, eval_data_loader, vocab, is_train=True, model=None):
        self.config = config
        # Starting epoch; overwritten by load_model() when resuming.
        self.epoch_i = 0
        self.train_data_loader = train_data_loader
        self.eval_data_loader = eval_data_loader
        self.vocab = vocab
        self.is_train = is_train
        self.model = model
    @time_desc_decorator('Build Graph')
    def build(self, cuda=True):
        """Instantiate the model, initialize parameters, and set up the
        optimizer and TensorBoard writer (the latter only when training)."""
        if self.model is None:
            self.model = getattr(models, self.config.model)(self.config)
        # orthogonal initialiation for hidden weights
        # input gate bias for GRUs
        if self.config.mode == 'train' and self.config.checkpoint is None:
            print('Parameter initiailization')
            for name, param in self.model.named_parameters():
                if 'weight_hh' in name:
                    print('\t' + name)
                    nn.init.orthogonal_(param)
                # bias_hh is concatenation of reset, input, new gates
                # only set the input gate bias to 2.0
                if 'bias_hh' in name:
                    print('\t' + name)
                    dim = int(param.size(0) / 3)
                    param.data[dim:2 * dim].fill_(2.0)
        if torch.cuda.is_available() and cuda:
            self.model.cuda()
        # Overview Parameters
        print('Model Parameters')
        for name, param in self.model.named_parameters():
            print('\t' + name + '\t', list(param.size()))
        if self.config.checkpoint:
            self.load_model(self.config.checkpoint)
        if self.is_train:
            self.writer = TensorboardWriter(self.config.logdir)
            # Only optimize parameters that require gradients.
            self.optimizer = self.config.optimizer(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=self.config.learning_rate)
    def save_model(self, epoch):
        """Save parameters to checkpoint"""
        ckpt_path = os.path.join(self.config.save_path, f'{epoch}.pkl')
        print(f'Save parameters to {ckpt_path}')
        torch.save(self.model.state_dict(), ckpt_path)
    def load_model(self, checkpoint):
        """Load parameters from checkpoint"""
        print(f'Load parameters from {checkpoint}')
        # Checkpoint files are named '<epoch>.pkl'; recover the epoch
        # number from the basename to resume training at the right point.
        epoch = re.match(r"[0-9]*", os.path.basename(checkpoint)).group(0)
        self.epoch_i = int(epoch)
        self.model.load_state_dict(torch.load(checkpoint))
    def write_summary(self, epoch_i):
        """Push whichever loss attributes have been set so far to TensorBoard.

        Each metric is optional (getattr with None default) because subclasses
        set different subsets, e.g. KL terms only exist for VariationalSolver.
        """
        epoch_loss = getattr(self, 'epoch_loss', None)
        if epoch_loss is not None:
            self.writer.update_loss(
                loss=epoch_loss,
                step_i=epoch_i + 1,
                name='train_loss')
        epoch_recon_loss = getattr(self, 'epoch_recon_loss', None)
        if epoch_recon_loss is not None:
            self.writer.update_loss(
                loss=epoch_recon_loss,
                step_i=epoch_i + 1,
                name='train_recon_loss')
        epoch_kl_div = getattr(self, 'epoch_kl_div', None)
        if epoch_kl_div is not None:
            self.writer.update_loss(
                loss=epoch_kl_div,
                step_i=epoch_i + 1,
                name='train_kl_div')
        kl_mult = getattr(self, 'kl_mult', None)
        if kl_mult is not None:
            self.writer.update_loss(
                loss=kl_mult,
                step_i=epoch_i + 1,
                name='kl_mult')
        epoch_bow_loss = getattr(self, 'epoch_bow_loss', None)
        if epoch_bow_loss is not None:
            self.writer.update_loss(
                loss=epoch_bow_loss,
                step_i=epoch_i + 1,
                name='bow_loss')
        validation_loss = getattr(self, 'validation_loss', None)
        if validation_loss is not None:
            self.writer.update_loss(
                loss=validation_loss,
                step_i=epoch_i + 1,
                name='validation_loss')
    @time_desc_decorator('Training Start!')
    def train(self):
        """Run the full training loop.

        Returns
        -------
        list of float
            Average per-word loss for each completed epoch.
        """
        epoch_loss_history = []
        for epoch_i in range(self.epoch_i, self.config.n_epoch):
            self.epoch_i = epoch_i
            batch_loss_history = []
            self.model.train()
            n_total_words = 0
            for batch_i, (conversations, conversation_length, sentence_length) in enumerate(tqdm(self.train_data_loader, ncols=80)):
                # conversations: (batch_size) list of conversations
                # conversation: list of sentences
                # sentence: list of tokens
                # conversation_length: list of int
                # sentence_length: (batch_size) list of conversation list of sentence_lengths
                # Inputs are all turns but the last; targets are shifted by one.
                input_conversations = [conv[:-1] for conv in conversations]
                target_conversations = [conv[1:] for conv in conversations]
                # flatten input and target conversations
                input_sentences = [sent for conv in input_conversations for sent in conv]
                target_sentences = [sent for conv in target_conversations for sent in conv]
                input_sentence_length = [l for len_list in sentence_length for l in len_list[:-1]]
                target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
                input_conversation_length = [l - 1 for l in conversation_length]
                input_sentences = to_var(torch.LongTensor(input_sentences))
                target_sentences = to_var(torch.LongTensor(target_sentences))
                input_sentence_length = to_var(torch.LongTensor(input_sentence_length))
                target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
                input_conversation_length = to_var(torch.LongTensor(input_conversation_length))
                # reset gradient
                self.optimizer.zero_grad()
                sentence_logits = self.model(
                    input_sentences,
                    input_sentence_length,
                    input_conversation_length,
                    target_sentences,
                    decode=False)
                batch_loss, n_words = masked_cross_entropy(
                    sentence_logits,
                    target_sentences,
                    target_sentence_length)
                assert not isnan(batch_loss.item())
                batch_loss_history.append(batch_loss.item())
                n_total_words += n_words.item()
                if batch_i % self.config.print_every == 0:
                    tqdm.write(
                        f'Epoch: {epoch_i+1}, iter {batch_i}: loss = {batch_loss.item()/ n_words.item():.3f}')
                # Back-propagation
                batch_loss.backward()
                # Gradient cliping
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip)
                # Run optimizer
                self.optimizer.step()
            # Losses are summed over words, so dividing by the total word
            # count yields the per-word loss for the epoch.
            epoch_loss = np.sum(batch_loss_history) / n_total_words
            epoch_loss_history.append(epoch_loss)
            self.epoch_loss = epoch_loss
            print_str = f'Epoch {epoch_i+1} loss average: {epoch_loss:.3f}'
            print(print_str)
            if epoch_i % self.config.save_every_epoch == 0:
                self.save_model(epoch_i + 1)
            print('\n<Validation>...')
            self.validation_loss = self.evaluate()
            if epoch_i % self.config.plot_every_epoch == 0:
                self.write_summary(epoch_i)
        self.save_model(self.config.n_epoch)
        return epoch_loss_history
    def generate_sentence(self, input_sentences, input_sentence_length,
                          input_conversation_length, target_sentences):
        """Decode responses for one batch, printing them and appending a
        human-readable transcript to samples.txt in the save directory."""
        self.model.eval()
        # [batch_size, max_seq_len, vocab_size]
        generated_sentences = self.model(
            input_sentences,
            input_sentence_length,
            input_conversation_length,
            target_sentences,
            decode=True)
        # write output to file
        with open(os.path.join(self.config.save_path, 'samples.txt'), 'a') as f:
            f.write(f'<Epoch {self.epoch_i}>\n\n')
            tqdm.write('\n<Samples>')
            for input_sent, target_sent, output_sent in zip(input_sentences, target_sentences, generated_sentences):
                input_sent = self.vocab.decode(input_sent)
                target_sent = self.vocab.decode(target_sent)
                output_sent = '\n'.join([self.vocab.decode(sent) for sent in output_sent])
                s = '\n'.join(['Input sentence: ' + input_sent,
                               'Ground truth: ' + target_sent,
                               'Generated response: ' + output_sent + '\n'])
                f.write(s + '\n')
                print(s)
            print('')
    def evaluate(self):
        """Compute the per-word validation loss over the eval data loader.

        Also prints generated samples for the first batch as a side effect.
        """
        self.model.eval()
        batch_loss_history = []
        n_total_words = 0
        for batch_i, (conversations, conversation_length, sentence_length) in enumerate(tqdm(self.eval_data_loader, ncols=80)):
            # conversations: (batch_size) list of conversations
            # conversation: list of sentences
            # sentence: list of tokens
            # conversation_length: list of int
            # sentence_length: (batch_size) list of conversation list of sentence_lengths
            input_conversations = [conv[:-1] for conv in conversations]
            target_conversations = [conv[1:] for conv in conversations]
            # flatten input and target conversations
            input_sentences = [sent for conv in input_conversations for sent in conv]
            target_sentences = [sent for conv in target_conversations for sent in conv]
            input_sentence_length = [l for len_list in sentence_length for l in len_list[:-1]]
            target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
            input_conversation_length = [l - 1 for l in conversation_length]
            with torch.no_grad():
                input_sentences = to_var(torch.LongTensor(input_sentences))
                target_sentences = to_var(torch.LongTensor(target_sentences))
                input_sentence_length = to_var(torch.LongTensor(input_sentence_length))
                target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
                input_conversation_length = to_var(
                    torch.LongTensor(input_conversation_length))
            if batch_i == 0:
                self.generate_sentence(input_sentences,
                                       input_sentence_length,
                                       input_conversation_length,
                                       target_sentences)
            sentence_logits = self.model(
                input_sentences,
                input_sentence_length,
                input_conversation_length,
                target_sentences)
            batch_loss, n_words = masked_cross_entropy(
                sentence_logits,
                target_sentences,
                target_sentence_length)
            assert not isnan(batch_loss.item())
            batch_loss_history.append(batch_loss.item())
            n_total_words += n_words.item()
        epoch_loss = np.sum(batch_loss_history) / n_total_words
        print_str = f'Validation loss: {epoch_loss:.3f}\n'
        print(print_str)
        return epoch_loss
    def test(self):
        """Compute word-level perplexity over the eval data loader.

        Same loss computation as evaluate(), but exponentiates the mean
        per-word loss into a perplexity.
        """
        self.model.eval()
        batch_loss_history = []
        n_total_words = 0
        for batch_i, (conversations, conversation_length, sentence_length) in enumerate(tqdm(self.eval_data_loader, ncols=80)):
            # conversations: (batch_size) list of conversations
            # conversation: list of sentences
            # sentence: list of tokens
            # conversation_length: list of int
            # sentence_length: (batch_size) list of conversation list of sentence_lengths
            input_conversations = [conv[:-1] for conv in conversations]
            target_conversations = [conv[1:] for conv in conversations]
            # flatten input and target conversations
            input_sentences = [sent for conv in input_conversations for sent in conv]
            target_sentences = [sent for conv in target_conversations for sent in conv]
            input_sentence_length = [l for len_list in sentence_length for l in len_list[:-1]]
            target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
            input_conversation_length = [l - 1 for l in conversation_length]
            with torch.no_grad():
                input_sentences = to_var(torch.LongTensor(input_sentences))
                target_sentences = to_var(torch.LongTensor(target_sentences))
                input_sentence_length = to_var(torch.LongTensor(input_sentence_length))
                target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
                input_conversation_length = to_var(torch.LongTensor(input_conversation_length))
            sentence_logits = self.model(
                input_sentences,
                input_sentence_length,
                input_conversation_length,
                target_sentences)
            batch_loss, n_words = masked_cross_entropy(
                sentence_logits,
                target_sentences,
                target_sentence_length)
            assert not isnan(batch_loss.item())
            batch_loss_history.append(batch_loss.item())
            n_total_words += n_words.item()
        epoch_loss = np.sum(batch_loss_history) / n_total_words
        print(f'Number of words: {n_total_words}')
        # NOTE(review): np.exp of the per-word loss is perplexity only if
        # the cross entropy is in nats; the "Bits per word" label above
        # would then be a misnomer — confirm against masked_cross_entropy.
        print(f'Bits per word: {epoch_loss:.3f}')
        word_perplexity = np.exp(epoch_loss)
        print_str = f'Word perplexity : {word_perplexity:.3f}\n'
        print(print_str)
        return word_perplexity
    def embedding_metric(self):
        """Compute embedding-based response metrics over the eval set.

        Generates n_sample_step responses conditioned on the first
        n_context turns of each sufficiently long conversation, then
        scores them against the ground truth with the 'average',
        'extrema' and 'greedy' word2vec embedding metrics.

        Returns
        -------
        tuple
            (epoch_average, epoch_extrema, epoch_greedy) metric means.
        """
        # Lazily load (and cache on self) the pretrained word2vec model.
        word2vec = getattr(self, 'word2vec', None)
        if word2vec is None:
            print('Loading word2vec model')
            word2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
            self.word2vec = word2vec
        keys = word2vec.vocab
        self.model.eval()
        n_context = self.config.n_context
        n_sample_step = self.config.n_sample_step
        metric_average_history = []
        metric_extrema_history = []
        metric_greedy_history = []
        context_history = []
        sample_history = []
        n_sent = 0
        n_conv = 0
        for batch_i, (conversations, conversation_length, sentence_length) \
                in enumerate(tqdm(self.eval_data_loader, ncols=80)):
            # conversations: (batch_size) list of conversations
            # conversation: list of sentences
            # sentence: list of tokens
            # conversation_length: list of int
            # sentence_length: (batch_size) list of conversation list of sentence_lengths
            # Keep only conversations long enough to supply both context
            # and ground-truth continuation.
            conv_indices = [i for i in range(len(conversations)) if len(conversations[i]) >= n_context + n_sample_step]
            context = [c for i in conv_indices for c in [conversations[i][:n_context]]]
            ground_truth = [c for i in conv_indices for c in [conversations[i][n_context:n_context + n_sample_step]]]
            sentence_length = [c for i in conv_indices for c in [sentence_length[i][:n_context]]]
            with torch.no_grad():
                context = to_var(torch.LongTensor(context))
                sentence_length = to_var(torch.LongTensor(sentence_length))
            samples = self.model.generate(context, sentence_length, n_context)
            context = context.data.cpu().numpy().tolist()
            samples = samples.data.cpu().numpy().tolist()
            context_history.append(context)
            sample_history.append(samples)
            # Decode token ids to text, then flatten conversations.
            samples = [[self.vocab.decode(sent) for sent in c] for c in samples]
            ground_truth = [[self.vocab.decode(sent) for sent in c] for c in ground_truth]
            samples = [sent for c in samples for sent in c]
            ground_truth = [sent for c in ground_truth for sent in c]
            # Map words to embeddings, dropping out-of-vocabulary tokens.
            samples = [[word2vec[s] for s in sent.split() if s in keys] for sent in samples]
            ground_truth = [[word2vec[s] for s in sent.split() if s in keys] for sent in ground_truth]
            # Discard pairs where either side has no in-vocabulary words.
            indices = [i for i, s, g in zip(range(len(samples)), samples, ground_truth) if s != [] and g != []]
            samples = [samples[i] for i in indices]
            ground_truth = [ground_truth[i] for i in indices]
            n = len(samples)
            n_sent += n
            metric_average = embedding_metric(samples, ground_truth, word2vec, 'average')
            metric_extrema = embedding_metric(samples, ground_truth, word2vec, 'extrema')
            metric_greedy = embedding_metric(samples, ground_truth, word2vec, 'greedy')
            metric_average_history.append(metric_average)
            metric_extrema_history.append(metric_extrema)
            metric_greedy_history.append(metric_greedy)
        epoch_average = np.mean(np.concatenate(metric_average_history), axis=0)
        epoch_extrema = np.mean(np.concatenate(metric_extrema_history), axis=0)
        epoch_greedy = np.mean(np.concatenate(metric_greedy_history), axis=0)
        print('n_sentences:', n_sent)
        print_str = f'Metrics - Average: {epoch_average:.3f}, Extrema: {epoch_extrema:.3f}, Greedy: {epoch_greedy:.3f}'
        print(print_str)
        print('\n')
        return epoch_average, epoch_extrema, epoch_greedy
class VariationalSolver(Solver):
def __init__(self, config, train_data_loader, eval_data_loader, vocab, is_train=True, model=None):
self.config = config
self.epoch_i = 0
self.train_data_loader = train_data_loader
self.eval_data_loader = eval_data_loader
self.vocab = vocab
self.is_train = is_train
self.model = model
    @time_desc_decorator('Training Start!')
    def train(self):
        """Train the variational model with KL annealing.

        The KL term's weight (kl_mult) ramps linearly from 0 to 1 over
        config.kl_annealing_iter optimizer steps. Optionally adds a
        bag-of-words auxiliary loss when config.bow is set.

        Returns
        -------
        list of float
            Average per-word total loss for each completed epoch.
        """
        epoch_loss_history = []
        kl_mult = 0.0
        conv_kl_mult = 0.0
        for epoch_i in range(self.epoch_i, self.config.n_epoch):
            self.epoch_i = epoch_i
            batch_loss_history = []
            recon_loss_history = []
            kl_div_history = []
            kl_div_sent_history = []
            kl_div_conv_history = []
            bow_loss_history = []
            self.model.train()
            n_total_words = 0
            # self.evaluate()
            for batch_i, (conversations, conversation_length, sentence_length) \
                    in enumerate(tqdm(self.train_data_loader, ncols=80)):
                # conversations: (batch_size) list of conversations
                # conversation: list of sentences
                # sentence: list of tokens
                # conversation_length: list of int
                # sentence_length: (batch_size) list of conversation list of sentence_lengths
                target_conversations = [conv[1:] for conv in conversations]
                # flatten input and target conversations
                sentences = [sent for conv in conversations for sent in conv]
                input_conversation_length = [l - 1 for l in conversation_length]
                target_sentences = [sent for conv in target_conversations for sent in conv]
                target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
                sentence_length = [l for len_list in sentence_length for l in len_list]
                sentences = to_var(torch.LongTensor(sentences))
                sentence_length = to_var(torch.LongTensor(sentence_length))
                input_conversation_length = to_var(torch.LongTensor(input_conversation_length))
                target_sentences = to_var(torch.LongTensor(target_sentences))
                target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
                # reset gradient
                self.optimizer.zero_grad()
                sentence_logits, kl_div, _, _ = self.model(
                    sentences,
                    sentence_length,
                    input_conversation_length,
                    target_sentences)
                recon_loss, n_words = masked_cross_entropy(
                    sentence_logits,
                    target_sentences,
                    target_sentence_length)
                # ELBO-style objective: reconstruction + annealed KL term.
                batch_loss = recon_loss + kl_mult * kl_div
                batch_loss_history.append(batch_loss.item())
                recon_loss_history.append(recon_loss.item())
                kl_div_history.append(kl_div.item())
                n_total_words += n_words.item()
                if self.config.bow:
                    bow_loss = self.model.compute_bow_loss(target_conversations)
                    batch_loss += bow_loss
                    bow_loss_history.append(bow_loss.item())
                assert not isnan(batch_loss.item())
                if batch_i % self.config.print_every == 0:
                    print_str = f'Epoch: {epoch_i+1}, iter {batch_i}: loss = {batch_loss.item() / n_words.item():.3f}, recon = {recon_loss.item() / n_words.item():.3f}, kl_div = {kl_div.item() / n_words.item():.3f}'
                    if self.config.bow:
                        print_str += f', bow_loss = {bow_loss.item() / n_words.item():.3f}'
                    tqdm.write(print_str)
                # Back-propagation
                batch_loss.backward()
                # Gradient cliping
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip)
                # Run optimizer
                self.optimizer.step()
                # Linear KL annealing, capped at 1.0.
                kl_mult = min(kl_mult + 1.0 / self.config.kl_annealing_iter, 1.0)
            epoch_loss = np.sum(batch_loss_history) / n_total_words
            epoch_loss_history.append(epoch_loss)
            epoch_recon_loss = np.sum(recon_loss_history) / n_total_words
            epoch_kl_div = np.sum(kl_div_history) / n_total_words
            # Stash epoch metrics on self for write_summary() to pick up.
            self.kl_mult = kl_mult
            self.epoch_loss = epoch_loss
            self.epoch_recon_loss = epoch_recon_loss
            self.epoch_kl_div = epoch_kl_div
            print_str = f'Epoch {epoch_i+1} loss average: {epoch_loss:.3f}, recon_loss: {epoch_recon_loss:.3f}, kl_div: {epoch_kl_div:.3f}'
            if bow_loss_history:
                self.epoch_bow_loss = np.sum(bow_loss_history) / n_total_words
                print_str += f', bow_loss = {self.epoch_bow_loss:.3f}'
            print(print_str)
            if epoch_i % self.config.save_every_epoch == 0:
                self.save_model(epoch_i + 1)
            print('\n<Validation>...')
            self.validation_loss = self.evaluate()
            if epoch_i % self.config.plot_every_epoch == 0:
                self.write_summary(epoch_i)
        return epoch_loss_history
    def generate_sentence(self, sentences, sentence_length,
                          input_conversation_length, input_sentences, target_sentences):
        """Generate output of decoder (single batch)"""
        self.model.eval()
        # [batch_size, max_seq_len, vocab_size]
        # The variational model additionally returns KL-related values,
        # which are discarded here.
        generated_sentences, _, _, _ = self.model(
            sentences,
            sentence_length,
            input_conversation_length,
            target_sentences,
            decode=True)
        # write output to file
        with open(os.path.join(self.config.save_path, 'samples.txt'), 'a') as f:
            f.write(f'<Epoch {self.epoch_i}>\n\n')
            tqdm.write('\n<Samples>')
            for input_sent, target_sent, output_sent in zip(input_sentences, target_sentences, generated_sentences):
                input_sent = self.vocab.decode(input_sent)
                target_sent = self.vocab.decode(target_sent)
                output_sent = '\n'.join([self.vocab.decode(sent) for sent in output_sent])
                s = '\n'.join(['Input sentence: ' + input_sent,
                               'Ground truth: ' + target_sent,
                               'Generated response: ' + output_sent + '\n'])
                f.write(s + '\n')
                print(s)
            print('')
def evaluate(self):
    """Run one pass over the validation set.

    Returns the mean per-word loss (reconstruction + KL). Also prints a
    recon/KL (and optional bag-of-words) summary and generates samples
    from the first validation batch.
    """
    self.model.eval()
    batch_loss_history = []
    recon_loss_history = []
    kl_div_history = []
    bow_loss_history = []
    n_total_words = 0
    for batch_i, (conversations, conversation_length, sentence_length) \
            in enumerate(tqdm(self.eval_data_loader, ncols=80)):
        # conversations: (batch_size) list of conversations
        #   conversation: list of sentences
        #   sentence: list of tokens
        # conversation_length: list of int
        # sentence_length: (batch_size) list of conversation list of sentence_lengths
        target_conversations = [conv[1:] for conv in conversations]
        # flatten input and target conversations
        sentences = [sent for conv in conversations for sent in conv]
        input_conversation_length = [l - 1 for l in conversation_length]
        target_sentences = [sent for conv in target_conversations for sent in conv]
        target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
        sentence_length = [l for len_list in sentence_length for l in len_list]
        with torch.no_grad():
            sentences = to_var(torch.LongTensor(sentences))
            sentence_length = to_var(torch.LongTensor(sentence_length))
            input_conversation_length = to_var(
                torch.LongTensor(input_conversation_length))
            target_sentences = to_var(torch.LongTensor(target_sentences))
            target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
        if batch_i == 0:
            # Log generated samples for the first validation batch only.
            input_conversations = [conv[:-1] for conv in conversations]
            input_sentences = [sent for conv in input_conversations for sent in conv]
            with torch.no_grad():
                input_sentences = to_var(torch.LongTensor(input_sentences))
            self.generate_sentence(sentences,
                                   sentence_length,
                                   input_conversation_length,
                                   input_sentences,
                                   target_sentences)
        sentence_logits, kl_div, _, _ = self.model(
            sentences,
            sentence_length,
            input_conversation_length,
            target_sentences)
        recon_loss, n_words = masked_cross_entropy(
            sentence_logits,
            target_sentences,
            target_sentence_length)
        batch_loss = recon_loss + kl_div
        if self.config.bow:
            # Optional bag-of-words auxiliary loss: tracked for reporting
            # only, not added into batch_loss here.
            bow_loss = self.model.compute_bow_loss(target_conversations)
            bow_loss_history.append(bow_loss.item())
        assert not isnan(batch_loss.item())
        batch_loss_history.append(batch_loss.item())
        recon_loss_history.append(recon_loss.item())
        kl_div_history.append(kl_div.item())
        n_total_words += n_words.item()
    # Per-word averages over the whole validation pass.
    epoch_loss = np.sum(batch_loss_history) / n_total_words
    epoch_recon_loss = np.sum(recon_loss_history) / n_total_words
    epoch_kl_div = np.sum(kl_div_history) / n_total_words
    print_str = f'Validation loss: {epoch_loss:.3f}, recon_loss: {epoch_recon_loss:.3f}, kl_div: {epoch_kl_div:.3f}'
    if bow_loss_history:
        epoch_bow_loss = np.sum(bow_loss_history) / n_total_words
        print_str += f', bow_loss = {epoch_bow_loss:.3f}'
    print(print_str)
    print('\n')
    return epoch_loss
def importance_sample(self):
    """Estimate a word-perplexity upper bound via importance sampling.

    Draws ``config.importance_sample`` latent samples per batch and
    combines their log-weights with a max-shifted log-sum-exp, giving a
    tighter bound than the single-sample ELBO. Returns the perplexity.
    """
    self.model.eval()
    weight_history = []
    n_total_words = 0
    kl_div_history = []
    for batch_i, (conversations, conversation_length, sentence_length) \
            in enumerate(tqdm(self.eval_data_loader, ncols=80)):
        # conversations: (batch_size) list of conversations
        #   conversation: list of sentences
        #   sentence: list of tokens
        # conversation_length: list of int
        # sentence_length: (batch_size) list of conversation list of sentence_lengths
        target_conversations = [conv[1:] for conv in conversations]
        # flatten input and target conversations
        sentences = [sent for conv in conversations for sent in conv]
        input_conversation_length = [l - 1 for l in conversation_length]
        target_sentences = [sent for conv in target_conversations for sent in conv]
        target_sentence_length = [l for len_list in sentence_length for l in len_list[1:]]
        sentence_length = [l for len_list in sentence_length for l in len_list]
        with torch.no_grad():
            sentences = to_var(torch.LongTensor(sentences))
            sentence_length = to_var(torch.LongTensor(sentence_length))
            input_conversation_length = to_var(
                torch.LongTensor(input_conversation_length))
            target_sentences = to_var(torch.LongTensor(target_sentences))
            target_sentence_length = to_var(torch.LongTensor(target_sentence_length))
        # treat whole batch as one data sample
        weights = []
        for j in range(self.config.importance_sample):
            # Each forward pass re-samples the latent, yielding one weight.
            sentence_logits, kl_div, log_p_z, log_q_zx = self.model(
                sentences,
                sentence_length,
                input_conversation_length,
                target_sentences)
            recon_loss, n_words = masked_cross_entropy(
                sentence_logits,
                target_sentences,
                target_sentence_length)
            # log w = log p(x|z) + log p(z) - log q(z|x)
            log_w = (-recon_loss.sum() + log_p_z - log_q_zx).data
            weights.append(log_w)
            if j == 0:
                # Word count and KL are per-batch quantities; record once.
                n_total_words += n_words.item()
                kl_div_history.append(kl_div.item())
        # weights: [n_samples]
        # Max-shifted log-sum-exp for numerical stability, then average
        # over samples in log space.
        # NOTE(review): np.floor / np.log are applied to torch tensors here,
        # relying on numpy<->torch interop — confirm on the pinned versions.
        weights = torch.stack(weights, 0)
        m = np.floor(weights.max())
        weights = np.log(torch.exp(weights - m).sum())
        weights = m + weights - np.log(self.config.importance_sample)
        weight_history.append(weights)
    print(f'Number of words: {n_total_words}')
    bits_per_word = -np.sum(weight_history) / n_total_words
    print(f'Bits per word: {bits_per_word:.3f}')
    word_perplexity = np.exp(bits_per_word)
    epoch_kl_div = np.sum(kl_div_history) / n_total_words
    print_str = f'Word perplexity upperbound using {self.config.importance_sample} importance samples: {word_perplexity:.3f}, kl_div: {epoch_kl_div:.3f}\n'
    print(print_str)
    return word_perplexity
| [
"numpy.sum",
"numpy.exp",
"gensim.models.KeyedVectors.load_word2vec_format",
"torch.no_grad",
"os.path.join",
"torch.load",
"torch.exp",
"utils.embedding_metric",
"tqdm.tqdm",
"os.path.basename",
"torch.cuda.is_available",
"numpy.concatenate",
"tqdm.tqdm.write",
"layers.masked_cross_entrop... | [((834, 868), 'utils.time_desc_decorator', 'time_desc_decorator', (['"""Build Graph"""'], {}), "('Build Graph')\n", (853, 868), False, 'from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric\n'), ((4439, 4477), 'utils.time_desc_decorator', 'time_desc_decorator', (['"""Training Start!"""'], {}), "('Training Start!')\n", (4458, 4477), False, 'from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric\n'), ((18442, 18480), 'utils.time_desc_decorator', 'time_desc_decorator', (['"""Training Start!"""'], {}), "('Training Start!')\n", (18461, 18480), False, 'from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric\n'), ((2478, 2529), 'os.path.join', 'os.path.join', (['self.config.save_path', 'f"""{epoch}.pkl"""'], {}), "(self.config.save_path, f'{epoch}.pkl')\n", (2490, 2529), False, 'import os\n'), ((14230, 14248), 'numpy.exp', 'np.exp', (['epoch_loss'], {}), '(epoch_loss)\n', (14236, 14248), True, 'import numpy as np\n'), ((31347, 31368), 'numpy.exp', 'np.exp', (['bits_per_word'], {}), '(bits_per_word)\n', (31353, 31368), True, 'import numpy as np\n'), ((1780, 1805), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1803, 1805), False, 'import torch\n'), ((2168, 2205), 'utils.TensorboardWriter', 'TensorboardWriter', (['self.config.logdir'], {}), '(self.config.logdir)\n', (2185, 2205), False, 'from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric\n'), ((2915, 2937), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (2925, 2937), False, 'import torch\n'), ((8661, 8686), 'tqdm.tqdm.write', 'tqdm.write', 
(['"""\n<Samples>"""'], {}), "('\\n<Samples>')\n", (8671, 8686), False, 'from tqdm import tqdm\n'), ((9497, 9534), 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'ncols': '(80)'}), '(self.eval_data_loader, ncols=80)\n', (9501, 9534), False, 'from tqdm import tqdm\n'), ((11449, 11528), 'layers.masked_cross_entropy', 'masked_cross_entropy', (['sentence_logits', 'target_sentences', 'target_sentence_length'], {}), '(sentence_logits, target_sentences, target_sentence_length)\n', (11469, 11528), False, 'from layers import masked_cross_entropy\n'), ((11750, 11776), 'numpy.sum', 'np.sum', (['batch_loss_history'], {}), '(batch_loss_history)\n', (11756, 11776), True, 'import numpy as np\n'), ((12098, 12135), 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'ncols': '(80)'}), '(self.eval_data_loader, ncols=80)\n', (12102, 12135), False, 'from tqdm import tqdm\n'), ((13758, 13837), 'layers.masked_cross_entropy', 'masked_cross_entropy', (['sentence_logits', 'target_sentences', 'target_sentence_length'], {}), '(sentence_logits, target_sentences, target_sentence_length)\n', (13778, 13837), False, 'from layers import masked_cross_entropy\n'), ((14059, 14085), 'numpy.sum', 'np.sum', (['batch_loss_history'], {}), '(batch_loss_history)\n', (14065, 14085), True, 'import numpy as np\n'), ((14553, 14628), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['word2vec_path'], {'binary': '(True)'}), '(word2vec_path, binary=True)\n', (14600, 14628), False, 'import gensim\n'), ((15122, 15159), 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'ncols': '(80)'}), '(self.eval_data_loader, ncols=80)\n', (15126, 15159), False, 'from tqdm import tqdm\n'), ((17143, 17203), 'utils.embedding_metric', 'embedding_metric', (['samples', 'ground_truth', 'word2vec', '"""average"""'], {}), "(samples, ground_truth, word2vec, 'average')\n", (17159, 17203), False, 'from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, 
normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric\n'), ((17233, 17293), 'utils.embedding_metric', 'embedding_metric', (['samples', 'ground_truth', 'word2vec', '"""extrema"""'], {}), "(samples, ground_truth, word2vec, 'extrema')\n", (17249, 17293), False, 'from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric\n'), ((17322, 17381), 'utils.embedding_metric', 'embedding_metric', (['samples', 'ground_truth', 'word2vec', '"""greedy"""'], {}), "(samples, ground_truth, word2vec, 'greedy')\n", (17338, 17381), False, 'from utils import to_var, time_desc_decorator, TensorboardWriter, pad_and_pack, normal_kl_div, to_bow, bag_of_words_loss, normal_kl_div, embedding_metric\n'), ((17587, 17625), 'numpy.concatenate', 'np.concatenate', (['metric_average_history'], {}), '(metric_average_history)\n', (17601, 17625), True, 'import numpy as np\n'), ((17667, 17705), 'numpy.concatenate', 'np.concatenate', (['metric_extrema_history'], {}), '(metric_extrema_history)\n', (17681, 17705), True, 'import numpy as np\n'), ((17746, 17783), 'numpy.concatenate', 'np.concatenate', (['metric_greedy_history'], {}), '(metric_greedy_history)\n', (17760, 17783), True, 'import numpy as np\n'), ((24047, 24072), 'tqdm.tqdm.write', 'tqdm.write', (['"""\n<Samples>"""'], {}), "('\\n<Samples>')\n", (24057, 24072), False, 'from tqdm import tqdm\n'), ((24991, 25028), 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'ncols': '(80)'}), '(self.eval_data_loader, ncols=80)\n', (24995, 25028), False, 'from tqdm import tqdm\n'), ((27154, 27233), 'layers.masked_cross_entropy', 'masked_cross_entropy', (['sentence_logits', 'target_sentences', 'target_sentence_length'], {}), '(sentence_logits, target_sentences, target_sentence_length)\n', (27174, 27233), False, 'from layers import masked_cross_entropy\n'), ((27773, 27799), 'numpy.sum', 'np.sum', (['batch_loss_history'], {}), '(batch_loss_history)\n', 
(27779, 27799), True, 'import numpy as np\n'), ((27843, 27869), 'numpy.sum', 'np.sum', (['recon_loss_history'], {}), '(recon_loss_history)\n', (27849, 27869), True, 'import numpy as np\n'), ((27909, 27931), 'numpy.sum', 'np.sum', (['kl_div_history'], {}), '(kl_div_history)\n', (27915, 27931), True, 'import numpy as np\n'), ((28624, 28661), 'tqdm.tqdm', 'tqdm', (['self.eval_data_loader'], {'ncols': '(80)'}), '(self.eval_data_loader, ncols=80)\n', (28628, 28661), False, 'from tqdm import tqdm\n'), ((30912, 30935), 'torch.stack', 'torch.stack', (['weights', '(0)'], {}), '(weights, 0)\n', (30923, 30935), False, 'import torch\n'), ((31393, 31415), 'numpy.sum', 'np.sum', (['kl_div_history'], {}), '(kl_div_history)\n', (31399, 31415), True, 'import numpy as np\n'), ((4820, 4858), 'tqdm.tqdm', 'tqdm', (['self.train_data_loader'], {'ncols': '(80)'}), '(self.train_data_loader, ncols=80)\n', (4824, 4858), False, 'from tqdm import tqdm\n'), ((6634, 6713), 'layers.masked_cross_entropy', 'masked_cross_entropy', (['sentence_logits', 'target_sentences', 'target_sentence_length'], {}), '(sentence_logits, target_sentences, target_sentence_length)\n', (6654, 6713), False, 'from layers import masked_cross_entropy\n'), ((7437, 7463), 'numpy.sum', 'np.sum', (['batch_loss_history'], {}), '(batch_loss_history)\n', (7443, 7463), True, 'import numpy as np\n'), ((8534, 8584), 'os.path.join', 'os.path.join', (['self.config.save_path', '"""samples.txt"""'], {}), "(self.config.save_path, 'samples.txt')\n", (8546, 8584), False, 'import os\n'), ((10485, 10500), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10498, 10500), False, 'import torch\n'), ((13086, 13101), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13099, 13101), False, 'import torch\n'), ((15895, 15910), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15908, 15910), False, 'import torch\n'), ((19102, 19140), 'tqdm.tqdm', 'tqdm', (['self.train_data_loader'], {'ncols': '(80)'}), '(self.train_data_loader, ncols=80)\n', 
(19106, 19140), False, 'from tqdm import tqdm\n'), ((20761, 20840), 'layers.masked_cross_entropy', 'masked_cross_entropy', (['sentence_logits', 'target_sentences', 'target_sentence_length'], {}), '(sentence_logits, target_sentences, target_sentence_length)\n', (20781, 20840), False, 'from layers import masked_cross_entropy\n'), ((22289, 22315), 'numpy.sum', 'np.sum', (['batch_loss_history'], {}), '(batch_loss_history)\n', (22295, 22315), True, 'import numpy as np\n'), ((22414, 22440), 'numpy.sum', 'np.sum', (['recon_loss_history'], {}), '(recon_loss_history)\n', (22420, 22440), True, 'import numpy as np\n'), ((22484, 22506), 'numpy.sum', 'np.sum', (['kl_div_history'], {}), '(kl_div_history)\n', (22490, 22506), True, 'import numpy as np\n'), ((23920, 23970), 'os.path.join', 'os.path.join', (['self.config.save_path', '"""samples.txt"""'], {}), "(self.config.save_path, 'samples.txt')\n", (23932, 23970), False, 'import os\n'), ((25884, 25899), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25897, 25899), False, 'import torch\n'), ((28128, 28152), 'numpy.sum', 'np.sum', (['bow_loss_history'], {}), '(bow_loss_history)\n', (28134, 28152), True, 'import numpy as np\n'), ((29625, 29640), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29638, 29640), False, 'import torch\n'), ((30468, 30547), 'layers.masked_cross_entropy', 'masked_cross_entropy', (['sentence_logits', 'target_sentences', 'target_sentence_length'], {}), '(sentence_logits, target_sentences, target_sentence_length)\n', (30488, 30547), False, 'from layers import masked_cross_entropy\n'), ((31071, 31108), 'numpy.log', 'np.log', (['self.config.importance_sample'], {}), '(self.config.importance_sample)\n', (31077, 31108), True, 'import numpy as np\n'), ((31229, 31251), 'numpy.sum', 'np.sum', (['weight_history'], {}), '(weight_history)\n', (31235, 31251), True, 'import numpy as np\n'), ((2807, 2835), 'os.path.basename', 'os.path.basename', (['checkpoint'], {}), '(checkpoint)\n', (2823, 2835), False, 
'import os\n'), ((5885, 5918), 'torch.LongTensor', 'torch.LongTensor', (['input_sentences'], {}), '(input_sentences)\n', (5901, 5918), False, 'import torch\n'), ((5962, 5996), 'torch.LongTensor', 'torch.LongTensor', (['target_sentences'], {}), '(target_sentences)\n', (5978, 5996), False, 'import torch\n'), ((6045, 6084), 'torch.LongTensor', 'torch.LongTensor', (['input_sentence_length'], {}), '(input_sentence_length)\n', (6061, 6084), False, 'import torch\n'), ((6134, 6174), 'torch.LongTensor', 'torch.LongTensor', (['target_sentence_length'], {}), '(target_sentence_length)\n', (6150, 6174), False, 'import torch\n'), ((6227, 6270), 'torch.LongTensor', 'torch.LongTensor', (['input_conversation_length'], {}), '(input_conversation_length)\n', (6243, 6270), False, 'import torch\n'), ((10543, 10576), 'torch.LongTensor', 'torch.LongTensor', (['input_sentences'], {}), '(input_sentences)\n', (10559, 10576), False, 'import torch\n'), ((10620, 10654), 'torch.LongTensor', 'torch.LongTensor', (['target_sentences'], {}), '(target_sentences)\n', (10636, 10654), False, 'import torch\n'), ((10703, 10742), 'torch.LongTensor', 'torch.LongTensor', (['input_sentence_length'], {}), '(input_sentence_length)\n', (10719, 10742), False, 'import torch\n'), ((10792, 10832), 'torch.LongTensor', 'torch.LongTensor', (['target_sentence_length'], {}), '(target_sentence_length)\n', (10808, 10832), False, 'import torch\n'), ((10906, 10949), 'torch.LongTensor', 'torch.LongTensor', (['input_conversation_length'], {}), '(input_conversation_length)\n', (10922, 10949), False, 'import torch\n'), ((13144, 13177), 'torch.LongTensor', 'torch.LongTensor', (['input_sentences'], {}), '(input_sentences)\n', (13160, 13177), False, 'import torch\n'), ((13221, 13255), 'torch.LongTensor', 'torch.LongTensor', (['target_sentences'], {}), '(target_sentences)\n', (13237, 13255), False, 'import torch\n'), ((13304, 13343), 'torch.LongTensor', 'torch.LongTensor', (['input_sentence_length'], {}), 
'(input_sentence_length)\n', (13320, 13343), False, 'import torch\n'), ((13393, 13433), 'torch.LongTensor', 'torch.LongTensor', (['target_sentence_length'], {}), '(target_sentence_length)\n', (13409, 13433), False, 'import torch\n'), ((13486, 13529), 'torch.LongTensor', 'torch.LongTensor', (['input_conversation_length'], {}), '(input_conversation_length)\n', (13502, 13529), False, 'import torch\n'), ((15945, 15970), 'torch.LongTensor', 'torch.LongTensor', (['context'], {}), '(context)\n', (15961, 15970), False, 'import torch\n'), ((16013, 16046), 'torch.LongTensor', 'torch.LongTensor', (['sentence_length'], {}), '(sentence_length)\n', (16029, 16046), False, 'import torch\n'), ((20062, 20089), 'torch.LongTensor', 'torch.LongTensor', (['sentences'], {}), '(sentences)\n', (20078, 20089), False, 'import torch\n'), ((20132, 20165), 'torch.LongTensor', 'torch.LongTensor', (['sentence_length'], {}), '(sentence_length)\n', (20148, 20165), False, 'import torch\n'), ((20218, 20261), 'torch.LongTensor', 'torch.LongTensor', (['input_conversation_length'], {}), '(input_conversation_length)\n', (20234, 20261), False, 'import torch\n'), ((20305, 20339), 'torch.LongTensor', 'torch.LongTensor', (['target_sentences'], {}), '(target_sentences)\n', (20321, 20339), False, 'import torch\n'), ((20389, 20429), 'torch.LongTensor', 'torch.LongTensor', (['target_sentence_length'], {}), '(target_sentence_length)\n', (20405, 20429), False, 'import torch\n'), ((21888, 21909), 'tqdm.tqdm.write', 'tqdm.write', (['print_str'], {}), '(print_str)\n', (21898, 21909), False, 'from tqdm import tqdm\n'), ((22910, 22934), 'numpy.sum', 'np.sum', (['bow_loss_history'], {}), '(bow_loss_history)\n', (22916, 22934), True, 'import numpy as np\n'), ((25936, 25963), 'torch.LongTensor', 'torch.LongTensor', (['sentences'], {}), '(sentences)\n', (25952, 25963), False, 'import torch\n'), ((26006, 26039), 'torch.LongTensor', 'torch.LongTensor', (['sentence_length'], {}), '(sentence_length)\n', (26022, 26039), False, 
'import torch\n'), ((26113, 26156), 'torch.LongTensor', 'torch.LongTensor', (['input_conversation_length'], {}), '(input_conversation_length)\n', (26129, 26156), False, 'import torch\n'), ((26200, 26234), 'torch.LongTensor', 'torch.LongTensor', (['target_sentences'], {}), '(target_sentences)\n', (26216, 26234), False, 'import torch\n'), ((26284, 26324), 'torch.LongTensor', 'torch.LongTensor', (['target_sentence_length'], {}), '(target_sentence_length)\n', (26300, 26324), False, 'import torch\n'), ((26543, 26558), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26556, 26558), False, 'import torch\n'), ((29677, 29704), 'torch.LongTensor', 'torch.LongTensor', (['sentences'], {}), '(sentences)\n', (29693, 29704), False, 'import torch\n'), ((29747, 29780), 'torch.LongTensor', 'torch.LongTensor', (['sentence_length'], {}), '(sentence_length)\n', (29763, 29780), False, 'import torch\n'), ((29854, 29897), 'torch.LongTensor', 'torch.LongTensor', (['input_conversation_length'], {}), '(input_conversation_length)\n', (29870, 29897), False, 'import torch\n'), ((29941, 29975), 'torch.LongTensor', 'torch.LongTensor', (['target_sentences'], {}), '(target_sentences)\n', (29957, 29975), False, 'import torch\n'), ((30025, 30065), 'torch.LongTensor', 'torch.LongTensor', (['target_sentence_length'], {}), '(target_sentence_length)\n', (30041, 30065), False, 'import torch\n'), ((1411, 1437), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['param'], {}), '(param)\n', (1430, 1437), True, 'import torch.nn as nn\n'), ((26605, 26638), 'torch.LongTensor', 'torch.LongTensor', (['input_sentences'], {}), '(input_sentences)\n', (26621, 26638), False, 'import torch\n'), ((31005, 31027), 'torch.exp', 'torch.exp', (['(weights - m)'], {}), '(weights - m)\n', (31014, 31027), False, 'import torch\n')] |
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_, constant_
import jax
from jax_md import space, partition
from jax import numpy as jnp
import warnings
warnings.filterwarnings("ignore")
@torch.no_grad()
def pairwise_distance_norm(x, box_size, mask_self=False, cached_mask=None):
    """All-pairs Euclidean distances between rows of ``x`` under periodic
    boundary conditions (minimum-image convention).

    Processes one coordinate axis at a time and frees intermediates
    aggressively to bound peak GPU memory.

    Args:
        x: [N, d] particle coordinates. The ``dim != 2`` guard below implies
           this was written for d == 3 — confirm before reusing elsewhere.
        box_size: periodic box edge length.
        mask_self: if True, drop zero distances from the result. Note this
            removes self-pairs but would also remove exactly coincident
            particle pairs.
        cached_mask: unused placeholder (kept for interface compatibility).

    Returns:
        (dist, None): flattened [N*N] (or fewer, if masked) distances; the
        second slot is reserved for a cached mask and is always None.
    """
    dist_all = 0.
    for dim in range(x.shape[1]):
        # ||a - b||^2 = a^2 + b^2 - 2ab, computed for the current axis.
        # The loop always reads column 0 and then drops it (see below), so
        # ``dim`` advances implicitly through the axes.
        x_norm = (x[:, 0] ** 2).view(-1, 1)
        y_t = x[:, 0].view(1, -1)
        y_norm = x_norm.view(1, -1)
        dist = x_norm + y_norm - 2.0 * torch.mm(x[:, 0].view(-1, 1), y_t)
        del x_norm, y_norm, y_t
        # Zero the diagonal exactly (self-distances), clamp tiny negative
        # rounding errors, and zero any NaNs (x != x detects NaN).
        dist -= torch.diag(dist.diag())
        dist = torch.clamp(dist, 0.0, np.inf)
        dist[dist != dist] = 0.
        dist = dist.view(-1)
        # Minimum-image correction in squared-distance space: where the
        # 1-D separation exceeds box/2, replace d^2 by (d - box)^2
        # (sign(d^2) is 1 here since the masked entries are positive).
        dist_mat_mask = dist > (box_size**2 / 4)
        dist[dist_mat_mask] = dist[dist_mat_mask] + box_size**2 -\
            2.0 * box_size * torch.sqrt(dist[dist_mat_mask]) * torch.sign(dist[dist_mat_mask])
        del dist_mat_mask
        if dim != 2:
            # Drop the processed column so the next pass reads column 0.
            x = x[:, 1:]
        dist_all += dist
        del dist
        # Release freed intermediates back to the CUDA allocator.
        torch.cuda.empty_cache()
    # In-place sqrt of the accumulated per-axis squared distances.
    dist = torch.sqrt_(dist_all)
    if mask_self:
        # Keep strictly positive distances only (drops the N self-pairs).
        dist = dist[dist > 0.]
    return dist, None
def pair_distance(pos: torch.Tensor, box_size, mask_self=False, return_norm=False, cached_mask=None):
    """All-pairs displacement vectors between rows of ``pos`` under periodic
    boundary conditions (minimum-image convention).

    Flat row k = i * N + j holds ``pos[j] - pos[i]`` wrapped into
    [-box_size/2, box_size/2).

    Args:
        pos: [N, d] particle coordinates.
        box_size: periodic box edge length.
        mask_self: drop the N self-pairs (i == j).
        return_norm: return per-pair Euclidean norms instead of vectors.
        cached_mask: precomputed boolean self-pair mask to reuse.

    Returns:
        If return_norm: (norms, mask_array) where mask_array is the
        self-pair mask actually used (None when mask_self is False and no
        cached mask was given). Otherwise just the displacement matrix.
    """
    dist_mat = pos[None, :, :] - pos[:, None, :]
    # Wrap each component into [-box/2, box/2) (minimum image).
    dist_mat = torch.remainder(dist_mat + 0.5 * box_size, box_size) - 0.5 * box_size
    dist_mat = dist_mat.view(-1, pos.size(1))
    # BUG FIX: mask_array was previously undefined when mask_self=False and
    # return_norm=True, raising NameError. Default it to the cached mask.
    mask_array = cached_mask
    if mask_self:
        if mask_array is None:
            # Diagonal entries of the flattened N*N pair grid sit at
            # i * (N + 1); build the mask in O(N) rather than O(N^2).
            n = pos.shape[0]
            mask_array = np.ones(n * n, dtype=bool)
            mask_array[np.arange(n) * (n + 1)] = False
        dist_mat = dist_mat[mask_array]
    if return_norm:
        return dist_mat.norm(dim=1), mask_array
    return dist_mat
def pair_distance_two_system(pos1: torch.Tensor, pos2: torch.Tensor, box_size):
    """Displacement vectors from every particle in ``pos2`` to every one in
    ``pos1``, wrapped by the minimum-image convention.

    ``pos1`` and ``pos2`` must share the same trailing dimension d; the
    result is a [len(pos2) * len(pos1), d] matrix whose flat row
    i * len(pos1) + j holds pos1[j] - pos2[i].
    """
    half_box = 0.5 * box_size
    delta = pos1[None, :, :] - pos2[:, None, :]
    # Fold each component into [-box/2, box/2).
    delta = torch.remainder(delta + half_box, box_size) - half_box
    return delta.view(-1, pos1.size(1))
def get_neighbor(pos: torch.Tensor, r_cutoff, box_size, return_dist=True,
                 predefined_mask=None, bond_type=None):
    """Build the within-cutoff edge list for a periodic particle system.

    Returns (edge_idx [2, E], displacements [E, d], norms [E], bond types)
    when return_dist is True, otherwise (edge_idx, bond types). Self-edges
    are always excluded; ``predefined_mask`` can prune further.
    """
    if isinstance(pos, np.ndarray):
        # Move numpy inputs onto the GPU when one is present.
        if torch.cuda.is_available():
            pos = torch.from_numpy(pos).cuda()
            if bond_type is not None:
                bond_type = torch.from_numpy(bond_type).cuda()
    with torch.no_grad():
        n = pos.size(0)
        distance = pair_distance(pos, box_size)
        distance_norm = torch.norm(distance, dim=1)  # [n * n]
        # Flat pair index k = i * n + j: j is the source, i the center.
        src_idx = torch.arange(n).repeat(n).to(pos.device)
        ctr_idx = torch.arange(n).repeat_interleave(n).to(pos.device)
        keep = (distance_norm.view(-1) <= r_cutoff) & (src_idx != ctr_idx)
        if predefined_mask is not None:
            keep = keep & predefined_mask
        masked_bond_type = bond_type[keep] if bond_type is not None else None
        edge_idx = torch.stack((src_idx[keep], ctr_idx[keep]), dim=0)
        distance = distance[keep]
        distance_norm = distance_norm[keep]
    if return_dist:
        return edge_idx, distance, distance_norm, masked_bond_type
    return edge_idx, masked_bond_type
@jax.jit
def edge_type_water(i, j):
    """Classify an (i, j) edge in a water system of 3-atom molecules.

    Returns 0 when both atoms belong to the same molecule (atoms are laid
    out as consecutive triples), 1 otherwise.
    """
    delta = j - i
    pos_in_mol = i % 3
    # First atom of a molecule: partners are the next two atoms.
    same_first = (pos_in_mol == 0) & (delta > 0) & (delta <= 2)
    # Middle atom: partners are its immediate neighbors (and itself).
    same_middle = (pos_in_mol == 1) & (jnp.abs(delta) <= 1)
    # Last atom: partners are the previous two atoms.
    same_last = (pos_in_mol == 2) & (delta < 0) & (delta >= -2)
    intra = same_first | same_middle | same_last
    return jnp.where(intra, 0, 1)
class NeighborSearcher(object):
    """Thin wrapper around jax-md's periodic neighbor-list machinery."""

    def __init__(self, box_size, cutoff):
        # Displacement function under periodic boundary conditions.
        self.displacement_fn, _ = space.periodic(box_size)
        self.disp = jax.vmap(self.displacement_fn)
        self.dist = jax.vmap(space.metric(self.displacement_fn))
        self.cutoff = cutoff
        self.neighbor_list_fn = partition.neighbor_list(self.displacement_fn,
                                                        box_size,
                                                        cutoff,
                                                        dr_threshold=cutoff / 5.)
        self.neighbor_list_fn_jit = jax.jit(self.neighbor_list_fn)
        self.box_size = box_size
        self.neighbor_dist_jit = self.displacement_fn

    def init_new_neighbor_lst(self, pos):
        """Allocate a fresh neighbor list for positions ``pos``."""
        wrapped = jnp.mod(pos, self.box_size)
        return self.neighbor_list_fn(wrapped)

    def update_neighbor_lst(self, pos, nbr):
        """Refresh ``nbr``; reallocate if the jitted update overflows.

        Returns (neighbor list, update flag, whether a reallocation occurred).
        """
        update_idx = True
        wrapped = jnp.mod(pos, self.box_size)
        nbr = self.neighbor_list_fn_jit(wrapped, nbr)
        nbr_lst_updated = False
        if nbr.did_buffer_overflow:
            # Jitted update ran out of capacity; rebuild from scratch.
            nbr = self.neighbor_list_fn(wrapped)
            nbr_lst_updated = True
        return nbr, update_idx, nbr_lst_updated
def graph_network_nbr_fn(displacement_fn,
                         cutoff,
                         edge_type_fn,
                         N):
    """Build edge-construction helpers for a graph network from a jax-md
    neighbor list.

    Args:
        displacement_fn: periodic displacement function (from space.periodic).
        cutoff: cutoff radius; pairs farther apart are masked out.
        edge_type_fn: scalar (i, j) -> edge-type function.
        N: particle count; jax-md pads neighbor index rows with N.

    Returns:
        (nbrlst_to_edge, filter_edge_idx):
        nbrlst_to_edge maps (pos, neigh_idx) to per-edge displacement
        vectors, norms and a validity mask; filter_edge_idx maps paired
        (center, neighbor) index arrays to edge types.
    """
    def nbrlst_to_edge(pos: jnp.ndarray, neigh_idx: jnp.ndarray):
        # pos must be a jax numpy array, otherwise fancy indexing will fail.
        # BUG FIX: jax.partial was removed from JAX's public API; use
        # functools.partial (already imported at the top of this module).
        d = partial(displacement_fn)
        d = space.map_neighbor(d)
        pos_neigh = pos[neigh_idx]
        dR = d(pos, pos_neigh)
        dr_2 = space.square_distance(dR)
        # Mask out padded entries (index == N) and pairs beyond the cutoff.
        mask = jnp.logical_and(neigh_idx != N, dr_2 < cutoff ** 2)
        edge_dist = dR
        edge_norm = jnp.sqrt(dr_2)
        return edge_dist, edge_norm, mask

    def filter_edge_idx(center_idx: jnp.ndarray, neigh_idx: jnp.ndarray):
        # Vectorize the scalar edge-type function over all (i, j) pairs.
        edge_type_fv = jax.vmap(edge_type_fn, in_axes=(0, 0), out_axes=0)
        return edge_type_fv(center_idx, neigh_idx)

    return nbrlst_to_edge, filter_edge_idx
def get_water_box_neighbor_fn(box_size, cutoff):
    """Construct a jax-md neighbor-list function for a periodic box.

    Uses a buffer threshold of cutoff / 5 and no extra capacity headroom
    (capacity_multiplier=1.0).
    """
    disp_fn, _ = space.periodic(box_size)
    return partition.neighbor_list(disp_fn,
                                   box_size,
                                   cutoff,
                                   dr_threshold=cutoff / 5.,
                                   capacity_multiplier=1.0)
| [
"jax_md.space.periodic",
"jax.numpy.logical_or",
"torch.sqrt",
"torch.cat",
"numpy.ones",
"torch.no_grad",
"jax.jit",
"jax.numpy.where",
"torch.remainder",
"torch.sign",
"jax_md.partition.neighbor_list",
"jax.vmap",
"torch.norm",
"jax_md.space.metric",
"torch.sqrt_",
"torch.clamp",
"... | [((230, 263), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (253, 263), False, 'import warnings\n'), ((267, 282), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (280, 282), False, 'import torch\n'), ((1693, 1714), 'torch.sqrt_', 'torch.sqrt_', (['dist_all'], {}), '(dist_all)\n', (1704, 1714), False, 'import torch\n'), ((5143, 5181), 'jax.numpy.logical_and', 'jnp.logical_and', (['(i % 3 == 0)', '(0 < j - i)'], {}), '(i % 3 == 0, 0 < j - i)\n', (5158, 5181), True, 'from jax import numpy as jnp\n'), ((5194, 5228), 'jax.numpy.logical_and', 'jnp.logical_and', (['cond1', '(j - i <= 2)'], {}), '(cond1, j - i <= 2)\n', (5209, 5228), True, 'from jax import numpy as jnp\n'), ((5300, 5338), 'jax.numpy.logical_and', 'jnp.logical_and', (['(i % 3 == 2)', '(0 < i - j)'], {}), '(i % 3 == 2, 0 < i - j)\n', (5315, 5338), True, 'from jax import numpy as jnp\n'), ((5351, 5385), 'jax.numpy.logical_and', 'jnp.logical_and', (['cond3', '(i - j <= 2)'], {}), '(cond3, i - j <= 2)\n', (5366, 5385), True, 'from jax import numpy as jnp\n'), ((5478, 5511), 'jax.numpy.where', 'jnp.where', (['in_same_molecule', '(0)', '(1)'], {}), '(in_same_molecule, 0, 1)\n', (5487, 5511), True, 'from jax import numpy as jnp\n'), ((7987, 8011), 'jax_md.space.periodic', 'space.periodic', (['box_size'], {}), '(box_size)\n', (8001, 8011), False, 'from jax_md import space, partition\n'), ((8035, 8150), 'jax_md.partition.neighbor_list', 'partition.neighbor_list', (['displacement_fn', 'box_size', 'cutoff'], {'dr_threshold': '(cutoff / 5.0)', 'capacity_multiplier': '(1.0)'}), '(displacement_fn, box_size, cutoff, dr_threshold=\n cutoff / 5.0, capacity_multiplier=1.0)\n', (8058, 8150), False, 'from jax_md import space, partition\n'), ((1115, 1145), 'torch.clamp', 'torch.clamp', (['dist', '(0.0)', 'np.inf'], {}), '(dist, 0.0, np.inf)\n', (1126, 1145), False, 'import torch\n'), ((1657, 1681), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', 
(1679, 1681), False, 'import torch\n'), ((2370, 2422), 'torch.remainder', 'torch.remainder', (['(dist_mat + 0.5 * box_size)', 'box_size'], {}), '(dist_mat + 0.5 * box_size, box_size)\n', (2385, 2422), False, 'import torch\n'), ((3497, 3549), 'torch.remainder', 'torch.remainder', (['(dist_mat + 0.5 * box_size)', 'box_size'], {}), '(dist_mat + 0.5 * box_size, box_size)\n', (3512, 3549), False, 'import torch\n'), ((3783, 3808), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3806, 3808), False, 'import torch\n'), ((3968, 3983), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3981, 3983), False, 'import torch\n'), ((4057, 4084), 'torch.norm', 'torch.norm', (['distance'], {'dim': '(1)'}), '(distance, dim=1)\n', (4067, 4084), False, 'import torch\n'), ((4831, 4873), 'torch.cat', 'torch.cat', (['(edge_idx_1, edge_idx_2)'], {'dim': '(0)'}), '((edge_idx_1, edge_idx_2), dim=0)\n', (4840, 4873), False, 'import torch\n'), ((5425, 5453), 'jax.numpy.logical_or', 'jnp.logical_or', (['cond1', 'cond2'], {}), '(cond1, cond2)\n', (5439, 5453), True, 'from jax import numpy as jnp\n'), ((5709, 5733), 'jax_md.space.periodic', 'space.periodic', (['box_size'], {}), '(box_size)\n', (5723, 5733), False, 'from jax_md import space, partition\n'), ((5754, 5784), 'jax.vmap', 'jax.vmap', (['self.displacement_fn'], {}), '(self.displacement_fn)\n', (5762, 5784), False, 'import jax\n'), ((5911, 6005), 'jax_md.partition.neighbor_list', 'partition.neighbor_list', (['self.displacement_fn', 'box_size', 'cutoff'], {'dr_threshold': '(cutoff / 5.0)'}), '(self.displacement_fn, box_size, cutoff,\n dr_threshold=cutoff / 5.0)\n', (5934, 6005), False, 'from jax_md import space, partition\n'), ((6203, 6233), 'jax.jit', 'jax.jit', (['self.neighbor_list_fn'], {}), '(self.neighbor_list_fn)\n', (6210, 6233), False, 'import jax\n'), ((6416, 6443), 'jax.numpy.mod', 'jnp.mod', (['pos', 'self.box_size'], {}), '(pos, self.box_size)\n', (6423, 6443), True, 'from jax import numpy as jnp\n'), 
((6591, 6618), 'jax.numpy.mod', 'jnp.mod', (['pos', 'self.box_size'], {}), '(pos, self.box_size)\n', (6598, 6618), True, 'from jax import numpy as jnp\n'), ((7270, 7298), 'jax.partial', 'jax.partial', (['displacement_fn'], {}), '(displacement_fn)\n', (7281, 7298), False, 'import jax\n'), ((7311, 7332), 'jax_md.space.map_neighbor', 'space.map_neighbor', (['d'], {}), '(d)\n', (7329, 7332), False, 'from jax_md import space, partition\n'), ((7415, 7440), 'jax_md.space.square_distance', 'space.square_distance', (['dR'], {}), '(dR)\n', (7436, 7440), False, 'from jax_md import space, partition\n'), ((7456, 7507), 'jax.numpy.logical_and', 'jnp.logical_and', (['(neigh_idx != N)', '(dr_2 < cutoff ** 2)'], {}), '(neigh_idx != N, dr_2 < cutoff ** 2)\n', (7471, 7507), True, 'from jax import numpy as jnp\n'), ((7552, 7566), 'jax.numpy.sqrt', 'jnp.sqrt', (['dr_2'], {}), '(dr_2)\n', (7560, 7566), True, 'from jax import numpy as jnp\n'), ((7707, 7757), 'jax.vmap', 'jax.vmap', (['edge_type_fn'], {'in_axes': '(0, 0)', 'out_axes': '(0)'}), '(edge_type_fn, in_axes=(0, 0), out_axes=0)\n', (7715, 7757), False, 'import jax\n'), ((2684, 2732), 'numpy.ones', 'np.ones', (['(pos.shape[0] * pos.shape[0])'], {'dtype': 'bool'}), '(pos.shape[0] * pos.shape[0], dtype=bool)\n', (2691, 2732), True, 'import numpy as np\n'), ((5814, 5848), 'jax_md.space.metric', 'space.metric', (['self.displacement_fn'], {}), '(self.displacement_fn)\n', (5826, 5848), False, 'from jax_md import space, partition\n'), ((1502, 1533), 'torch.sign', 'torch.sign', (['dist[dist_mat_mask]'], {}), '(dist[dist_mat_mask])\n', (1512, 1533), False, 'import torch\n'), ((1468, 1499), 'torch.sqrt', 'torch.sqrt', (['dist[dist_mat_mask]'], {}), '(dist[dist_mat_mask])\n', (1478, 1499), False, 'import torch\n'), ((3828, 3849), 'torch.from_numpy', 'torch.from_numpy', (['pos'], {}), '(pos)\n', (3844, 3849), False, 'import torch\n'), ((3923, 3950), 'torch.from_numpy', 'torch.from_numpy', (['bond_type'], {}), '(bond_type)\n', (3939, 3950), 
False, 'import torch\n')] |
""" Techniques to compute preactivation bounds for piecewise linear nets"""
from relu_nets import ReLUNet
from hyperbox import Hyperbox
import utilities as utils
import torch
import numpy as np
import torch.nn as nn
# ====================================================================
# = Class to store preactivation bound results =
# ====================================================================
# REFACTOR THIS -- SOOOOO UGLY!!!
class PreactivationBounds(object):
    """ Stores elementwise lower/upper bounds on each ReLU preactivation of a
        ReLUNet over an input hyperbox, and (after backprop_bounds is called)
        bounds on the backpropagated gradient d<c, net(x)>/dNeuron.
    """
    @classmethod
    def naive_ia(cls, network, hyperbox):
        """ Builds a PreactivationBounds via naive interval analysis """
        return naive_interval_analysis(network, hyperbox)

    @classmethod
    def preact_constructor(cls, preact_input, network, hyperbox):
        """ Flexible constructor.
        ARGS:
            preact_input: either the string 'ia' (compute fresh bounds via
                          interval analysis) or an already-built
                          PreactivationBounds (returned unchanged)
            network: ReLUNet object (only used in the 'ia' case)
            hyperbox: Hyperbox object (only used in the 'ia' case)
        """
        if preact_input == 'ia':
            # BUGFIX: previously called cls.naive_ia_from_hyperbox, which
            # does not exist -- the classmethod is named naive_ia.
            return cls.naive_ia(network, hyperbox)
        elif isinstance(preact_input, PreactivationBounds):
            return preact_input

    def __init__(self, network, hyperbox):
        self.network = network        # ReLUNet these bounds describe
        self.hyperbox = hyperbox      # input domain the bounds hold over
        self.low_dict = {}            # relu layer index -> preact lower bounds
        self.high_dict = {}           # relu layer index -> preact upper bounds
        self.backprop_lows = {}       # layer index -> gradient lower bounds
        self.backprop_highs = {}      # layer index -> gradient upper bounds
        self.backprop_vector = None   # c-vector the backprop bounds refer to

    def _forward_computed(self):
        """ True iff forward (preactivation) bounds have been stored """
        return sum(len(_) for _ in [self.low_dict, self.high_dict]) > 0

    def _backward_computed(self):
        """ True iff backprop (gradient) bounds have been stored """
        return sum(len(_) for _ in
                   [self.backprop_lows, self.backprop_highs]) > 0

    def add_ith_layer_bounds(self, i, lows, highs):
        """ Adds the bounds as a [n_neurons, 2] numpy array """
        self.low_dict[i] = lows
        self.high_dict[i] = highs

    def get_ith_layer_bounds(self, i, two_col=False):
        """ Returns (lows, highs) for layer i; a single two-column array
            if two_col is True """
        output = self.low_dict[i], self.high_dict[i]
        if two_col:
            return utils.two_col(*output)
        return output

    def bound_iter(self, two_col=False):
        """ Iterates over the stored per-layer bounds in layer order """
        for i in range(len(self.low_dict)):
            yield self.get_ith_layer_bounds(i, two_col=two_col)

    def get_ith_layer_backprop_bounds(self, i, two_col=False):
        """ Returns (lows, highs) gradient bounds for layer i; requires
            backprop_bounds(...) to have been called first """
        output = self.backprop_lows[i], self.backprop_highs[i]
        if two_col:
            return utils.two_col(*output)
        return output

    def get_ith_layer_on_off_set(self, i):
        """ For the i^th layer neurons of a neural_net,
            will return a dict like
            {'on': {on_indices in a set}
             'off': {off_indices in a set}
             'uncertain': {indices that may be on or off}}
        """
        output = {'on': set(),
                  'off': set(),
                  'uncertain': set()}
        ith_low, ith_high = self.get_ith_layer_bounds(i)
        for j, (lo, hi) in enumerate(zip(ith_low, ith_high)):
            if hi < 0:
                output['off'].add(j)
            elif lo > 0:
                output['on'].add(j)
            else:
                output['uncertain'].add(j)
        return output

    def backprop_bounds(self, c_vector):
        """ For a given vector, vec, computes upper and lower bounds
            on the partial derivative d<vec, network(x)>/dNeuron
            for each neuron. This is related, but not identical, to fast-lip.
        ARGS:
            c_vector: torch.Tensor or np.ndarray - vector the output of
                      self.network gets multiplied by
        RETURNS:
            None, but sets self.backprop_vector, self.backprop_lows,
            self.backprop_highs
        """
        if self._backward_computed():
            return
        preswitch_lows = {}
        preswitch_highs = {}
        # Push intervals backwards through each layer. The initial interval
        # is [d_i, d_i] for each neuron i, where d = W^last * c_vector.
        # Everything is a numpy array.
        c_vector = utils.as_numpy(c_vector)
        for layer_no in range(len(self.network.fcs) - 1, -1, -1):
            fc = self.network.fcs[layer_no]
            if layer_no == len(self.network.fcs) - 1:
                # Last linear layer is special: gradient is exactly c^T W
                layer_lows = layer_highs = c_vector.dot(utils.as_numpy(fc.weight))
                preswitch_lows[layer_no] = layer_lows
                preswitch_highs[layer_no] = layer_highs
                continue
            # Usual case: a ReLU switch followed by a linear layer
            prev_lo = preswitch_lows[layer_no + 1]
            prev_hi = preswitch_highs[layer_no + 1]
            preact_lo, preact_hi = self.low_dict[layer_no], self.high_dict[layer_no]
            postswitch_lo, postswitch_hi = \
                PreactivationBounds._backprop_switch_layer(prev_lo, prev_hi,
                                                           preact_lo, preact_hi)
            next_lo, next_hi = \
                PreactivationBounds._backprop_linear_layer(fc, postswitch_lo,
                                                           postswitch_hi)
            preswitch_lows[layer_no] = next_lo
            preswitch_highs[layer_no] = next_hi
        # Set values and return
        self.backprop_vector = c_vector
        self.backprop_lows = preswitch_lows
        self.backprop_highs = preswitch_highs

    @classmethod
    def _backprop_linear_layer(cls, fc_layer, input_lows, input_highs):
        """ Backprops an interval through a linear layer y = Wx + b:
            given intervals on df/dy, returns intervals on df/dx.
        ARGS:
            fc_layer: nn.Linear object we are backpropping through
            input_lows: np.Array - lower bounds on the incoming gradient
            input_highs: np.Array - upper bounds on the incoming gradient
        RETURNS:
            (output_lows, output_highs): np.Arrays bounding the outgoing
            gradient
        """
        weight_t = utils.as_numpy(fc_layer.weight.t())
        # Midpoint/radius form keeps the interval arithmetic to two dots
        midpoint = (input_lows + input_highs) / 2.0
        radius = (input_highs - input_lows) / 2.0
        new_midpoint = weight_t.dot(midpoint)
        new_radius = np.abs(weight_t).dot(radius)
        output_lows = new_midpoint - new_radius
        output_highs = new_midpoint + new_radius
        return output_lows, output_highs

    @classmethod
    def _backprop_switch_layer(cls, input_lows, input_highs, preact_lows,
                               preact_highs):
        """ Does interval bound propagation through a switch layer. Follows
            the following rules: (elementwise)
            switch([lo, hi], a) :=
            --- [lo        , hi       ]  if a is guaranteed to be 1
            --- [0         , 0        ]  if a is guaranteed to be 0
            --- [min(lo, 0), max(0, hi)] if a is uncertain
        ARGS:
            input_lows: np.Array - lower bounds on the input
            input_highs: np.Array - upper bounds on the input
            preact_lows: np.Array - lower bounds on the relu preactivation
            preact_highs: np.Array - upper bounds on the relu preactivation
        RETURNS:
            (output_lows, output_highs): np.Arrays bounding the output
        """
        # On case by default
        new_lows = np.copy(input_lows)
        new_highs = np.copy(input_highs)
        on_neurons = preact_lows > 0
        off_neurons = preact_highs < 0
        uncertain_neurons = np.logical_not(np.logical_or(on_neurons, off_neurons))
        # Off neurons pass no gradient at all
        new_lows[off_neurons] = 0.0
        new_highs[off_neurons] = 0.0
        # Uncertain neurons may or may not pass gradient, so the interval
        # must include 0
        new_lows[np.logical_and(uncertain_neurons, (input_lows > 0))] = 0.0
        new_highs[np.logical_and(uncertain_neurons, (input_highs < 0))] = 0.0
        return new_lows, new_highs
# ===============================================================
# = Preactivation Bound Compute Techniques =
# ===============================================================
def naive_interval_analysis(network, domain):
    """ Simplest form of interval bound propagation, implemented using
        equation (6) from https://arxiv.org/pdf/1810.12715.pdf
    ARGS:
        network : ReLUNet object - network we're building bounds for
        domain: Hyperbox object - bounds on the input we allow
    RETURNS:
        PreactivationBounds object holding the layerwise bounds
    """
    bounds = PreactivationBounds(network, domain)
    lo, hi = domain.box_low, domain.box_hi
    relu_count = 0
    for layer in network.net:
        if isinstance(layer, nn.ReLU):
            # Record the pre-ReLU interval, then clip it through the ReLU
            bounds.add_ith_layer_bounds(relu_count, lo, hi)
            relu_count += 1
            lo, hi = np.maximum(lo, 0), np.maximum(hi, 0)
        elif isinstance(layer, nn.Linear):
            # Push the interval through y = Wx + b in midpoint/radius form
            mid = (lo + hi) / 2.0
            rad = (hi - lo) / 2.0
            new_mid = utils.as_numpy(layer(torch.Tensor(mid)))
            new_rad = utils.as_numpy(torch.abs(layer.weight)).dot(rad)
            lo, hi = new_mid - new_rad, new_mid + new_rad
    # Also keep bounds for the output of a trailing linear layer
    if isinstance(network.net[-1], nn.Linear):
        bounds.add_ith_layer_bounds(relu_count, lo, hi)
    return bounds
def improved_interval_analysis(network, domain):
    """ Placeholder for a tighter interval analysis -- not implemented yet. """
    pass # Do later
def linear_programming_relaxation(network, domain):
    """ Placeholder for an LP-relaxation bound computation -- not implemented yet. """
    pass # Do later
| [
"numpy.maximum",
"numpy.abs",
"numpy.logical_and",
"numpy.copy",
"utilities.two_col",
"torch.Tensor",
"utilities.as_numpy",
"torch.abs"
] | [((4004, 4028), 'utilities.as_numpy', 'utils.as_numpy', (['c_vector'], {}), '(c_vector)\n', (4018, 4028), True, 'import utilities as utils\n'), ((8110, 8129), 'numpy.copy', 'np.copy', (['input_lows'], {}), '(input_lows)\n', (8117, 8129), True, 'import numpy as np\n'), ((8150, 8170), 'numpy.copy', 'np.copy', (['input_highs'], {}), '(input_highs)\n', (8157, 8170), True, 'import numpy as np\n'), ((1733, 1755), 'utilities.two_col', 'utils.two_col', (['*output'], {}), '(*output)\n', (1746, 1755), True, 'import utilities as utils\n'), ((2094, 2116), 'utilities.two_col', 'utils.two_col', (['*output'], {}), '(*output)\n', (2107, 2116), True, 'import utilities as utils\n'), ((8470, 8519), 'numpy.logical_and', 'np.logical_and', (['uncertain_neurons', '(input_lows > 0)'], {}), '(uncertain_neurons, input_lows > 0)\n', (8484, 8519), True, 'import numpy as np\n'), ((8547, 8597), 'numpy.logical_and', 'np.logical_and', (['uncertain_neurons', '(input_highs < 0)'], {}), '(uncertain_neurons, input_highs < 0)\n', (8561, 8597), True, 'import numpy as np\n'), ((9638, 9662), 'numpy.maximum', 'np.maximum', (['prev_lows', '(0)'], {}), '(prev_lows, 0)\n', (9648, 9662), True, 'import numpy as np\n'), ((9688, 9713), 'numpy.maximum', 'np.maximum', (['prev_highs', '(0)'], {}), '(prev_highs, 0)\n', (9698, 9713), True, 'import numpy as np\n'), ((6496, 6512), 'numpy.abs', 'np.abs', (['weight_t'], {}), '(weight_t)\n', (6502, 6512), True, 'import numpy as np\n'), ((4286, 4311), 'utilities.as_numpy', 'utils.as_numpy', (['fc.weight'], {}), '(fc.weight)\n', (4300, 4311), True, 'import utilities as utils\n'), ((9911, 9933), 'torch.Tensor', 'torch.Tensor', (['midpoint'], {}), '(midpoint)\n', (9923, 9933), False, 'import torch\n'), ((9976, 9999), 'torch.abs', 'torch.abs', (['layer.weight'], {}), '(layer.weight)\n', (9985, 9999), False, 'import torch\n')] |
import my_keras.input_manipulation as to_test
import numpy as np
def test_rotate_array():
    # Rotating by 2 moves the first two elements to the back.
    expected = np.asarray([3, 4, 5, 1, 2])
    assert np.array_equal(to_test.rotate([1, 2, 3, 4, 5], 2), expected)
def test_rotate_array_wrap():
    # A shift larger than the length wraps around (7 is equivalent to 2 here).
    expected = np.asarray([3, 4, 5, 1, 2])
    assert np.array_equal(to_test.rotate([1, 2, 3, 4, 5], 7), expected)
| [
"my_keras.input_manipulation.rotate",
"numpy.asarray"
] | [((143, 164), 'my_keras.input_manipulation.rotate', 'to_test.rotate', (['xs', '(2)'], {}), '(xs, 2)\n', (157, 164), True, 'import my_keras.input_manipulation as to_test\n'), ((166, 193), 'numpy.asarray', 'np.asarray', (['[3, 4, 5, 1, 2]'], {}), '([3, 4, 5, 1, 2])\n', (176, 193), True, 'import numpy as np\n'), ((278, 299), 'my_keras.input_manipulation.rotate', 'to_test.rotate', (['xs', '(7)'], {}), '(xs, 7)\n', (292, 299), True, 'import my_keras.input_manipulation as to_test\n'), ((301, 328), 'numpy.asarray', 'np.asarray', (['[3, 4, 5, 1, 2]'], {}), '([3, 4, 5, 1, 2])\n', (311, 328), True, 'import numpy as np\n')] |
import glob
import imageio
import numpy as np
import os
from os import path
import random
import tqdm
from dataloader import _pil_loader
import torch
from torch import nn
from torch.utils import data
class GoPro(data.Dataset):
    """GoPro clip dataset for frame interpolation.

    Each item is a list of ``seq_len`` consecutive frames (augmented when
    training), the index identifying the chosen interpolation frame, and
    the source file paths.  Decoded frames are cached as ``.npy`` binaries
    under ``<data_root>/bin`` when the dataset is constructed.
    """
    def __init__(self, root, transform=None, dim=(1280, 720), randomCropSize=(352, 352), seq_len=11, train=True):
        # root: dataset root; _set_directory appends a 'GoPro' subfolder.
        # dim: full frame size (width, height); randomCropSize must fit inside.
        super(GoPro, self).__init__()
        self.seq_len = seq_len
        self.randomCropSize = randomCropSize
        # Largest valid top-left corner for a random crop.
        self.cropX0 = dim[0] - randomCropSize[0]
        self.cropY0 = dim[1] - randomCropSize[1]
        self.root = root
        self.transform = transform
        self.train = train
        self._set_directory(root)
        self.data_dict = self._scan(train)
        self.n_samples = 0
        self.n_sample_list = []
        # Hard-wired to the .npy cache; 'img' would read the .png files directly.
        self.img_type = 'bin'
        if self.img_type == 'bin':
            for k in tqdm.tqdm(self.data_dict.keys(), ncols=80):
                bin_path = os.path.join(self.data_root, 'bin')
                for idx, v in enumerate(self.data_dict[k]):
                    save_as = v.replace(self.data_root, bin_path)
                    save_as = save_as.replace('.png', '')
                    # If we don't have the binary, make it.
                    if not os.path.isfile(save_as+'.npy'):
                        os.makedirs(os.path.dirname(save_as), exist_ok=True)
                        img = imageio.imread(v)
                        # Bypassing the zip archive error
                        # _, w, c = img.shape
                        # dummy = np.zeros((1,w,c))
                        # img_dummy = np.concatenate((img, dummy), axis=0)
                        # torch.save(img_dummy, save_as)
                        np.save(save_as, img)
                    # Update the dictionary
                    self.data_dict[k][idx] = save_as + '.npy'
        # when testing, we do not overlap the video sequence (0~6, 7~13, ...)
        if train:
            # Training: one sample per sliding window of seq_len frames.
            for k in self.data_dict.keys():
                self.n_sample_list.append(self.n_samples)
                self.n_samples += len(self.data_dict[k]) - (self.seq_len - 1)
            self.n_sample_list.append(self.n_samples)
        else:
            # Testing: non-overlapping windows of seq_len frames.
            for k in self.data_dict.keys():
                self.n_sample_list.append(self.n_samples)
                self.n_samples += len(self.data_dict[k]) // self.seq_len
            self.n_sample_list.append(self.n_samples)
        print("Sample #:", self.n_sample_list)
    def _set_directory(self, data_root):
        """Resolve the dataset root to the 'GoPro' subdirectory."""
        self.data_root = path.join(data_root, 'GoPro')
    def _scan(self, train):
        """Return {sequence_key: sorted list of .png frame paths} for the split."""
        def _make_keys(dir_path):
            """
            :param dir_path: Path
            :return: train_000 form
            """
            dir, base = path.dirname(dir_path), path.basename(dir_path)
            tv = 'train' if dir.find('train')>=0 else 'test'
            return tv + '_' + base
        if train:
            dir_train = path.join(self.data_root, 'train')
            list_seq = glob.glob(dir_train+'/*')
            data_dict = {
                _make_keys(k): sorted(
                    glob.glob(path.join(k, '*' + '.png'))
                ) for k in list_seq
            }
        else:
            dir_test = path.join(self.data_root, 'test')
            list_seq = glob.glob(dir_test+'/*')
            data_dict = {
                _make_keys(k): sorted(
                    glob.glob(path.join(k, '*' + '.png'))
                ) for k in list_seq
            }
        return data_dict
    def _find_key(self, idx):
        """Map a global sample index to (sequence key, index within sequence)."""
        for i, k in enumerate(self.data_dict.keys()):
            if self.n_sample_list[i] <= idx and idx < self.n_sample_list[i+1]:
                return k, idx - self.n_sample_list[i]
        raise ValueError()
    def __getitem__(self, idx):
        """Return (frames, returnIndex, filepath_list) for sample ``idx``."""
        if self.train:
            # NOTE(review): 'half' is never used and idx is halved although
            # __len__ returns n_samples -- looks like a leftover from a
            # 2x-oversampling scheme; confirm against the training loader.
            half = idx % 2
            idx = idx // 2
            key, index = self._find_key(idx)
        else:
            key, index = self._find_key(idx)
        if self.train:
            filepath_list = [self.data_dict[key][i] for i in range(index, index+self.seq_len)]
        else:
            # Non-overlapping windows: scale the window index to a frame index.
            index *= self.seq_len
            filepath_list = [self.data_dict[key][i] for i in range(index, index+self.seq_len)]
        if self.train:
            # Randomly play the clip backwards half the time.
            r = random.random()
            if r > 0.5:
                filepath_list.reverse()
        sample = []
        if self.train:
            ### Data Augmentation ###
            # 11 frames in a clip
            firstFrame = 0
            # Apply random crop on the 9 input frames
            cropX = random.randint(0, self.cropX0)
            cropY = random.randint(0, self.cropY0)
            cropArea = (cropX, cropY, cropX + self.randomCropSize[0], cropY + self.randomCropSize[1])
            # Random reverse frame
            # frameRange = range(firstFrame, firstFrame + 9) if (random.randint(0, 1)) else range(firstFrame + 8, firstFrame - 1, -1)
            # Pick an interior target frame, excluding the exact middle frame.
            while True:
                IFrameIndex = random.randint(firstFrame + 1, firstFrame + self.seq_len-2)
                if not IFrameIndex == firstFrame + self.seq_len // 2:
                    break
            if random.randint(0, 1):
                # frameRange = [firstFrame, IFrameIndex, firstFrame + 10]
                frameRange = [i for i in range(firstFrame, firstFrame + self.seq_len)]
                returnIndex = IFrameIndex - firstFrame - 1
            else:
                # frameRange = [firstFrame + 10, IFrameIndex, firstFrame]
                frameRange = [i for i in range(firstFrame + self.seq_len-1, firstFrame - 1, -1)]
                returnIndex = firstFrame - IFrameIndex + self.seq_len-2
            # Random flip frame
            randomFrameFlip = random.randint(0, 1)
        else:
            # Fixed settings to return same samples every epoch.
            # For validation/test sets.
            firstFrame = 0
            cropArea = (0, 0, self.randomCropSize[0], self.randomCropSize[1])
            # NOTE(review): 'index' was multiplied by seq_len above, so this
            # modulo mixes frame and window indexing -- verify intent.
            IFrameIndex = index % (self.seq_len-2) + 1
            if IFrameIndex == firstFrame + self.seq_len // 2:
                IFrameIndex = IFrameIndex - 1
            returnIndex = IFrameIndex - 1
            # frameRange = [0, IFrameIndex, 10]
            frameRange = [i for i in range(self.seq_len)]
            randomFrameFlip = 0
        if self.img_type == 'img':
            fn_read = imageio.imread
        elif self.img_type == 'bin':
            fn_read = np.load
        else:
            raise ValueError('Wrong img type: {}'.format(self.img_type))
        # Loop over for all frames corresponding to the `index`.
        for frameIndex in frameRange:
            # Open image using pil and augment the image.
            # image = _pil_loader(self.framesPath[index][frameIndex], cropArea=cropArea, frameFlip=randomFrameFlip)
            # image = _pil_loader(filepath_list[frameIndex], cropArea=cropArea, frameFlip=randomFrameFlip)
            image = fn_read(filepath_list[frameIndex])
            image = image[cropArea[1]:cropArea[3], cropArea[0]:cropArea[2]]
            if randomFrameFlip:
                image = np.ascontiguousarray(image[:, ::-1])
            # Apply transformation if specified.
            if self.transform is not None:
                image = self.transform(image)
            sample.append(image)
        return sample, returnIndex, filepath_list
    def __len__(self):
        # Total number of clip samples computed in __init__.
        return self.n_samples
| [
"numpy.save",
"random.randint",
"os.path.basename",
"os.path.dirname",
"numpy.ascontiguousarray",
"imageio.imread",
"random.random",
"os.path.isfile",
"glob.glob",
"os.path.join"
] | [((2593, 2622), 'os.path.join', 'path.join', (['data_root', '"""GoPro"""'], {}), "(data_root, 'GoPro')\n", (2602, 2622), False, 'from os import path\n'), ((2999, 3033), 'os.path.join', 'path.join', (['self.data_root', '"""train"""'], {}), "(self.data_root, 'train')\n", (3008, 3033), False, 'from os import path\n'), ((3057, 3084), 'glob.glob', 'glob.glob', (["(dir_train + '/*')"], {}), "(dir_train + '/*')\n", (3066, 3084), False, 'import glob\n'), ((3294, 3327), 'os.path.join', 'path.join', (['self.data_root', '"""test"""'], {}), "(self.data_root, 'test')\n", (3303, 3327), False, 'from os import path\n'), ((3351, 3377), 'glob.glob', 'glob.glob', (["(dir_test + '/*')"], {}), "(dir_test + '/*')\n", (3360, 3377), False, 'import glob\n'), ((4343, 4358), 'random.random', 'random.random', ([], {}), '()\n', (4356, 4358), False, 'import random\n'), ((4641, 4671), 'random.randint', 'random.randint', (['(0)', 'self.cropX0'], {}), '(0, self.cropX0)\n', (4655, 4671), False, 'import random\n'), ((4692, 4722), 'random.randint', 'random.randint', (['(0)', 'self.cropY0'], {}), '(0, self.cropY0)\n', (4706, 4722), False, 'import random\n'), ((5220, 5240), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (5234, 5240), False, 'import random\n'), ((5785, 5805), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (5799, 5805), False, 'import random\n'), ((988, 1023), 'os.path.join', 'os.path.join', (['self.data_root', '"""bin"""'], {}), "(self.data_root, 'bin')\n", (1000, 1023), False, 'import os\n'), ((2812, 2834), 'os.path.dirname', 'path.dirname', (['dir_path'], {}), '(dir_path)\n', (2824, 2834), False, 'from os import path\n'), ((2836, 2859), 'os.path.basename', 'path.basename', (['dir_path'], {}), '(dir_path)\n', (2849, 2859), False, 'from os import path\n'), ((5048, 5109), 'random.randint', 'random.randint', (['(firstFrame + 1)', '(firstFrame + self.seq_len - 2)'], {}), '(firstFrame + 1, firstFrame + self.seq_len - 2)\n', (5062, 5109), 
False, 'import random\n'), ((7181, 7217), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image[:, ::-1]'], {}), '(image[:, ::-1])\n', (7201, 7217), True, 'import numpy as np\n'), ((1295, 1327), 'os.path.isfile', 'os.path.isfile', (["(save_as + '.npy')"], {}), "(save_as + '.npy')\n", (1309, 1327), False, 'import os\n'), ((1434, 1451), 'imageio.imread', 'imageio.imread', (['v'], {}), '(v)\n', (1448, 1451), False, 'import imageio\n'), ((1764, 1785), 'numpy.save', 'np.save', (['save_as', 'img'], {}), '(save_as, img)\n', (1771, 1785), True, 'import numpy as np\n'), ((3178, 3204), 'os.path.join', 'path.join', (['k', "('*' + '.png')"], {}), "(k, '*' + '.png')\n", (3187, 3204), False, 'from os import path\n'), ((3471, 3497), 'os.path.join', 'path.join', (['k', "('*' + '.png')"], {}), "(k, '*' + '.png')\n", (3480, 3497), False, 'from os import path\n'), ((1363, 1387), 'os.path.dirname', 'os.path.dirname', (['save_as'], {}), '(save_as)\n', (1378, 1387), False, 'import os\n')] |
# coding=utf-8
import argparse
import sys
import numpy as np
from collections import deque
from math import exp
import isa
from config import MATSIZE as WIDTH
args = None
# width of the tile
#WIDTH = 16
class TPUSim(object):
    """Instruction-level simulator for a tiny TPU-like accelerator.

    Reads a binary program, streams weights from a DRAM image, moves data
    between host memory and a unified buffer, and performs matrix multiplies
    into an accumulator.  Operand widths come from the ``isa`` module and
    the global ``args`` selects raw float32 vs quantized int8 mode.
    """
    def __init__(self, program_filename, dram_filename, hostmem_filename):
        # TODO: switch b/w 32-bit float vs int
        # Program stays open for streaming decode; closed at the end of run().
        self.program = open(program_filename, 'rb')
        self.weight_memory = np.load(dram_filename)
        self.host_memory = np.load(hostmem_filename)
        if not args.raw:
            assert self.weight_memory.dtype == np.int8, 'DRAM weight mem is not 8-bit ints'
            assert self.host_memory.dtype == np.int8, 'Hostmem not 8-bit ints'
        self.unified_buffer = (np.zeros((96000, WIDTH), dtype=np.float32) if args.raw else
                               np.zeros((96000, WIDTH), dtype=np.int8))
        self.accumulator = (np.zeros((4000, WIDTH), dtype=np.float32) if args.raw else
                            np.zeros((4000, WIDTH), dtype=np.int32))
        # Weights are queued in program order by RW and consumed by MMC.
        self.weight_fifo = deque()

    def run(self):
        """Fetch/decode/execute until HLT, then save host memory to disk."""
        # load program and execute instructions
        while True:
            instruction = self.decode()
            opcode, operands = instruction[0], instruction[1:]
            if opcode in ['RHM', 'WHM', 'RW']:
                self.memops(opcode, *operands)
            elif opcode == 'MMC':
                self.matrix_multiply_convolve(*operands)
            elif opcode == 'ACT':
                self.act(*operands)
            elif opcode == 'SYNC':
                pass
            elif opcode == 'NOP':
                pass
            elif opcode == 'HLT':
                print('H A L T')
                break
            else:
                raise Exception('WAT (╯°□°)╯︵ ┻━┻')

        # all done, exit
        savepath = 'sim32.npy' if args.raw else 'sim8.npy'
        np.save(savepath, self.host_memory)
        print(self.host_memory.astype('uint8'))
        self.program.close()
        print("""ALL DONE!
        (•_•)
        ( •_•)>⌐■-■
        (⌐■_■)""")

    def decode(self):
        """Read one fixed-width instruction from the program stream.

        Returns (opcode, src_addr, dest_addr, length, flag); field widths
        are defined in the isa module.
        """
        opcode = int.from_bytes(self.program.read(isa.OP_SIZE), byteorder='big')
        opcode = isa.BIN2OPCODE[opcode]

        flag = int.from_bytes(self.program.read(isa.FLAGS_SIZE), byteorder='big')
        length = int.from_bytes(self.program.read(isa.LEN_SIZE), byteorder='big')
        src_addr = int.from_bytes(self.program.read(isa.ADDR_SIZE), byteorder='big')
        dest_addr = int.from_bytes(self.program.read(isa.UB_ADDR_SIZE), byteorder='big')
        #print('{} decoding: len {}, flags {}, src {}, dst {}'.format(opcode, length, flag, src_addr, dest_addr))
        return opcode, src_addr, dest_addr, length, flag

    # opcodes
    def act(self, src, dest, length, flag):
        """Apply an activation (ReLU/sigmoid/identity, per flag) to
        accumulator rows [src, src+length) and write them to the unified
        buffer at dest."""
        print('ACTIVATE!')
        result = self.accumulator[src:src+length]
        if flag & isa.FUNC_RELU_MASK:
            print('    RELU!!!!')
            vfunc = np.vectorize(lambda x: 0 * x if x < 0. else x)
        elif flag & isa.FUNC_SIGMOID_MASK:
            print('    SIGMOID')
            vfunc = np.vectorize(lambda x: int(255./(1.+exp(-x))))
        else:
            vfunc = np.vectorize(lambda x: x)
            #raise Exception('(╯°□°)╯︵ ┻━┻ bad activation function!')

        result = vfunc(result)
        # downsample/normalize if needed
        if not args.raw:
            # Keep only the low byte of each element in quantized mode.
            result = [v & 0x000000FF for v in result]
        self.unified_buffer[dest:dest+length] = result

    def memops(self, opcode, src_addr, dest_addr, length, flag):
        """Execute a memory-transfer op: RHM (host->UB), WHM (UB->host),
        or RW (queue one DRAM weight tile into the MMU FIFO)."""
        print('Memory xfer! host: {} unified buffer: {}: length: {} (FLAGS? {})'.format(
            src_addr, dest_addr, length, flag
        ))

        if opcode == 'RHM':
            print('    read host memory to unified buffer')
            self.unified_buffer[dest_addr:dest_addr + length] = self.host_memory[src_addr:src_addr + length]
        elif opcode == 'WHM':
            print('    write unified buffer to host memory')
            self.host_memory[dest_addr:dest_addr + length] = self.unified_buffer[src_addr:src_addr + length]
        elif opcode == 'RW':
            print('    read weights from DRAM into MMU')
            self.weight_fifo.append(self.weight_memory[src_addr])
        else:
            raise Exception('WAT (╯°□°)╯︵ ┻━┻')

    def matrix_multiply_convolve(self, ub_addr, accum_addr, size, flags):
        """Multiply UB rows [ub_addr, ub_addr+size) by the next queued
        weight tile; write or accumulate into the accumulator per the
        overwrite flag."""
        print('Matrix things....')
        print('    UB@{} + {} -> MMU -> accumulator@{} + {}'.format(
            ub_addr, size, accum_addr, size
        ))

        inp = self.unified_buffer[ub_addr: ub_addr + size]
        print('MMC input shape: {}'.format(inp.shape))
        weight_mat = self.weight_fifo.popleft()
        print('MMC weight: {}'.format(weight_mat))
        if not args.raw:
            # Widen to int32 so the int8 product cannot overflow.
            inp = inp.astype(np.int32)
            weight_mat = weight_mat.astype(np.int32)
        out = np.matmul(inp, weight_mat)
        print('MMC output shape: {}'.format(out.shape))
        overwrite = isa.OVERWRITE_MASK & flags
        if overwrite:
            self.accumulator[accum_addr:accum_addr + size] = out
        else:
            self.accumulator[accum_addr:accum_addr + size] += out
def parse_args(argv=None):
    """Parse command-line options into the module-level ``args``.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse falls back to ``sys.argv[1:]`` (the original
            behavior). Passing an explicit list makes the function testable
            without mutating ``sys.argv``.

    Returns:
        The parsed ``argparse.Namespace`` (also stored in the global
        ``args`` for the rest of the module).
    """
    global args
    parser = argparse.ArgumentParser()
    parser.add_argument('program', action='store',
                        help='Path to assembly program file.')
    parser.add_argument('host_file', action='store',
                        help='Path to host file.')
    parser.add_argument('dram_file', action='store',
                        help='Path to dram file.')
    parser.add_argument('--raw', action='store_true', default=False,
                        help='Gen sim32.npy instead of sim8.npy.')
    args = parser.parse_args(argv)
    return args
if __name__ == '__main__':
    # Require the three positional paths before doing any flag parsing.
    if len(sys.argv) < 4:
        print('Usage:', sys.argv[0], 'PROGRAM_BINARY DRAM_FILE HOST_FILE')
        sys.exit(0)
    parse_args()
    TPUSim(args.program, args.dram_file, args.host_file).run()
| [
"numpy.load",
"numpy.save",
"numpy.vectorize",
"argparse.ArgumentParser",
"math.exp",
"numpy.zeros",
"numpy.matmul",
"sys.exit",
"collections.deque"
] | [((5174, 5199), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5197, 5199), False, 'import argparse\n'), ((432, 454), 'numpy.load', 'np.load', (['dram_filename'], {}), '(dram_filename)\n', (439, 454), True, 'import numpy as np\n'), ((482, 507), 'numpy.load', 'np.load', (['hostmem_filename'], {}), '(hostmem_filename)\n', (489, 507), True, 'import numpy as np\n'), ((1015, 1022), 'collections.deque', 'deque', ([], {}), '()\n', (1020, 1022), False, 'from collections import deque\n'), ((1832, 1867), 'numpy.save', 'np.save', (['savepath', 'self.host_memory'], {}), '(savepath, self.host_memory)\n', (1839, 1867), True, 'import numpy as np\n'), ((4828, 4854), 'numpy.matmul', 'np.matmul', (['inp', 'weight_mat'], {}), '(inp, weight_mat)\n', (4837, 4854), True, 'import numpy as np\n'), ((5826, 5837), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5834, 5837), False, 'import sys\n'), ((735, 777), 'numpy.zeros', 'np.zeros', (['(96000, WIDTH)'], {'dtype': 'np.float32'}), '((96000, WIDTH), dtype=np.float32)\n', (743, 777), True, 'import numpy as np\n'), ((807, 846), 'numpy.zeros', 'np.zeros', (['(96000, WIDTH)'], {'dtype': 'np.int8'}), '((96000, WIDTH), dtype=np.int8)\n', (815, 846), True, 'import numpy as np\n'), ((876, 917), 'numpy.zeros', 'np.zeros', (['(4000, WIDTH)'], {'dtype': 'np.float32'}), '((4000, WIDTH), dtype=np.float32)\n', (884, 917), True, 'import numpy as np\n'), ((947, 986), 'numpy.zeros', 'np.zeros', (['(4000, WIDTH)'], {'dtype': 'np.int32'}), '((4000, WIDTH), dtype=np.int32)\n', (955, 986), True, 'import numpy as np\n'), ((2907, 2954), 'numpy.vectorize', 'np.vectorize', (['(lambda x: 0 * x if x < 0.0 else x)'], {}), '(lambda x: 0 * x if x < 0.0 else x)\n', (2919, 2954), True, 'import numpy as np\n'), ((3129, 3154), 'numpy.vectorize', 'np.vectorize', (['(lambda x: x)'], {}), '(lambda x: x)\n', (3141, 3154), True, 'import numpy as np\n'), ((3084, 3091), 'math.exp', 'exp', (['(-x)'], {}), '(-x)\n', (3087, 3091), False, 'from math 
import exp\n')] |
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import importsd
# Vocabulary size handled by the embedding layer.
NUM_WORDS = 100
# Dimensionality of each word-embedding vector.
EMBEDDING_VEC_LEN = 100
# Number of hidden units in the LSTM layer.
HIDDEN_SIZE = 128
class LSTMModel(nn.Module):
    """Embedding -> LSTM -> Dropout -> Linear -> Sigmoid binary classifier.

    The attribute names (embedding/lstm/dropout/dense/sigmoid) become the
    state_dict keys, so they must match the C# counterpart exactly;
    otherwise remap the keys when loading.
    """
    def __init__(self):
        super(LSTMModel, self).__init__()
        self.embedding = nn.Embedding(NUM_WORDS, EMBEDDING_VEC_LEN)
        self.lstm = nn.LSTM(EMBEDDING_VEC_LEN, HIDDEN_SIZE, batch_first=True)
        self.dropout = nn.Dropout(0.5)
        self.dense = nn.Linear(HIDDEN_SIZE, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Embed token ids, run the LSTM, and classify from the final step.
        embedded = self.embedding(x)
        sequence_out, _ = self.lstm(embedded)
        final_step = self.dropout(sequence_out[:, -1, :])
        return self.sigmoid(self.dense(final_step))
class LeNet1Model(nn.Module):
    """LeNet-1 style MNIST classifier built as a single named Sequential.

    The layer names inside ``layers`` become state_dict keys and must match
    the C# implementation, so they are kept verbatim.
    """
    def __init__(self):
        super(LeNet1Model, self).__init__()
        # (name, module) pairs in forward order; names are load-bearing.
        spec = [
            ("conv-1", nn.Conv2d(1, 4, 5, padding=2)),
            ("bnrm2d-1", nn.BatchNorm2d(4)),
            ("relu-1", nn.ReLU()),
            ("maxpool-1", nn.MaxPool2d(2, stride=2)),
            ("conv-2", nn.Conv2d(4, 12, 5)),
            ("bnrm2d-2", nn.BatchNorm2d(12)),
            ("relu-2", nn.ReLU()),
            ("maxpool-2", nn.MaxPool2d(2, stride=2)),
            ("flatten", nn.Flatten()),
            ("linear", nn.Linear(300, 10)),
        ]
        self.layers = nn.Sequential(OrderedDict(spec))

    def forward(self, x):
        return self.layers(x)
def testLSTM():
    """Round-trip check: load LSTM weights exported from C#, evaluate on
    synthetic data, train briefly, and evaluate again."""
    print("testing LSTM")
    model = LSTMModel()
    with open("lstm.dat", "rb") as f:
        state = importsd.load_state_dict(f)
        # Remap key names here if the exported names differ, for example:
        # state = {k + "py": v for k, v in state}
        # Inspect the expected names with: print(model.state_dict())
    model.load_state_dict(state)

    # Deterministic synthetic batch.
    torch.manual_seed(0)
    np.random.seed(0)
    labels = torch.tensor(np.random.randint(0, 1, [100, 1]), dtype=torch.float)
    inputs = torch.tensor(np.random.randint(0, 100, [100, 100]))
    opt = optim.Adam(model.parameters(), lr=8e-5)
    loss_func = nn.BCELoss()

    def count_correct():
        # Switch to inference mode and count exact 0/1 matches.
        model.eval()
        rounded = torch.round(model(inputs))
        return torch.sum(rounded == labels).item()

    print(f"before training: {count_correct()} corrected")

    # Short optimization loop on the same batch.
    model.train()
    for _ in range(50):
        opt.zero_grad()  # Reset the gradient in every iteration
        loss = loss_func(model(inputs), labels)
        loss.backward()
        opt.step()

    print(f"after training: {count_correct()} corrected")
def testLeNet1():
    """Round-trip check: load LeNet-1 weights exported from C#, evaluate on
    synthetic digits, train briefly, and evaluate again."""
    print("testing LeNet1")
    model = LeNet1Model()
    with open("lenet1.dat", "rb") as f:
        state = importsd.load_state_dict(f)
        # Remap key names here if the exported names differ, for example:
        # state = {k + "py": v for k, v in state}
        # Inspect the expected names with: print(model.state_dict())
    model.load_state_dict(state)

    # Deterministic synthetic batch of fake 28x28 "images".
    torch.manual_seed(0)
    np.random.seed(0)
    labels = torch.tensor(np.random.randint(0, 10, [100]))
    inputs = torch.tensor(np.random.randint(0, 255, [100, 1, 28, 28]) / 255.0, dtype=torch.float32)
    opt = optim.Adam(model.parameters(), lr=8e-5)
    loss_func = nn.CrossEntropyLoss()

    def count_correct():
        # Switch to inference mode and count argmax matches.
        model.eval()
        _, predicted = torch.max(model(inputs).data, dim=1)
        return torch.sum(predicted == labels).item()

    print(f"before training: {count_correct()} corrected")

    # Short optimization loop on the same batch.
    model.train()
    for _ in range(200):
        opt.zero_grad()  # Reset the gradient in every iteration
        loss = loss_func(model(inputs), labels)
        loss.backward()
        opt.step()

    print(f"after training: {count_correct()} corrected")
if __name__ == '__main__':
    # Exercise both C#-export round-trips when run as a script.
    for check in (testLSTM, testLeNet1):
        check()
| [
"torch.nn.Dropout",
"numpy.random.seed",
"torch.nn.Embedding",
"numpy.random.randint",
"torch.nn.BCELoss",
"importsd.load_state_dict",
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.max",
"torch.nn.MaxPool2d",
"torch.sum",
"torc... | [((2316, 2336), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2333, 2336), False, 'import torch\n'), ((2341, 2358), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2355, 2358), True, 'import numpy as np\n'), ((2571, 2583), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (2581, 2583), True, 'import torch.nn as nn\n'), ((2683, 2701), 'torch.round', 'torch.round', (['preds'], {}), '(preds)\n', (2694, 2701), False, 'import torch\n'), ((3259, 3277), 'torch.round', 'torch.round', (['preds'], {}), '(preds)\n', (3270, 3277), False, 'import torch\n'), ((3805, 3825), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3822, 3825), False, 'import torch\n'), ((3830, 3847), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3844, 3847), True, 'import numpy as np\n'), ((4075, 4096), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4094, 4096), True, 'import torch.nn as nn\n'), ((4202, 4231), 'torch.max', 'torch.max', (['output.data'], {'dim': '(1)'}), '(output.data, dim=1)\n', (4211, 4231), False, 'import torch\n'), ((4799, 4828), 'torch.max', 'torch.max', (['output.data'], {'dim': '(1)'}), '(output.data, dim=1)\n', (4808, 4828), False, 'import torch\n'), ((459, 501), 'torch.nn.Embedding', 'nn.Embedding', (['NUM_WORDS', 'EMBEDDING_VEC_LEN'], {}), '(NUM_WORDS, EMBEDDING_VEC_LEN)\n', (471, 501), True, 'import torch.nn as nn\n'), ((522, 579), 'torch.nn.LSTM', 'nn.LSTM', (['EMBEDDING_VEC_LEN', 'HIDDEN_SIZE'], {'batch_first': '(True)'}), '(EMBEDDING_VEC_LEN, HIDDEN_SIZE, batch_first=True)\n', (529, 579), True, 'import torch.nn as nn\n'), ((603, 618), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (613, 618), True, 'import torch.nn as nn\n'), ((640, 665), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', '(1)'], {}), '(HIDDEN_SIZE, 1)\n', (649, 665), True, 'import torch.nn as nn\n'), ((689, 701), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (699, 701), True, 
'import torch.nn as nn\n'), ((1818, 1840), 'torch.nn.Sequential', 'nn.Sequential', (['modules'], {}), '(modules)\n', (1831, 1840), True, 'import torch.nn as nn\n'), ((2026, 2053), 'importsd.load_state_dict', 'importsd.load_state_dict', (['f'], {}), '(f)\n', (2050, 2053), False, 'import importsd\n'), ((2385, 2418), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)', '[100, 1]'], {}), '(0, 1, [100, 1])\n', (2402, 2418), True, 'import numpy as np\n'), ((2465, 2502), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '[100, 100]'], {}), '(0, 100, [100, 100])\n', (2482, 2502), True, 'import numpy as np\n'), ((3513, 3540), 'importsd.load_state_dict', 'importsd.load_state_dict', (['f'], {}), '(f)\n', (3537, 3540), False, 'import importsd\n'), ((3874, 3905), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '[100]'], {}), '(0, 10, [100])\n', (3891, 3905), True, 'import numpy as np\n'), ((2720, 2746), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (2729, 2746), False, 'import torch\n'), ((3296, 3322), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (3305, 3322), False, 'import torch\n'), ((3933, 3976), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '[100, 1, 28, 28]'], {}), '(0, 255, [100, 1, 28, 28])\n', (3950, 3976), True, 'import numpy as np\n'), ((4250, 4276), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (4259, 4276), False, 'import torch\n'), ((4847, 4873), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (4856, 4873), False, 'import torch\n'), ((1356, 1385), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(4)', '(5)'], {'padding': '(2)'}), '(1, 4, 5, padding=2)\n', (1365, 1385), True, 'import torch.nn as nn\n'), ((1413, 1430), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4)'], {}), '(4)\n', (1427, 1430), True, 'import torch.nn as nn\n'), ((1456, 1465), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1463, 
1465), True, 'import torch.nn as nn\n'), ((1494, 1519), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (1506, 1519), True, 'import torch.nn as nn\n'), ((1545, 1564), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(12)', '(5)'], {}), '(4, 12, 5)\n', (1554, 1564), True, 'import torch.nn as nn\n'), ((1592, 1610), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(12)'], {}), '(12)\n', (1606, 1610), True, 'import torch.nn as nn\n'), ((1636, 1645), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1643, 1645), True, 'import torch.nn as nn\n'), ((1674, 1699), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)'}), '(2, stride=2)\n', (1686, 1699), True, 'import torch.nn as nn\n'), ((1726, 1738), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (1736, 1738), True, 'import torch.nn as nn\n'), ((1764, 1782), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(10)'], {}), '(300, 10)\n', (1773, 1782), True, 'import torch.nn as nn\n')] |
import gc
import glob
import os
import shutil
import cv2
import keras
import keras.backend as K
import keras.callbacks as callbacks
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from keras import Model
from keras import backend as K
from keras import constraints, initializers, optimizers, regularizers
from keras.applications.xception import Xception
from keras.callbacks import Callback, ModelCheckpoint
from keras.engine import InputSpec
from keras.engine.topology import Input, get_source_inputs
from keras.engine.training import Model
from keras.layers import (
Activation,
Add,
BatchNormalization,
Concatenate,
Conv2D,
Conv2DTranspose,
Dense,
Dropout,
GlobalAveragePooling2D,
Input,
LeakyReLU,
MaxPooling2D,
Permute,
Reshape,
UpSampling2D,
ZeroPadding2D,
concatenate,
multiply,
)
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers.core import Activation, Dense, Lambda, SpatialDropout2D
from keras.layers.merge import add, concatenate
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D
from keras.legacy import interfaces
from keras.losses import binary_crossentropy
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.regularizers import l2
from keras.utils import conv_utils
from keras.utils.data_utils import get_file
from keras.utils.generic_utils import get_custom_objects
from PIL import Image
from skimage.transform import resize
from sklearn.model_selection import StratifiedKFold, train_test_split
from tqdm import tqdm_notebook
class DataGenerator(keras.utils.Sequence):
    """Keras ``Sequence`` that streams (image, mask) batches from disk.

    Image and mask files are looked up as ``<path><id>.png`` (note: the path
    is concatenated with the id, so ``image_path``/``mask_path`` are expected
    to end with a separator -- TODO confirm against the callers). Each sample
    is resized to ``img_size`` x ``img_size``; masks are binarised to
    {0, 255} and scaled to [0, 1] on output.
    """

    def __init__(
        self,
        list_ids,
        image_path,
        mask_path,
        augmentations,
        batch_size,
        img_size,
        n_channels=3,
        shuffle=True,
    ):
        # augmentations: callable taking image=/mask= keywords and returning a
        # dict with "image" and "mask" entries, or None to disable.
        self.indexes = []
        self.image_path = image_path
        self.masks_path = mask_path
        self.batch_size = batch_size
        self.list_ids = list_ids
        self.img_size = img_size
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.augment = augmentations
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch; the final batch may be short.
        return int(np.ceil(len(self.list_ids) / self.batch_size))

    def on_epoch_end(self):
        """Rebuild the sample order, reshuffling when requested."""
        self.indexes = np.arange(len(self.list_ids))
        if self.shuffle:  # idiom fix: was `self.shuffle == True`
            np.random.shuffle(self.indexes)

    def __getitem__(self, index):
        """Return batch `index` as (images, masks) with masks in [0, 1]."""
        indexes = self.indexes[
            index
            * self.batch_size : min((index + 1) * self.batch_size, len(self.list_ids))
        ]
        list_IDs_im = [self.list_ids[k] for k in indexes]
        X, y = self.data_generation(list_IDs_im)
        if self.augment is None:
            return X, np.array(y) / 255
        # Fix: the original loop reused `y` as its loop variable, shadowing
        # the batch of masks while iterating over it.
        im, mask = [], []
        for image, target in zip(X, y):
            augmented = self.augment(image=image, mask=target)
            im.append(augmented["image"])
            mask.append(augmented["mask"])
        return np.array(im), np.array(mask) / 255

    def data_generation(self, list_id_index):
        """Load, resize and binarise the images/masks for the given ids."""
        X = np.empty((len(list_id_index), self.img_size,
                      self.img_size, self.n_channels))
        y = np.empty((len(list_id_index), self.img_size, self.img_size, 1))
        for i, image_id in enumerate(list_id_index):
            im = np.array(Image.open(os.path.join(
                os.getcwd(), self.image_path) + image_id + '.png'))
            mask = np.array(Image.open(os.path.join(
                os.getcwd(), self.masks_path) + image_id + '.png'))
            if len(im.shape) == 2:
                # Grayscale input: replicate into the expected 3 channels.
                im = np.repeat(im[..., None], 3, 2)
            X[i, ] = cv2.resize(im, (self.img_size, self.img_size))
            y[i, ] = cv2.resize(
                mask, (self.img_size, self.img_size))[..., np.newaxis]
            # Any non-zero mask value counts as foreground.
            y[y > 0] = 255
        return np.uint8(X), np.uint8(y)
| [
"numpy.uint8",
"numpy.random.shuffle",
"os.getcwd",
"numpy.array",
"cv2.resize",
"numpy.repeat"
] | [((2543, 2574), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (2560, 2574), True, 'import numpy as np\n'), ((3860, 3906), 'cv2.resize', 'cv2.resize', (['im', '(self.img_size, self.img_size)'], {}), '(im, (self.img_size, self.img_size))\n', (3870, 3906), False, 'import cv2\n'), ((4055, 4066), 'numpy.uint8', 'np.uint8', (['X'], {}), '(X)\n', (4063, 4066), True, 'import numpy as np\n'), ((4068, 4079), 'numpy.uint8', 'np.uint8', (['y'], {}), '(y)\n', (4076, 4079), True, 'import numpy as np\n'), ((3188, 3200), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (3196, 3200), True, 'import numpy as np\n'), ((3808, 3838), 'numpy.repeat', 'np.repeat', (['im[..., None]', '(3)', '(2)'], {}), '(im[..., None], 3, 2)\n', (3817, 3838), True, 'import numpy as np\n'), ((3929, 3977), 'cv2.resize', 'cv2.resize', (['mask', '(self.img_size, self.img_size)'], {}), '(mask, (self.img_size, self.img_size))\n', (3939, 3977), False, 'import cv2\n'), ((2921, 2932), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2929, 2932), True, 'import numpy as np\n'), ((3202, 3216), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (3210, 3216), True, 'import numpy as np\n'), ((3578, 3589), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3587, 3589), False, 'import os\n'), ((3699, 3710), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3708, 3710), False, 'import os\n')] |
import re
import torch
# Example finding for thesis
device = torch.device("cuda")
from bert.preprocessing_bert import BertPreprocessor
from transformers import BertTokenizer, BertForSequenceClassification
import numpy as np
import torch.nn.functional as F
# Tokenizer matching the base checkpoint used for fine-tuning.
pretrained_weights = 'bert-base-uncased'
bert_tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
# Fine-tuned 3-way classifier weights (path suggests a SciEntsBank model --
# TODO confirm the checkpoint's training data).
PATH = '../models/bert_sciEntsBank/model.pt'
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3)
model.load_state_dict(torch.load(PATH))
model.cuda()
model.eval()
def list_to_string(list):
    """Concatenate a list of strings with single spaces between them.

    :param list: list of strings
    :return: the space-joined string
    """
    return ' '.join(list)
def separate_answers(bert_text, cls='[CLS]', sep='[SEP]'):
    """Split a BERT sequence-pair token list back into its two answers.

    WordPiece continuation tokens (``##xxx``) are first merged into the
    preceding token; note that `bert_text` is modified in place.

    :param bert_text: list of BERT word tokens
    :param cls: string of the CLS token
    :param sep: string of the SEP token
    :return: (first answer tokens, second answer tokens)
    """
    # Merge "##" continuations into their predecessor, then drop the
    # now-redundant predecessor entries (highest index first so earlier
    # positions stay valid while popping).
    merged = []
    for idx, token in enumerate(bert_text):
        if re.match('^##.*', token):
            bert_text[idx] = bert_text[idx - 1] + token[2:]
            merged.append(idx - 1)
    for pos in sorted(merged, reverse=True):
        bert_text.pop(pos)

    start = bert_text.index(cls) + 1
    first_sep = bert_text.index(sep)
    second_sep = bert_text.index(sep, first_sep + 1)
    return bert_text[start:first_sep], bert_text[first_sep + 1:second_sep]
def predict(model, ref, stud, orig_pred):
    """Score a (reference, student) answer pair with the given BERT model.

    List inputs are flattened to strings first. Returns the raw logits as a
    squeezed CPU tensor.
    """
    if type(ref) is list:
        ref = list_to_string(ref)
    if type(stud) is list:
        stud = list_to_string(stud)
    assert type(stud) is str and type(ref) is str

    preprocessor = BertPreprocessor(bert_tokenizer, data=[ref, stud, orig_pred])
    token_ids, segment, attention, lab = preprocessor.load_data()  # lab unused here

    def as_long_batch(seq):
        # Wrap one example into a batch of size 1 on the model's device.
        return torch.tensor([seq]).long().to(device)

    outputs = model.forward(
        input_ids=as_long_batch(token_ids),
        attention_mask=as_long_batch(attention),
        token_type_ids=as_long_batch(segment),
    )
    return outputs[0].detach().cpu().squeeze()
# Single qualitative example: print the predicted class index and the
# softmax confidence of that class.
a = predict(model, "if the motor runs , the object is a conductor .",
            "he will know because a conductor inevitably is not glowing in a circuit .", 0)
# NOTE(review): F.softmax without an explicit dim relies on the deprecated
# implicit-dim behaviour; `a` is 1-D here so dim=-1 would be equivalent.
print(int(np.argmax(a)), F.softmax(a)[int(np.argmax(a))])
| [
"transformers.BertForSequenceClassification.from_pretrained",
"numpy.argmax",
"torch.load",
"re.match",
"torch.nn.functional.softmax",
"bert.preprocessing_bert.BertPreprocessor",
"transformers.BertTokenizer.from_pretrained",
"torch.device",
"torch.tensor"
] | [((63, 83), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (75, 83), False, 'import torch\n'), ((317, 366), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['pretrained_weights'], {}), '(pretrained_weights)\n', (346, 366), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((420, 505), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['"""bert-base-uncased"""'], {'num_labels': '(3)'}), "('bert-base-uncased', num_labels=3\n )\n", (465, 505), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((523, 539), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (533, 539), False, 'import torch\n'), ((1135, 1158), 're.match', 're.match', (['pattern', 'word'], {}), '(pattern, word)\n', (1143, 1158), False, 'import re\n'), ((2385, 2397), 'numpy.argmax', 'np.argmax', (['a'], {}), '(a)\n', (2394, 2397), True, 'import numpy as np\n'), ((2400, 2412), 'torch.nn.functional.softmax', 'F.softmax', (['a'], {}), '(a)\n', (2409, 2412), True, 'import torch.nn.functional as F\n'), ((1794, 1855), 'bert.preprocessing_bert.BertPreprocessor', 'BertPreprocessor', (['bert_tokenizer'], {'data': '[ref, stud, orig_pred]'}), '(bert_tokenizer, data=[ref, stud, orig_pred])\n', (1810, 1855), False, 'from bert.preprocessing_bert import BertPreprocessor\n'), ((2417, 2429), 'numpy.argmax', 'np.argmax', (['a'], {}), '(a)\n', (2426, 2429), True, 'import numpy as np\n'), ((1884, 1909), 'torch.tensor', 'torch.tensor', (['[token_ids]'], {}), '([token_ids])\n', (1896, 1909), False, 'import torch\n'), ((1942, 1965), 'torch.tensor', 'torch.tensor', (['[segment]'], {}), '([segment])\n', (1954, 1965), False, 'import torch\n'), ((2000, 2025), 'torch.tensor', 'torch.tensor', (['[attention]'], {}), '([attention])\n', (2012, 2025), False, 'import torch\n')] |
import os
import csv
import numpy as np
from config import config as cfg
class KITTILoader():
    """Parse KITTI label files into per-object annotation dicts.

    For every ``label_2/*.txt`` file of the chosen subset, objects whose
    class is in ``cfg().KITTI_cat`` are collected into ``self.image_data``;
    the matching image paths are collected into ``self.images``.
    """

    def __init__(self, subset='training'):
        super(KITTILoader, self).__init__()
        self.base_dir = cfg().base_dir
        self.KITTI_cat = cfg().KITTI_cat

        # Fail fast on an unknown subset. Previously neither branch below
        # assigned `annotation`, so the first matching row raised a
        # confusing NameError (or silently re-appended a stale annotation).
        if subset not in ('training', 'eval'):
            raise ValueError(
                "subset must be 'training' or 'eval', got %r" % (subset,))

        label_dir = os.path.join(self.base_dir, subset, 'label_2')
        image_dir = os.path.join(self.base_dir, subset, 'image_2')

        self.image_data = []
        self.images = []

        fieldnames = ['type', 'truncated', 'occluded', 'alpha', 'xmin', 'ymin', 'xmax', 'ymax', 'dh', 'dw', 'dl',
                      'lx', 'ly', 'lz', 'ry']
        for fn in os.listdir(label_dir):
            label_full_path = os.path.join(label_dir, fn)
            image_full_path = os.path.join(image_dir, fn.replace('.txt', '.png'))
            self.images.append(image_full_path)
            with open(label_full_path, 'r') as csv_file:
                reader = csv.DictReader(csv_file, delimiter=' ', fieldnames=fieldnames)
                for row in reader:
                    if row['type'] not in self.KITTI_cat:
                        continue
                    if subset == 'training':
                        # Training needs the orientation mapped to [0, 2pi).
                        new_alpha = get_new_alpha(row['alpha'])
                        dimensions = np.array([float(row['dh']), float(row['dw']), float(row['dl'])])
                        annotation = {'name': row['type'], 'image': image_full_path,
                                      'xmin': int(float(row['xmin'])), 'ymin': int(float(row['ymin'])),
                                      'xmax': int(float(row['xmax'])), 'ymax': int(float(row['ymax'])),
                                      'dims': dimensions, 'new_alpha': new_alpha}
                    else:  # 'eval' keeps the raw alpha plus location/rotation
                        dimensions = np.array([float(row['dh']), float(row['dw']), float(row['dl'])])
                        translations = np.array([float(row['lx']), float(row['ly']), float(row['lz'])])
                        annotation = {'name': row['type'], 'image': image_full_path,
                                      'alpha': float(row['alpha']),
                                      'xmin': int(float(row['xmin'])), 'ymin': int(float(row['ymin'])),
                                      'xmax': int(float(row['xmax'])), 'ymax': int(float(row['ymax'])),
                                      'dims': dimensions, 'trans': translations, 'rot_y': float(row['ry'])}
                    self.image_data.append(annotation)

    def get_average_dimension(self):
        """Per-category running mean of (dh, dw, dl).

        :return: (dict of category -> mean dims array,
                  dict of category -> sample count)
        """
        # Float accumulators from the start (the old int-dtype np.array
        # init only worked because the first assignment replaced it).
        dims_avg = {key: np.zeros(3) for key in self.KITTI_cat}
        dims_cnt = {key: 0 for key in self.KITTI_cat}

        for annotation in self.image_data:
            name = annotation['name']
            if name in self.KITTI_cat:
                # Incremental mean: new = (cnt * old + dims) / (cnt + 1)
                dims_avg[name] = dims_cnt[name] * dims_avg[name] + annotation['dims']
                dims_cnt[name] += 1
                dims_avg[name] /= dims_cnt[name]
        return dims_avg, dims_cnt
def get_new_alpha(alpha):
    """Shift a KITTI orientation from [-pi, pi] into [0, 2*pi).

    :param alpha: original orientation (number or numeric string)
    :return: the shifted angle as a float
    """
    two_pi = 2. * np.pi
    shifted = float(alpha) + np.pi / 2.
    if shifted < 0:
        shifted += two_pi
    # Wrap with the same truncating arithmetic as before (equivalent to a
    # modulo for the non-negative values produced above).
    shifted -= int(shifted / two_pi) * two_pi
    return shifted
if __name__ == '__main__':
    # NOTE(review): base_dir is assigned but never used -- KITTILoader reads
    # cfg().base_dir instead; confirm which path is authoritative.
    base_dir = '/home/user/Deep3DBOX_Keras_Modified/kitti_test'
    KITTI_gen = KITTILoader(subset='training')
    # Print per-category mean dimensions and sample counts.
    dim_avg, dim_cnt = KITTI_gen.get_average_dimension()
    print(dim_avg, dim_cnt)
    # input = 180 * np.pi / 180
    # a = KITTILoader.get_new_alpha(input)
    # print(a * 180 / np.pi)
| [
"config.config",
"csv.DictReader",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((284, 330), 'os.path.join', 'os.path.join', (['self.base_dir', 'subset', '"""label_2"""'], {}), "(self.base_dir, subset, 'label_2')\n", (296, 330), False, 'import os\n'), ((351, 397), 'os.path.join', 'os.path.join', (['self.base_dir', 'subset', '"""image_2"""'], {}), "(self.base_dir, subset, 'image_2')\n", (363, 397), False, 'import os\n'), ((207, 212), 'config.config', 'cfg', ([], {}), '()\n', (210, 212), True, 'from config import config as cfg\n'), ((247, 252), 'config.config', 'cfg', ([], {}), '()\n', (250, 252), True, 'from config import config as cfg\n'), ((485, 506), 'os.listdir', 'os.listdir', (['label_dir'], {}), '(label_dir)\n', (495, 506), False, 'import os\n'), ((539, 566), 'os.path.join', 'os.path.join', (['label_dir', 'fn'], {}), '(label_dir, fn)\n', (551, 566), False, 'import os\n'), ((2608, 2627), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2616, 2627), True, 'import numpy as np\n'), ((948, 1010), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {'delimiter': '""" """', 'fieldnames': 'fieldnames'}), "(csv_file, delimiter=' ', fieldnames=fieldnames)\n", (962, 1010), False, 'import csv\n')] |
#!/usr/bin/python
'''
mjtsai1974@20180606, v1.0, simple subplot with figure size
https://stackoverflow.com/questions/41530975/set-size-of-subplot-in-matplotlib
'''
import numpy as np
import matplotlib.pyplot as plt
# Random scatter data shared by every panel.
x = np.random.randn(20)
y = np.random.randn(20)
fig = plt.figure(figsize=(8, 20))
for panel in range(10):
    ax = fig.add_subplot(5, 2, panel + 1)
    ax.plot(x, y, 'o')
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    # Force a square panel: scale the aspect by the data-range ratio.
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    ax.set_aspect(abs(x_hi - x_lo) / abs(y_hi - y_lo))
plt.show()
fig.savefig('plot.pdf', bbox_inches='tight') | [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randn"
] | [((223, 242), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (238, 242), True, 'import numpy as np\n'), ((247, 266), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (262, 266), True, 'import numpy as np\n'), ((309, 336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 20)'}), '(figsize=(8, 20))\n', (319, 336), True, 'import matplotlib.pyplot as plt\n'), ((620, 630), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (628, 630), True, 'import matplotlib.pyplot as plt\n'), ((400, 419), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (408, 419), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from scipy.spatial.distance import pdist, squareform
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.manifold import MDS
def rf_proximities(forest, X):
    """Return the random-forest proximity matrix for the samples in X.

    Entry (i, j) is the fraction of trees in which samples i and j fall
    into the same leaf: 1.0 for samples routed identically, 0.0 for
    samples that never share a leaf. The diagonal is 0 (squareform).
    """
    leaf_indices = forest.apply(X)
    shared = pdist(leaf_indices, lambda u, v: (u == v).sum())
    return squareform(shared / forest.n_estimators)
# Load the digits dataset and sort samples by class so the proximity
# matrix shows a visible block structure.
data = load_digits()
X, y = data.data, data.target
indices = np.argsort(y)
X = X[indices]
y = y[indices]
# X = X[y < 2]
# y = y[y < 2]
forest = RandomForestClassifier(n_estimators=50, n_jobs=2, random_state=1).fit(X, y)
prox = rf_proximities(forest, X)
# Visualise the proximity matrix itself.
plt.matshow(prox, cmap="Reds")
plt.show()
# Embed samples in 2-D from the dissimilarity 1 - proximity.
model = MDS(dissimilarity="precomputed", n_jobs=2)
coords = model.fit_transform(1. - prox)
# One colour per class, spread over the hsv colormap.
n_classes = forest.n_classes_
cm = plt.get_cmap("hsv")
colors = (cm(1. * i / n_classes) for i in range(n_classes))
for k, c in zip(range(n_classes), colors):
    plt.plot(coords[y == k, 0], coords[y == k, 1], '.', label=k, color=c)
plt.legend(loc="best")
plt.show()
| [
"sklearn.ensemble.RandomForestClassifier",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.plot",
"scipy.spatial.distance.squareform",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.matshow",
"numpy.argsort",
"sklearn.manifold.MDS"
] | [((431, 444), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (442, 444), False, 'from sklearn.datasets import load_digits\n'), ((486, 499), 'numpy.argsort', 'np.argsort', (['y'], {}), '(y)\n', (496, 499), True, 'import numpy as np\n'), ((681, 711), 'matplotlib.pyplot.matshow', 'plt.matshow', (['prox'], {'cmap': '"""Reds"""'}), "(prox, cmap='Reds')\n", (692, 711), True, 'import matplotlib.pyplot as plt\n'), ((712, 722), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (720, 722), True, 'import matplotlib.pyplot as plt\n'), ((732, 774), 'sklearn.manifold.MDS', 'MDS', ([], {'dissimilarity': '"""precomputed"""', 'n_jobs': '(2)'}), "(dissimilarity='precomputed', n_jobs=2)\n", (735, 774), False, 'from sklearn.manifold import MDS\n'), ((851, 870), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""hsv"""'], {}), "('hsv')\n", (863, 870), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1073), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1061, 1073), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1082, 1084), True, 'import matplotlib.pyplot as plt\n'), ((389, 405), 'scipy.spatial.distance.squareform', 'squareform', (['prox'], {}), '(prox)\n', (399, 405), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((980, 1049), 'matplotlib.pyplot.plot', 'plt.plot', (['coords[y == k, 0]', 'coords[y == k, 1]', '"""."""'], {'label': 'k', 'color': 'c'}), "(coords[y == k, 0], coords[y == k, 1], '.', label=k, color=c)\n", (988, 1049), True, 'import matplotlib.pyplot as plt\n'), ((571, 636), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(50)', 'n_jobs': '(2)', 'random_state': '(1)'}), '(n_estimators=50, n_jobs=2, random_state=1)\n', (593, 636), False, 'from sklearn.ensemble import RandomForestClassifier\n')] |
# coding: utf-8
import argparse
import copy
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from models import LeNet5Client, LeNet5Server, ResNet_cifarClient, ResNet_cifarServer
from split_data import iid as iid_f
from split_data import noniid
from flearn.client.datasets import get_dataloader, get_datasets, get_split_loader
from flearn.common.utils import get_free_gpu_id, setup_seed
# Seed python/numpy/torch RNGs for reproducibility.
setup_seed(0)
# Pick an idle GPU and pin the process to it.
idx = get_free_gpu_id()
print("使用{}号GPU".format(idx))
if idx != -1:
    os.environ["CUDA_VISIBLE_DEVICES"] = str(idx)
    torch.cuda.current_device()
    # NOTE(review): touches a private torch attribute; verify it is still
    # needed on current PyTorch versions.
    torch.cuda._initialized = True
else:
    raise SystemError("No Free GPU Device")
parser = argparse.ArgumentParser(description="Please input conf")
parser.add_argument("--local_epoch", dest="local_epoch", default=1, type=int)
parser.add_argument("--frac", dest="frac", default=1, type=float)
parser.add_argument("--suffix", dest="suffix", default="", type=str)
parser.add_argument("--iid", dest="iid", action="store_true")
parser.add_argument(
    "--dataset_name",
    dest="dataset_name",
    default="mnist",
    choices=["mnist", "cifar10", "cifar100"],
    type=str,
)
args = parser.parse_args()
# NOTE: `iid` is a bool (store_true flag).
iid = args.iid
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Dataset setup.
dataset_name = args.dataset_name
dataset_fpath = "/mnt/data-ssd/"
num_classes = 10
batch_size = 128
if "cifar" in dataset_name:
    dataset_fpath = os.path.join(dataset_fpath, "CIFAR")
trainset, testset = get_datasets(dataset_name, dataset_fpath)
# Model setup: client half + server half of the split network.
if dataset_name == "mnist":
    model_base = LeNet5Client(num_classes=num_classes)
    model_server = LeNet5Server(num_classes=num_classes)
elif "cifar" in dataset_name:
    model_base = ResNet_cifarClient(
        dataset=args.dataset_name,
        resnet_size=8,
        group_norm_num_groups=None,
        freeze_bn=False,
        freeze_bn_affine=False,
    )
    model_server = ResNet_cifarServer(
        dataset=args.dataset_name,
        resnet_size=8,
        group_norm_num_groups=None,
        freeze_bn=False,
        freeze_bn_affine=False,
    )
# The server model has its own optimizer/criterion (shared globals used by
# server_forward_backward below).
optim_server = optim.SGD(model_server.parameters(), lr=1e-1)
criterion_server = nn.CrossEntropyLoss()
# Checkpoint directory.
model_fpath = "./ckpts{}".format(args.suffix)
if not os.path.isdir(model_fpath):
    os.mkdir(model_fpath)
def inin_single_client(client_id, trainloader_idx_lst, testloader_idx_lst):
    """Build the configuration dict for one client.

    Deep-copies the shared client model so every participant trains its own
    weights, attaches a fresh SGD optimizer and the client's split of the
    train/test data.
    """
    client_model = copy.deepcopy(model_base)
    client_optim = optim.SGD(client_model.parameters(), lr=1e-1)
    trainloader, testloader = get_split_loader(
        trainset,
        testset,
        trainloader_idx_lst[client_id],
        testloader_idx_lst[client_id],
        batch_size,
        num_workers=0,
    )

    conf = {
        "model": client_model,
        "criterion": nn.CrossEntropyLoss(),
        "optimizer": client_optim,
        "trainloader": trainloader,
        "testloader": testloader,
        "model_fname": "client{}_round_{}.pth".format(client_id, "{}"),
        "client_id": client_id,
        "device": device,
        "model_fpath": model_fpath,
        "epoch": args.local_epoch,
        "dataset_name": dataset_name,
        "save": False,
        "display": False,
        "log": False,
    }
    return conf
def server_forward_backward(target, client_output, device, is_train=True):
    """Server half of one split-learning step.

    Runs the module-global server model on the cut-layer activations
    received from a client and computes the loss with the module-global
    criterion. In training mode it also steps the server optimizer and
    returns the gradient w.r.t. the client activations so the client can
    finish its own backward pass.

    Returns (client_grad, correct_count, loss) when training, otherwise
    (correct_count, loss).
    """
    target = target.to(device)
    client_output = client_output.to(device)
    output = model_server(client_output)
    loss = criterion_server(output, target)
    if is_train:
        optim_server.zero_grad()
        loss.backward()
        optim_server.step()
        # client_output was created detached with requires_grad=True (see
        # client_forward), so backward() deposited the cut-layer gradient
        # in its .grad; clone it before handing it back.
        client_grad = client_output.grad.clone().detach()
        return (
            client_grad,
            (output.data.max(1)[1] == target.data).sum().item(),
            loss.data.item(),
        )
    return (output.data.max(1)[1] == target.data).sum().item(), loss.data.item()
def client_forward(data, model_):
    """Client half of the split forward pass.

    Returns the in-graph activation (kept for the client's own backward)
    together with a detached copy that requires grad, which is what gets
    sent to the server.
    """
    in_graph_activation = model_(data)
    cut_layer_output = in_graph_activation.clone().detach().requires_grad_(True)
    return in_graph_activation, cut_layer_output
def client_backward(client_output_tmp, client_grad, optimizer_):
    """Finish the client-side backward pass of one split-learning step.

    The server ships the loss gradient w.r.t. the cut-layer activation;
    seeding backward() with it propagates that gradient through the client
    model, after which one optimizer step updates the client weights.
    """
    optimizer_.zero_grad()
    client_output_tmp.backward(gradient=client_grad)
    optimizer_.step()
if __name__ == "__main__":
    # Number of clients; k = how many would upload per round (currently all
    # clients participate every round, so k is informational only).
    client_numbers = 20
    k = int(client_numbers * args.frac)

    print("切分数据集")
    # BUGFIX: `--iid` is a store_true flag, so `iid` is a bool. The old
    # comparison `iid == "True"` was always False, silently forcing the
    # non-IID split even when --iid was passed.
    if iid:
        trainloader_idx_lst = iid_f(trainset, client_numbers)
        testloader_idx_lst = iid_f(testset, client_numbers)
    else:
        # Each client holds `shard_per_user` label shards (non-IID split).
        shard_per_user = 2
        if dataset_name == "cifar100":
            shard_per_user = 20
        trainloader_idx_lst, rand_set_all = noniid(
            trainset, client_numbers, shard_per_user
        )
        # Reuse the shard assignment so each client's test labels match
        # its training labels.
        testloader_idx_lst, rand_set_all = noniid(
            testset, client_numbers, shard_per_user, rand_set_all=rand_set_all
        )
    _, glob_testloader = get_dataloader(trainset, testset, batch_size, pin_memory=True)

    print("初始化客户端")
    client_lst = []
    for client_id in range(client_numbers):
        conf_params = inin_single_client(
            client_id, trainloader_idx_lst, testloader_idx_lst
        )
        client_lst.append(conf_params)

    Round = 1000
    model_server.to(device)
    model_server.train()
    for ri in range(Round):
        print("Round {}:".format(ri))
        round_loss_lst, round_trainacc_lst, round_testacc_lst = [], [], []
        for id_, client in enumerate(client_lst):
            model_ = client["model"]
            optimizer_ = client["optimizer"]
            trainloader = client["trainloader"]
            testloader = client["testloader"]
            # testloader = glob_testloader
            model_.to(device)
            model_.train()

            # --- local training: client forward, server fwd+bwd, client bwd
            loop_loss, accuracy = [], []
            for data, target in trainloader:
                data = data.to(device)
                client_output_tmp, client_output = client_forward(data, model_)
                client_grad, acc, loss = server_forward_backward(
                    target, client_output, device
                )
                accuracy.append(acc)
                loop_loss.append(loss / len(trainloader))
                client_backward(client_output_tmp, client_grad, optimizer_)
            client["model"] = model_

            # --- evaluation on the client's test split
            model_.eval()
            test_loop_loss, test_accuracy = [], []
            for data, target in testloader:
                data = data.to(device)
                with torch.no_grad():
                    client_output_tmp, client_output = client_forward(data, model_)
                    acc, loss = server_forward_backward(
                        target, client_output, device, is_train=False
                    )
                test_accuracy.append(acc)
                # BUGFIX: normalise test loss by the number of *test*
                # batches; it previously divided by len(trainloader).
                test_loop_loss.append(loss / len(testloader))

            round_loss_lst.append(np.sum(loop_loss))
            round_trainacc_lst.append(np.sum(accuracy) / len(trainloader.dataset) * 100)
            round_testacc_lst.append(
                np.sum(test_accuracy) / len(testloader.dataset) * 100
            )
        print(
            "Loss: {:.4f} TrainAcc: {:.4f} TestAcc: {:.4f}".format(
                np.mean(round_loss_lst),
                np.mean(round_trainacc_lst),
                np.mean(round_testacc_lst),
            )
        )
| [
"os.mkdir",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.mean",
"torch.cuda.current_device",
"torch.no_grad",
"os.path.join",
"flearn.common.utils.setup_seed",
"flearn.client.datasets.get_dataloader",
"models.ResNet_cifarServer",
"split_data.noniid",
"flearn.client.datasets.get_datasets",
... | [((457, 470), 'flearn.common.utils.setup_seed', 'setup_seed', (['(0)'], {}), '(0)\n', (467, 470), False, 'from flearn.common.utils import get_free_gpu_id, setup_seed\n'), ((477, 494), 'flearn.common.utils.get_free_gpu_id', 'get_free_gpu_id', ([], {}), '()\n', (492, 494), False, 'from flearn.common.utils import get_free_gpu_id, setup_seed\n'), ((716, 772), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Please input conf"""'}), "(description='Please input conf')\n", (739, 772), False, 'import argparse\n'), ((1528, 1569), 'flearn.client.datasets.get_datasets', 'get_datasets', (['dataset_name', 'dataset_fpath'], {}), '(dataset_name, dataset_fpath)\n', (1540, 1569), False, 'from flearn.client.datasets import get_dataloader, get_datasets, get_split_loader\n'), ((2220, 2241), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2239, 2241), True, 'import torch.nn as nn\n'), ((593, 620), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (618, 620), False, 'import torch\n'), ((1471, 1507), 'os.path.join', 'os.path.join', (['dataset_fpath', '"""CIFAR"""'], {}), "(dataset_fpath, 'CIFAR')\n", (1483, 1507), False, 'import os\n'), ((1623, 1660), 'models.LeNet5Client', 'LeNet5Client', ([], {'num_classes': 'num_classes'}), '(num_classes=num_classes)\n', (1635, 1660), False, 'from models import LeNet5Client, LeNet5Server, ResNet_cifarClient, ResNet_cifarServer\n'), ((1680, 1717), 'models.LeNet5Server', 'LeNet5Server', ([], {'num_classes': 'num_classes'}), '(num_classes=num_classes)\n', (1692, 1717), False, 'from models import LeNet5Client, LeNet5Server, ResNet_cifarClient, ResNet_cifarServer\n'), ((2296, 2322), 'os.path.isdir', 'os.path.isdir', (['model_fpath'], {}), '(model_fpath)\n', (2309, 2322), False, 'import os\n'), ((2328, 2349), 'os.mkdir', 'os.mkdir', (['model_fpath'], {}), '(model_fpath)\n', (2336, 2349), False, 'import os\n'), ((2441, 2466), 'copy.deepcopy', 'copy.deepcopy', 
(['model_base'], {}), '(model_base)\n', (2454, 2466), False, 'import copy\n'), ((2551, 2680), 'flearn.client.datasets.get_split_loader', 'get_split_loader', (['trainset', 'testset', 'trainloader_idx_lst[client_id]', 'testloader_idx_lst[client_id]', 'batch_size'], {'num_workers': '(0)'}), '(trainset, testset, trainloader_idx_lst[client_id],\n testloader_idx_lst[client_id], batch_size, num_workers=0)\n', (2567, 2680), False, 'from flearn.client.datasets import get_dataloader, get_datasets, get_split_loader\n'), ((4880, 4942), 'flearn.client.datasets.get_dataloader', 'get_dataloader', (['trainset', 'testset', 'batch_size'], {'pin_memory': '(True)'}), '(trainset, testset, batch_size, pin_memory=True)\n', (4894, 4942), False, 'from flearn.client.datasets import get_dataloader, get_datasets, get_split_loader\n'), ((1276, 1301), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1299, 1301), False, 'import torch\n'), ((1766, 1899), 'models.ResNet_cifarClient', 'ResNet_cifarClient', ([], {'dataset': 'args.dataset_name', 'resnet_size': '(8)', 'group_norm_num_groups': 'None', 'freeze_bn': '(False)', 'freeze_bn_affine': '(False)'}), '(dataset=args.dataset_name, resnet_size=8,\n group_norm_num_groups=None, freeze_bn=False, freeze_bn_affine=False)\n', (1784, 1899), False, 'from models import LeNet5Client, LeNet5Server, ResNet_cifarClient, ResNet_cifarServer\n'), ((1962, 2095), 'models.ResNet_cifarServer', 'ResNet_cifarServer', ([], {'dataset': 'args.dataset_name', 'resnet_size': '(8)', 'group_norm_num_groups': 'None', 'freeze_bn': '(False)', 'freeze_bn_affine': '(False)'}), '(dataset=args.dataset_name, resnet_size=8,\n group_norm_num_groups=None, freeze_bn=False, freeze_bn_affine=False)\n', (1980, 2095), False, 'from models import LeNet5Client, LeNet5Server, ResNet_cifarClient, ResNet_cifarServer\n'), ((2792, 2813), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2811, 2813), True, 'import torch.nn as nn\n'), ((4400, 4431), 
'split_data.iid', 'iid_f', (['trainset', 'client_numbers'], {}), '(trainset, client_numbers)\n', (4405, 4431), True, 'from split_data import iid as iid_f\n'), ((4461, 4491), 'split_data.iid', 'iid_f', (['testset', 'client_numbers'], {}), '(testset, client_numbers)\n', (4466, 4491), True, 'from split_data import iid as iid_f\n'), ((4644, 4692), 'split_data.noniid', 'noniid', (['trainset', 'client_numbers', 'shard_per_user'], {}), '(trainset, client_numbers, shard_per_user)\n', (4650, 4692), False, 'from split_data import noniid\n'), ((4758, 4832), 'split_data.noniid', 'noniid', (['testset', 'client_numbers', 'shard_per_user'], {'rand_set_all': 'rand_set_all'}), '(testset, client_numbers, shard_per_user, rand_set_all=rand_set_all)\n', (4764, 4832), False, 'from split_data import noniid\n'), ((7702, 7725), 'numpy.mean', 'np.mean', (['round_loss_lst'], {}), '(round_loss_lst)\n', (7709, 7725), True, 'import numpy as np\n'), ((7743, 7770), 'numpy.mean', 'np.mean', (['round_trainacc_lst'], {}), '(round_trainacc_lst)\n', (7750, 7770), True, 'import numpy as np\n'), ((7788, 7814), 'numpy.mean', 'np.mean', (['round_testacc_lst'], {}), '(round_testacc_lst)\n', (7795, 7814), True, 'import numpy as np\n'), ((6621, 6636), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6634, 6636), False, 'import torch\n'), ((7012, 7029), 'numpy.sum', 'np.sum', (['loop_loss'], {}), '(loop_loss)\n', (7018, 7029), True, 'import numpy as np\n'), ((7070, 7086), 'numpy.sum', 'np.sum', (['accuracy'], {}), '(accuracy)\n', (7076, 7086), True, 'import numpy as np\n'), ((7175, 7196), 'numpy.sum', 'np.sum', (['test_accuracy'], {}), '(test_accuracy)\n', (7181, 7196), True, 'import numpy as np\n')] |
"""
===================
Quantile regression
===================
This example illustrates how quantile regression can predict non-trivial
conditional quantiles.
The left figure shows the case when the error distribution is normal,
but has non-constant variance, i.e. with heteroscedasticity.
The right figure shows an example of an asymmetric error distribution,
namely the Pareto distribution.
"""
print(__doc__)
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
# %%
# Dataset generation
# ------------------
#
# To illustrate the behaviour of quantile regression, we will generate two
# synthetic datasets. The true generative random processes for both datasets
# will be composed by the same expected value with a linear relationship with a
# single feature `x`.
import numpy as np
# Fixed seed so the generated datasets (and thus the figures) are reproducible.
rng = np.random.RandomState(42)
x = np.linspace(start=0, stop=10, num=100)
X = x[:, np.newaxis]
y_true_mean = 10 + 0.5 * x
# %%
# We will create two subsequent problems by changing the distribution of the
# target `y` while keeping the same expected value:
#
# - in the first case, a heteroscedastic Normal noise is added;
# - in the second case, an asymmetric Pareto noise is added.
# Noise standard deviation grows linearly with `x` -> heteroscedasticity.
y_normal = y_true_mean + rng.normal(
    loc=0, scale=0.5 + 0.5 * x, size=x.shape[0]
)
a = 5
# The Pareto(a) draw is shifted by its mean 1 / (a - 1) so the added noise is
# centered and E[y_pareto] stays equal to y_true_mean.
y_pareto = y_true_mean + 10 * (rng.pareto(a, size=x.shape[0]) - 1 / (a - 1))
# %%
# Let's first visualize the datasets as well as the distribution of the
# residuals `y - mean(y)`.
import matplotlib.pyplot as plt
# Top row: observations against the true mean; bottom row: residual histograms.
_, axs = plt.subplots(
    nrows=2, ncols=2, figsize=(15, 11), sharex="row", sharey="row"
)
axs[0, 0].plot(x, y_true_mean, label="True mean")
axs[0, 0].scatter(
    x, y_normal, color="black", alpha=0.5, label="Observations"
)
axs[1, 0].hist(y_true_mean - y_normal, edgecolor="black")
axs[0, 1].plot(x, y_true_mean, label="True mean")
axs[0, 1].scatter(
    x, y_pareto, color="black", alpha=0.5, label="Observations"
)
axs[1, 1].hist(y_true_mean - y_pareto, edgecolor="black")
axs[0, 0].set_title("Dataset with heteroscedastic Normal distributed targets")
axs[0, 1].set_title("Dataset with asymmetric Pareto distributed target")
axs[1, 0].set_title(
    "Residuals distribution for heteroscedastic Normal distributed targets"
)
axs[1, 1].set_title(
    "Residuals distribution for asymmetric Pareto distributed target"
)
axs[0, 0].legend()
axs[0, 1].legend()
axs[0, 0].set_ylabel("y")
axs[1, 0].set_ylabel("Counts")
axs[0, 1].set_xlabel("x")
axs[0, 0].set_xlabel("x")
axs[1, 0].set_xlabel("Residuals")
_ = axs[1, 1].set_xlabel("Residuals")
# %%
# With the heteroscedastic Normal distributed target, we observe that the
# variance of the noise is increasing when the value of the feature `x` is
# increasing.
#
# With the asymmetric Pareto distributed target, we observe that the positive
# residuals are bounded.
#
# These types of noisy targets make the estimation via
# :class:`~sklearn.linear_model.LinearRegression` less efficient, i.e. we need
# more data to get stable results and, in addition, large outliers can have a
# huge impact on the fitted coefficients. (Stated otherwise: in a setting with
# constant variance, ordinary least squares estimators converge much faster to
# the *true* coefficients with increasing sample size.)
#
# In this asymmetric setting, the median or different quantiles give additional
# insights. On top of that, median estimation is much more robust to outliers
# and heavy tailed distributions. But note that extreme quantiles are estimated
# by very view data points. 95% quantile are more or less estimated by the 5%
# largest values and thus also a bit sensitive outliers.
#
# In the remainder of this tutorial, we will show how
# :class:`~sklearn.linear_model.QuantileRegressor` can be used in practice and
# give the intuition into the properties of the fitted models. Finally,
# we will compare the both :class:`~sklearn.linear_model.QuantileRegressor`
# and :class:`~sklearn.linear_model.LinearRegression`.
#
# Fitting a `QuantileRegressor`
# -----------------------------
#
# In this section, we want to estimate the conditional median as well as
# a low and high quantile fixed at 5% and 95%, respectively. Thus, we will get
# three linear models, one for each quantile.
#
# We will use the quantiles at 5% and 95% to find the outliers in the training
# sample beyond the central 90% interval.
from sklearn.linear_model import QuantileRegressor
quantiles = [0.05, 0.5, 0.95]
predictions = {}
# Marks samples that fall outside the central 90% predicted interval.
out_bounds_predictions = np.zeros_like(y_true_mean, dtype=np.bool_)
for quantile in quantiles:
    # alpha=0 switches off the L1 regularization: a pure pinball-loss fit.
    qr = QuantileRegressor(quantile=quantile, alpha=0)
    y_pred = qr.fit(X, y_normal).predict(X)
    predictions[quantile] = y_pred
    if quantile == min(quantiles):
        # Below the 5% quantile line -> outside the interval.
        out_bounds_predictions = np.logical_or(
            out_bounds_predictions, y_pred >= y_normal
        )
    elif quantile == max(quantiles):
        # Above the 95% quantile line -> outside the interval.
        out_bounds_predictions = np.logical_or(
            out_bounds_predictions, y_pred <= y_normal
        )
# %%
# Now, we can plot the three linear models and the distinguished samples that
# are within the central 90% interval from samples that are outside this
# interval.
plt.plot(X, y_true_mean, color="black", linestyle="dashed", label="True mean")
for quantile, y_pred in predictions.items():
    plt.plot(X, y_pred, label=f"Quantile: {quantile}")
plt.scatter(
    x[out_bounds_predictions],
    y_normal[out_bounds_predictions],
    color="black",
    marker="+",
    alpha=0.5,
    label="Outside interval",
)
plt.scatter(
    x[~out_bounds_predictions],
    y_normal[~out_bounds_predictions],
    color="black",
    alpha=0.5,
    label="Inside interval",
)
plt.legend()
plt.xlabel("x")
plt.ylabel("y")
_ = plt.title("Quantiles of heteroscedastic Normal distributed target")
# %%
# Since the noise is still Normally distributed, in particular is symmetric,
# the true conditional mean and the true conditional median coincide. Indeed,
# we see that the estimated median almost hits the true mean. We observe the
# effect of having an increasing noise variance on the 5% and 95% quantiles:
# the slopes of those quantiles are very different and the interval between
# them becomes wider with increasing `x`.
#
# To get an additional intuition regarding the meaning of the 5% and 95%
# quantiles estimators, one can count the number of samples above and below the
# predicted quantiles (represented by a cross on the above plot), considering
# that we have a total of 100 samples.
#
# We can repeat the same experiment using the asymmetric Pareto distributed
# target.
# Same procedure as for the Normal target, now on the asymmetric Pareto target.
quantiles = [0.05, 0.5, 0.95]
predictions = {}
out_bounds_predictions = np.zeros_like(y_true_mean, dtype=np.bool_)
for quantile in quantiles:
    # alpha=0 switches off the L1 regularization: a pure pinball-loss fit.
    qr = QuantileRegressor(quantile=quantile, alpha=0)
    y_pred = qr.fit(X, y_pareto).predict(X)
    predictions[quantile] = y_pred
    if quantile == min(quantiles):
        out_bounds_predictions = np.logical_or(
            out_bounds_predictions, y_pred >= y_pareto
        )
    elif quantile == max(quantiles):
        out_bounds_predictions = np.logical_or(
            out_bounds_predictions, y_pred <= y_pareto
        )
# %%
plt.plot(X, y_true_mean, color="black", linestyle="dashed", label="True mean")
for quantile, y_pred in predictions.items():
    plt.plot(X, y_pred, label=f"Quantile: {quantile}")
plt.scatter(
    x[out_bounds_predictions],
    y_pareto[out_bounds_predictions],
    color="black",
    marker="+",
    alpha=0.5,
    label="Outside interval",
)
plt.scatter(
    x[~out_bounds_predictions],
    y_pareto[~out_bounds_predictions],
    color="black",
    alpha=0.5,
    label="Inside interval",
)
plt.legend()
plt.xlabel("x")
plt.ylabel("y")
_ = plt.title("Quantiles of asymmetric Pareto distributed target")
# %%
# Due to the asymmetry of the distribution of the noise, we observe that the
# true mean and estimated conditional median are different. We also observe
# that each quantile model has different parameters to better fit the desired
# quantile. Note that ideally, all quantiles would be parallel in this case,
# which would become more visible with more data points or less extreme
# quantiles, e.g. 10% and 90%.
#
# Comparing `QuantileRegressor` and `LinearRegression`
# ----------------------------------------------------
#
# In this section, we will linger on the difference regarding the error that
# :class:`~sklearn.linear_model.QuantileRegressor` and
# :class:`~sklearn.linear_model.LinearRegression` are minimizing.
#
# Indeed, :class:`~sklearn.linear_model.LinearRegression` is a least squares
# approach minimizing the mean squared error (MSE) between the training and
# predicted targets. In contrast,
# :class:`~sklearn.linear_model.QuantileRegressor` with `quantile=0.5`
# minimizes the mean absolute error (MAE) instead.
#
# Let's first compute the training errors of such models in terms of mean
# squared error and mean absolute error. We will use the asymmetric Pareto
# distributed target to make it more interesting as mean and median are not
# equal.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
linear_regression = LinearRegression()
quantile_regression = QuantileRegressor(quantile=0.5, alpha=0)
y_pred_lr = linear_regression.fit(X, y_pareto).predict(X)
y_pred_qr = quantile_regression.fit(X, y_pareto).predict(X)
print(
f"""Training error (in-sample performance)
{linear_regression.__class__.__name__}:
MAE = {mean_absolute_error(y_pareto, y_pred_lr):.3f}
MSE = {mean_squared_error(y_pareto, y_pred_lr):.3f}
{quantile_regression.__class__.__name__}:
MAE = {mean_absolute_error(y_pareto, y_pred_qr):.3f}
MSE = {mean_squared_error(y_pareto, y_pred_qr):.3f}
"""
)
# %%
# On the training set, we see that MAE is lower for
# :class:`~sklearn.linear_model.QuantileRegressor` than
# :class:`~sklearn.linear_model.LinearRegression`. In contrast to that, MSE is
# lower for :class:`~sklearn.linear_model.LinearRegression` than
# :class:`~sklearn.linear_model.QuantileRegressor`. These results confirms that
# MAE is the loss minimized by :class:`~sklearn.linear_model.QuantileRegressor`
# while MSE is the loss minimized
# :class:`~sklearn.linear_model.LinearRegression`.
#
# We can make a similar evaluation but looking a the test error obtained by
# cross-validation.
from sklearn.model_selection import cross_validate
cv_results_lr = cross_validate(
linear_regression,
X,
y_pareto,
cv=3,
scoring=["neg_mean_absolute_error", "neg_mean_squared_error"],
)
cv_results_qr = cross_validate(
quantile_regression,
X,
y_pareto,
cv=3,
scoring=["neg_mean_absolute_error", "neg_mean_squared_error"],
)
print(
f"""Test error (cross-validated performance)
{linear_regression.__class__.__name__}:
MAE = {-cv_results_lr["test_neg_mean_absolute_error"].mean():.3f}
MSE = {-cv_results_lr["test_neg_mean_squared_error"].mean():.3f}
{quantile_regression.__class__.__name__}:
MAE = {-cv_results_qr["test_neg_mean_absolute_error"].mean():.3f}
MSE = {-cv_results_qr["test_neg_mean_squared_error"].mean():.3f}
"""
)
# %%
# We reach similar conclusions on the out-of-sample evaluation.
| [
"matplotlib.pyplot.title",
"numpy.zeros_like",
"matplotlib.pyplot.plot",
"sklearn.model_selection.cross_validate",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.random.RandomState",
"sklearn.metrics.mean_absolute_error",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_mo... | [((857, 882), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (878, 882), True, 'import numpy as np\n'), ((887, 925), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(10)', 'num': '(100)'}), '(start=0, stop=10, num=100)\n', (898, 925), True, 'import numpy as np\n'), ((1553, 1629), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(15, 11)', 'sharex': '"""row"""', 'sharey': '"""row"""'}), "(nrows=2, ncols=2, figsize=(15, 11), sharex='row', sharey='row')\n", (1565, 1629), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4559), 'numpy.zeros_like', 'np.zeros_like', (['y_true_mean'], {'dtype': 'np.bool_'}), '(y_true_mean, dtype=np.bool_)\n', (4530, 4559), True, 'import numpy as np\n'), ((5189, 5267), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y_true_mean'], {'color': '"""black"""', 'linestyle': '"""dashed"""', 'label': '"""True mean"""'}), "(X, y_true_mean, color='black', linestyle='dashed', label='True mean')\n", (5197, 5267), True, 'import matplotlib.pyplot as plt\n'), ((5370, 5510), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[out_bounds_predictions]', 'y_normal[out_bounds_predictions]'], {'color': '"""black"""', 'marker': '"""+"""', 'alpha': '(0.5)', 'label': '"""Outside interval"""'}), "(x[out_bounds_predictions], y_normal[out_bounds_predictions],\n color='black', marker='+', alpha=0.5, label='Outside interval')\n", (5381, 5510), True, 'import matplotlib.pyplot as plt\n'), ((5534, 5663), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[~out_bounds_predictions]', 'y_normal[~out_bounds_predictions]'], {'color': '"""black"""', 'alpha': '(0.5)', 'label': '"""Inside interval"""'}), "(x[~out_bounds_predictions], y_normal[~out_bounds_predictions],\n color='black', alpha=0.5, label='Inside interval')\n", (5545, 5663), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5696), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5694, 5696), 
True, 'import matplotlib.pyplot as plt\n'), ((5697, 5712), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (5707, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5713, 5728), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (5723, 5728), True, 'import matplotlib.pyplot as plt\n'), ((5733, 5800), 'matplotlib.pyplot.title', 'plt.title', (['"""Quantiles of heteroscedastic Normal distributed target"""'], {}), "('Quantiles of heteroscedastic Normal distributed target')\n", (5742, 5800), True, 'import matplotlib.pyplot as plt\n'), ((6666, 6708), 'numpy.zeros_like', 'np.zeros_like', (['y_true_mean'], {'dtype': 'np.bool_'}), '(y_true_mean, dtype=np.bool_)\n', (6679, 6708), True, 'import numpy as np\n'), ((7175, 7253), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y_true_mean'], {'color': '"""black"""', 'linestyle': '"""dashed"""', 'label': '"""True mean"""'}), "(X, y_true_mean, color='black', linestyle='dashed', label='True mean')\n", (7183, 7253), True, 'import matplotlib.pyplot as plt\n'), ((7356, 7496), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[out_bounds_predictions]', 'y_pareto[out_bounds_predictions]'], {'color': '"""black"""', 'marker': '"""+"""', 'alpha': '(0.5)', 'label': '"""Outside interval"""'}), "(x[out_bounds_predictions], y_pareto[out_bounds_predictions],\n color='black', marker='+', alpha=0.5, label='Outside interval')\n", (7367, 7496), True, 'import matplotlib.pyplot as plt\n'), ((7520, 7649), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[~out_bounds_predictions]', 'y_pareto[~out_bounds_predictions]'], {'color': '"""black"""', 'alpha': '(0.5)', 'label': '"""Inside interval"""'}), "(x[~out_bounds_predictions], y_pareto[~out_bounds_predictions],\n color='black', alpha=0.5, label='Inside interval')\n", (7531, 7649), True, 'import matplotlib.pyplot as plt\n'), ((7670, 7682), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7680, 7682), True, 'import matplotlib.pyplot as plt\n'), ((7683, 
7698), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (7693, 7698), True, 'import matplotlib.pyplot as plt\n'), ((7699, 7714), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (7709, 7714), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7781), 'matplotlib.pyplot.title', 'plt.title', (['"""Quantiles of asymmetric Pareto distributed target"""'], {}), "('Quantiles of asymmetric Pareto distributed target')\n", (7728, 7781), True, 'import matplotlib.pyplot as plt\n'), ((9225, 9243), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9241, 9243), False, 'from sklearn.linear_model import LinearRegression\n'), ((9266, 9306), 'sklearn.linear_model.QuantileRegressor', 'QuantileRegressor', ([], {'quantile': '(0.5)', 'alpha': '(0)'}), '(quantile=0.5, alpha=0)\n', (9283, 9306), False, 'from sklearn.linear_model import QuantileRegressor\n'), ((10476, 10596), 'sklearn.model_selection.cross_validate', 'cross_validate', (['linear_regression', 'X', 'y_pareto'], {'cv': '(3)', 'scoring': "['neg_mean_absolute_error', 'neg_mean_squared_error']"}), "(linear_regression, X, y_pareto, cv=3, scoring=[\n 'neg_mean_absolute_error', 'neg_mean_squared_error'])\n", (10490, 10596), False, 'from sklearn.model_selection import cross_validate\n'), ((10631, 10753), 'sklearn.model_selection.cross_validate', 'cross_validate', (['quantile_regression', 'X', 'y_pareto'], {'cv': '(3)', 'scoring': "['neg_mean_absolute_error', 'neg_mean_squared_error']"}), "(quantile_regression, X, y_pareto, cv=3, scoring=[\n 'neg_mean_absolute_error', 'neg_mean_squared_error'])\n", (10645, 10753), False, 'from sklearn.model_selection import cross_validate\n'), ((4596, 4641), 'sklearn.linear_model.QuantileRegressor', 'QuantileRegressor', ([], {'quantile': 'quantile', 'alpha': '(0)'}), '(quantile=quantile, alpha=0)\n', (4613, 4641), False, 'from sklearn.linear_model import QuantileRegressor\n'), ((5318, 5368), 'matplotlib.pyplot.plot', 
'plt.plot', (['X', 'y_pred'], {'label': 'f"""Quantile: {quantile}"""'}), "(X, y_pred, label=f'Quantile: {quantile}')\n", (5326, 5368), True, 'import matplotlib.pyplot as plt\n'), ((6745, 6790), 'sklearn.linear_model.QuantileRegressor', 'QuantileRegressor', ([], {'quantile': 'quantile', 'alpha': '(0)'}), '(quantile=quantile, alpha=0)\n', (6762, 6790), False, 'from sklearn.linear_model import QuantileRegressor\n'), ((7304, 7354), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y_pred'], {'label': 'f"""Quantile: {quantile}"""'}), "(X, y_pred, label=f'Quantile: {quantile}')\n", (7312, 7354), True, 'import matplotlib.pyplot as plt\n'), ((4790, 4847), 'numpy.logical_or', 'np.logical_or', (['out_bounds_predictions', '(y_pred >= y_normal)'], {}), '(out_bounds_predictions, y_pred >= y_normal)\n', (4803, 4847), True, 'import numpy as np\n'), ((6939, 6996), 'numpy.logical_or', 'np.logical_or', (['out_bounds_predictions', '(y_pred >= y_pareto)'], {}), '(out_bounds_predictions, y_pred >= y_pareto)\n', (6952, 6996), True, 'import numpy as np\n'), ((4940, 4997), 'numpy.logical_or', 'np.logical_or', (['out_bounds_predictions', '(y_pred <= y_normal)'], {}), '(out_bounds_predictions, y_pred <= y_normal)\n', (4953, 4997), True, 'import numpy as np\n'), ((7089, 7146), 'numpy.logical_or', 'np.logical_or', (['out_bounds_predictions', '(y_pred <= y_pareto)'], {}), '(out_bounds_predictions, y_pred <= y_pareto)\n', (7102, 7146), True, 'import numpy as np\n'), ((9541, 9581), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_pareto', 'y_pred_lr'], {}), '(y_pareto, y_pred_lr)\n', (9560, 9581), False, 'from sklearn.metrics import mean_absolute_error\n'), ((9598, 9637), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_pareto', 'y_pred_lr'], {}), '(y_pareto, y_pred_lr)\n', (9616, 9637), False, 'from sklearn.metrics import mean_squared_error\n'), ((9700, 9740), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_pareto', 'y_pred_qr'], {}), '(y_pareto, 
y_pred_qr)\n', (9719, 9740), False, 'from sklearn.metrics import mean_absolute_error\n'), ((9757, 9796), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_pareto', 'y_pred_qr'], {}), '(y_pareto, y_pred_qr)\n', (9775, 9796), False, 'from sklearn.metrics import mean_squared_error\n')] |
# Author: <NAME>
# ECG Biometric Authentication using CNN
import os
import pickle
import random
import librosa
import numpy as np
import pandas as pd
import warnings
from biosppy.signals import ecg
from scipy import signal
from scipy.signal import filtfilt, find_peaks
from matplotlib import pyplot as plt
def resamp(array, times):
    """Resample `array` to `times` samples (Fourier-method resampling)."""
    resampled = signal.resample(array, times)
    return resampled
def mel(y, sr):
    """Compute a mel spectrogram of `y` at rate `sr` and return it in dB.

    Uses a 2048-sample FFT with a 1024-sample hop; power is converted to
    decibels relative to the spectrogram's maximum.
    """
    power_spec = librosa.feature.melspectrogram(
        y=y, sr=sr, n_fft=2048, hop_length=1024
    )
    db_spec = librosa.power_to_db(power_spec, ref=np.max)
    return db_spec
def filters(array, n):
    """Smooth `array` with a zero-phase moving-average filter of width `n`.

    The larger `n` is, the smoother the resulting curve will be.
    """
    numerator = [1.0 / n] * n
    denominator = 1
    return filtfilt(numerator, denominator, array)
def refine_r_peaks(sig, r_peaks):
    """Snap each annotated R-peak to the local maximum of ``sig``.

    For every annotated peak, the highest sample within a +/-100-sample
    neighborhood (truncated at the signal edges) becomes the refined peak.

    Bug fixes vs. the original:
    - the argmax offset assumed the window always started at ``r - 100``,
      which mislocated peaks closer than 100 samples to the signal start;
    - the upper clamp used ``len(sig)`` (one past the end) instead of
      ``len(sig) - 1``, allowing an out-of-range index.

    Parameters
    ----------
    sig : array-like
        The ECG signal.
    r_peaks : array-like of int
        Approximate R-peak sample indices.

    Returns
    -------
    numpy.ndarray
        Refined R-peak indices, clamped to valid positions in ``sig``.
    """
    r_peaks2 = np.array(r_peaks)  # make a copy
    for i in range(len(r_peaks)):
        r = r_peaks[i]  # current R-peak
        start = max(0, r - 100)  # window may be truncated at the edges
        small_segment = sig[start:min(len(sig), r + 100)]
        # Map the window-local argmax back to signal coordinates.
        r_peaks2[i] = start + np.argmax(small_segment)
        r_peaks2[i] = min(r_peaks2[i], len(sig) - 1)  # keep index in range
        r_peaks2[i] = max(r_peaks2[i], 0)  # checking if it goes before zero
    return r_peaks2  # returning the refined r-peak list
def segment_signals(sig, r_peaks_annot, bmd=True, normalization=True):
    """Cut one fixed-length heartbeat window around every refined R-peak.

    Each window spans ``win_len`` samples (300 for BMD data, 256 otherwise),
    placed one quarter before and three quarters after the peak.  Windows
    that would extend past either end of the signal are dropped; each kept
    window is optionally Z-score normalized (flat segments are skipped to
    avoid a zero division).

    Returns
    -------
    tuple(list of numpy.ndarray, numpy.ndarray)
        The segmented heartbeats and the refined R-peak indices.
    """
    refined_peaks = refine_r_peaks(sig, np.array(r_peaks_annot))
    win_len = 300 if bmd else 256
    pre = win_len // 4
    post = 3 * (win_len // 4)
    heartbeats = []
    for peak in refined_peaks:
        if (peak - pre) < 0 or (peak + post) >= len(sig):
            # not enough signal on one side to cut a full window
            continue
        beat = np.array(sig[peak - pre:peak + post])
        if normalization:
            spread = np.std(beat)
            if abs(spread) < 1e-6:
                # flat-line ECG segment: Z-scoring would divide by zero
                continue
            beat = (beat - np.mean(beat)) / spread
        if not np.isnan(beat).any():  # should never happen; kept as a guard
            heartbeats.append(beat)
    return heartbeats, refined_peaks
class GetFeatures:
    """Extract, augment and persist per-person ECG heartbeat signals.

    Reads raw CSV recordings (ecgid / mit / bmd101 layouts), segments them
    into individual heartbeats around detected R-peaks, applies a fixed set
    of augmentations to every heartbeat, and pickles the shuffled signal
    list for each person under ``data/ready/signals/``.
    """
    def __init__(self):
        # Root directory where pickled signal lists are written.
        self.dir = os.path.expanduser("data/ready/")
        self.age_labels = []
        self.gender_labels = []
        self.signals = []  # accumulated (augmented) heartbeats for the current person
        self.where = ""  # dataset identifier: "ecgid", "mit" or a bmd variant
        self.all = []
        self.person_waves = []
        self.person = ""  # current person id (used for plot titles)
    # Extracts features from csv file of each person | mit database
    def features(self, where, people):
        """Process every person in ``people`` from dataset ``where``.

        For each person: load the raw CSV signal(s), segment and augment
        them (via ``segment``), shuffle the resulting list and pickle it
        under the person's name.
        """
        self.where = where
        for person in people:
            self.signals = []  # reset signal array
            self.person = person
            if self.where == "ecgid":
                try:
                    folder = os.path.expanduser("data/raw/ecgid/Person_" + person + "/")
                    files = folder + "rec_1.csv", folder + "rec_2.csv"
                # NOTE(review): os.path.expanduser does not raise
                # FileNotFoundError, so this handler looks dead; missing
                # files would surface later in open() instead.
                except FileNotFoundError:
                    continue
            elif self.where == "mit":
                files = ["data/raw/mit/" + person + ".csv"]
            else:  # bmd
                files = ["data/raw/bmd101/csv/" + person + ".csv"]
            sgs = []
            for file in files:
                with open(file, 'r') as f:
                    features = pd.read_csv(f)
                filtered = features['0'].values
                # Concatenate all recordings of this person into one signal.
                sgs = np.concatenate((sgs, filtered))
            self.segment(np.array(sgs))
            length = len(self.signals)
            num = random.randint(0, length)
            random.seed(num)
            random.shuffle(self.signals)
            self.dump_pickle(self.signals, person)
        # self.dump_pickle(self.all, 'all')
        print("Feature extraction complete.")
    # ECG R-peak segmentation
    def segment(self, array):
        """Min-max normalize ``array``, detect R-peaks and augment every
        segmented heartbeat via ``augment``."""
        count = 1
        array = np.array(array, dtype="float32")
        # Min-max normalization to [0, 1] before peak detection.
        array = (array - array.min()) / (array.max() - array.min())
        peaks = ecg.christov_segmenter(signal=array, sampling_rate=500)[0]
        waves, pks = segment_signals(array, peaks, False, True)
        how_many = []
        self.person_waves = []
        length = len(waves)
        for k in range(length):
            wave = waves[k]
            plt.title(self.person)
            self.augment(wave, len(wave))
            how_many.append(len(wave))
            count += 1  # NOTE(review): count is never read
        plt.show()
        print("Len per Wave", how_many)
        print("Mean per Wave", np.mean(how_many))
        print("How many", len(how_many))
        # NOTE(review): augment() appends 10 signals per wave (original plus
        # nine augmentations); "* 9" may undercount -- confirm intent.
        print("Total", len(how_many) * 9)
    # Augment each signal and convert call function to convert it to image
    def augment(self, array, times):
        """Append ``array`` plus nine augmented variants (3x noise,
        3x time shift, 3x pitch shift) to ``self.signals``."""
        array = resamp(array, times)
        self.signals.append(array)
        plt.plot(array)
        # Noise addition using normal distribution with mean = 0 and std =1
        # Permissible noise factor value = x > 0.009
        if self.where[:4] == "live":
            one = 0.09
            two = 0.07
            three = 0.05
        else:
            # NOTE(review): both branches use identical factors; the "live"
            # branch was presumably meant to differ -- confirm.
            one = 0.09
            two = 0.07
            three = 0.05
        self.help(array, times, "noise", one)
        self.help(array, times, "noise", two)
        self.help(array, times, "noise", three)
        # plt.show()
        # Permissible factor values = samplingRate / 100
        self.help(array, times, "time_shifting", 150)
        self.help(array, times, "time_shifting", 120)
        self.help(array, times, "time_shifting", 100)
        # Permissible factor values = -5 <= x <= 5
        self.help(array, times, "pitch_shifting", -0.3)
        self.help(array, times, "pitch_shifting", -0.2)
        self.help(array, times, "pitch_shifting", -0.1)
    def help(self, array, times, which, factor):
        """Apply one augmentation of kind ``which`` scaled by ``factor``,
        resample it back to ``times`` samples and append it to
        ``self.signals``."""
        if which == "noise":
            # Additive Gaussian noise scaled by ``factor``.
            noise = array + factor * np.random.normal(0, 1, len(array))
            noise = resamp(noise, times)
            self.signals.append(noise)
            plt.plot(noise)
        if which == "time_shifting":
            # Circular shift by ~500/factor samples.
            time_shifting = np.roll(array, int(500 / factor))
            time_shifting = resamp(time_shifting, times)
            self.signals.append(time_shifting)
            plt.plot(time_shifting)
        if which == "pitch_shifting":
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', UserWarning)
                pitch_shifting = librosa.effects.pitch_shift(array, 500, n_steps=float(factor))
            pitch_shifting = resamp(pitch_shifting, times)
            self.signals.append(pitch_shifting)
            plt.plot(pitch_shifting)
        if which == "time_stretching":
            # NOTE(review): never requested by augment(); apparently unused.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', UserWarning)
                time_stretching = librosa.effects.time_stretch(array, factor)
            time_stretching = resamp(time_stretching, times)
            self.signals.append(time_stretching)
            plt.plot(time_stretching)
    def dump_pickle(self, signals, basename):
        """Pickle ``signals`` to ``<dir>/signals/<basename>.pickle``,
        creating the folder on first use."""
        folder = self.dir + 'signals/'
        if not os.path.exists(folder):
            os.makedirs(folder)
        filename = folder + basename + '.pickle'
        pickle_out = open(filename, 'wb')
        pickle.dump(signals, pickle_out)
        pickle_out.close()
        print('Person ' + basename, "\n")
| [
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.argmax",
"pandas.read_csv",
"random.shuffle",
"numpy.isnan",
"librosa.effects.time_stretch",
"librosa.power_to_db",
"numpy.mean",
"librosa.feature.melspectrogram",
"random.randint",
"warnings.simplefilter",
"numpy.std",
"os.path.exists",
"... | [((347, 376), 'scipy.signal.resample', 'signal.resample', (['array', 'times'], {}), '(array, times)\n', (362, 376), False, 'from scipy import signal\n'), ((413, 484), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'y', 'sr': 'sr', 'n_fft': '(2048)', 'hop_length': '(1024)'}), '(y=y, sr=sr, n_fft=2048, hop_length=1024)\n', (443, 484), False, 'import librosa\n'), ((496, 540), 'librosa.power_to_db', 'librosa.power_to_db', (['spectrogram'], {'ref': 'np.max'}), '(spectrogram, ref=np.max)\n', (515, 540), False, 'import librosa\n'), ((660, 681), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'array'], {}), '(b, a, array)\n', (668, 681), False, 'from scipy.signal import filtfilt, find_peaks\n'), ((750, 767), 'numpy.array', 'np.array', (['r_peaks'], {}), '(r_peaks)\n', (758, 767), True, 'import numpy as np\n'), ((1419, 1442), 'numpy.array', 'np.array', (['r_peaks_annot'], {}), '(r_peaks_annot)\n', (1427, 1442), True, 'import numpy as np\n'), ((1793, 1839), 'numpy.array', 'np.array', (['sig[r - win_len_1_4:r + win_len_3_4]'], {}), '(sig[r - win_len_1_4:r + win_len_3_4])\n', (1801, 1839), True, 'import numpy as np\n'), ((2402, 2435), 'os.path.expanduser', 'os.path.expanduser', (['"""data/ready/"""'], {}), "('data/ready/')\n", (2420, 2435), False, 'import os\n'), ((4034, 4066), 'numpy.array', 'np.array', (['array'], {'dtype': '"""float32"""'}), "(array, dtype='float32')\n", (4042, 4066), True, 'import numpy as np\n'), ((4564, 4574), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4572, 4574), True, 'from matplotlib import pyplot as plt\n'), ((4941, 4956), 'matplotlib.pyplot.plot', 'plt.plot', (['array'], {}), '(array)\n', (4949, 4956), True, 'from matplotlib import pyplot as plt\n'), ((7393, 7425), 'pickle.dump', 'pickle.dump', (['signals', 'pickle_out'], {}), '(signals, pickle_out)\n', (7404, 7425), False, 'import pickle\n'), ((3699, 3724), 'random.randint', 'random.randint', (['(0)', 'length'], {}), '(0, length)\n', (3713, 
3724), False, 'import random\n'), ((3737, 3753), 'random.seed', 'random.seed', (['num'], {}), '(num)\n', (3748, 3753), False, 'import random\n'), ((3766, 3794), 'random.shuffle', 'random.shuffle', (['self.signals'], {}), '(self.signals)\n', (3780, 3794), False, 'import random\n'), ((4151, 4206), 'biosppy.signals.ecg.christov_segmenter', 'ecg.christov_segmenter', ([], {'signal': 'array', 'sampling_rate': '(500)'}), '(signal=array, sampling_rate=500)\n', (4173, 4206), False, 'from biosppy.signals import ecg\n'), ((4428, 4450), 'matplotlib.pyplot.title', 'plt.title', (['self.person'], {}), '(self.person)\n', (4437, 4450), True, 'from matplotlib import pyplot as plt\n'), ((4646, 4663), 'numpy.mean', 'np.mean', (['how_many'], {}), '(how_many)\n', (4653, 4663), True, 'import numpy as np\n'), ((6125, 6140), 'matplotlib.pyplot.plot', 'plt.plot', (['noise'], {}), '(noise)\n', (6133, 6140), True, 'from matplotlib import pyplot as plt\n'), ((6357, 6380), 'matplotlib.pyplot.plot', 'plt.plot', (['time_shifting'], {}), '(time_shifting)\n', (6365, 6380), True, 'from matplotlib import pyplot as plt\n'), ((6740, 6764), 'matplotlib.pyplot.plot', 'plt.plot', (['pitch_shifting'], {}), '(pitch_shifting)\n', (6748, 6764), True, 'from matplotlib import pyplot as plt\n'), ((7110, 7135), 'matplotlib.pyplot.plot', 'plt.plot', (['time_stretching'], {}), '(time_stretching)\n', (7118, 7135), True, 'from matplotlib import pyplot as plt\n'), ((7237, 7259), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (7251, 7259), False, 'import os\n'), ((7273, 7292), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (7284, 7292), False, 'import os\n'), ((994, 1018), 'numpy.argmax', 'np.argmax', (['small_segment'], {}), '(small_segment)\n', (1003, 1018), True, 'import numpy as np\n'), ((2125, 2149), 'numpy.std', 'np.std', (['segmented_signal'], {}), '(segmented_signal)\n', (2131, 2149), True, 'import numpy as np\n'), ((3568, 3599), 'numpy.concatenate', 'np.concatenate', 
(['(sgs, filtered)'], {}), '((sgs, filtered))\n', (3582, 3599), True, 'import numpy as np\n'), ((3626, 3639), 'numpy.array', 'np.array', (['sgs'], {}), '(sgs)\n', (3634, 3639), True, 'import numpy as np\n'), ((6437, 6462), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (6460, 6462), False, 'import warnings\n'), ((6480, 6524), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (6501, 6524), False, 'import warnings\n'), ((6822, 6847), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (6845, 6847), False, 'import warnings\n'), ((6865, 6909), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (6886, 6909), False, 'import warnings\n'), ((6944, 6987), 'librosa.effects.time_stretch', 'librosa.effects.time_stretch', (['array', 'factor'], {}), '(array, factor)\n', (6972, 6987), False, 'import librosa\n'), ((1937, 1961), 'numpy.std', 'np.std', (['segmented_signal'], {}), '(segmented_signal)\n', (1943, 1961), True, 'import numpy as np\n'), ((2096, 2121), 'numpy.mean', 'np.mean', (['segmented_signal'], {}), '(segmented_signal)\n', (2103, 2121), True, 'import numpy as np\n'), ((2166, 2192), 'numpy.isnan', 'np.isnan', (['segmented_signal'], {}), '(segmented_signal)\n', (2174, 2192), True, 'import numpy as np\n'), ((2963, 3022), 'os.path.expanduser', 'os.path.expanduser', (["('data/raw/ecgid/Person_' + person + '/')"], {}), "('data/raw/ecgid/Person_' + person + '/')\n", (2981, 3022), False, 'import os\n'), ((3483, 3497), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (3494, 3497), True, 'import pandas as pd\n')] |
"""
Configuration file for capturing TD Ameritrade data into PostgreSQL database
@author: <NAME>, August 2021
"""
import numpy as np
import pandas as pd
import os
import copy
# Define symbol lookup path - make sure 'symbol' is a column name
symbolpath = '/path/to/symbols.csv'
# Define chrome webdriver path
webdriverpath = '/path/to/chromedriver'
# Define TD Ameritrade Credentials
token_path = '/path/to/token/token.pickle'
api_key = 'insert api key here'
redirect_uri = 'https://localhost'
# Define PostgreSQL Database Credentials
# NOTE(review): these are plaintext placeholders; keep real credentials out
# of version control (e.g. load them from environment variables instead).
db = 'dbname'
dbuser = 'dbusername'
dbpassword = '<PASSWORD>'
dbhost = 'host here'
dbport = 'port here'
# Functions to be used
def rsi(values):
    """Relative Strength Index of a series of signed price changes.

    Computed as 100 * avg_gain / (avg_gain + avg_loss); the result is NaN
    when the window contains no gains or no losses (mean of an empty
    selection), matching the original behavior.
    """
    avg_gain = values[values > 0].mean()
    avg_loss = -values[values < 0].mean()
    return 100 * avg_gain / (avg_gain + avg_loss)
def bbands(price, length=30, numsd=2):
    """Bollinger bands: returns (moving average, upper band, lower band).

    The bands sit ``numsd`` rolling standard deviations above and below the
    ``length``-period rolling mean; all three series are rounded to three
    decimals.
    """
    rolling = price.rolling(window=length, center=False)
    ave = rolling.mean()
    sd = rolling.std()
    upband = ave + sd * numsd
    dnband = ave - sd * numsd
    return np.round(ave, 3), np.round(upband, 3), np.round(dnband, 3)
def aroon(df, tf=25):
    """Aroon-up / Aroon-down over a trailing window of ``tf`` bars.

    For each bar from index ``tf`` onward, measures where in the trailing
    window the highest high / lowest low occurred, scaled to 0-100.

    Returns
    -------
    tuple(list, list)
        (aroon-up values, aroon-down values), one pair per bar from ``tf``.
    """
    aroonup = []
    aroondown = []
    for x in range(tf, len(df['Date'])):
        high_window = df['High'][x - tf:x].tolist()
        low_window = df['Low'][x - tf:x].tolist()
        aroonup.append(high_window.index(max(high_window)) / float(tf) * 100)
        aroondown.append(low_window.index(min(low_window)) / float(tf) * 100)
    return aroonup, aroondown
def abands(df):
    """Acceleration Bands (20-period).

    Adds AB_Middle_Band / AB_Upper_Band / AB_Lower_Band to *df* in place;
    the intermediate 'aupband'/'adownband' columns are left on the frame,
    matching the original behaviour.  Returns None.
    """
    spread = 4 * (df['High'] - df['Low']) / (df['High'] + df['Low'])
    df['AB_Middle_Band'] = df['Close'].rolling(window=20, center=False).mean()
    # High * (1 + 4 * (High - Low) / (High + Low))
    df['aupband'] = df['High'] * (1 + spread)
    df['AB_Upper_Band'] = df['aupband'].rolling(window=20, center=False).mean()
    # Low * (1 - 4 * (High - Low) / (High + Low))
    df['adownband'] = df['Low'] * (1 - spread)
    df['AB_Lower_Band'] = df['adownband'].rolling(window=20, center=False).mean()
def STOK(df, n):
    """Stochastic oscillator: adds %K ('STOK') and its 3-period mean ('STOD').

    NOTE(review): the numerator subtracts the rolling *mean* of Low rather
    than the rolling min the classic %K uses — preserved as-is.
    """
    low_avg = df['Low'].rolling(window=n, center=False).mean()
    highest_high = df['High'].rolling(window=n, center=False).max()
    lowest_low = df['Low'].rolling(window=n, center=False).min()
    df['STOK'] = 100 * (df['Close'] - low_avg) / (highest_high - lowest_low)
    df['STOD'] = df['STOK'].rolling(window=3, center=False).mean()
def CMFlow(df, tf):
    """Chaikin-style Money Flow over a *tf*-bar window.

    Returns a list of CMF values: rolling sums of money-flow volumes
    divided by a period volume.  MFMs is accumulated but never returned.
    Division by zero is possible when High == Low on a bar.
    """
    CHMF = []
    MFMs = []
    MFVs = []
    x = tf
    # Pass 1: per bar, compute the Money Flow Multiplier and scale it by
    # the trailing tf-bar volume to get a Money Flow Volume.
    while x < len(df['Date']):
        PeriodVolume = 0
        volRange = df['Volume'][x-tf:x]
        for eachVol in volRange:
            PeriodVolume += eachVol
        MFM = ((df['Close'][x] - df['Low'][x]) - (df['High'][x] - df['Close'][x])) / (df['High'][x] - df['Low'][x])
        MFV = MFM*PeriodVolume
        MFMs.append(MFM)
        MFVs.append(MFV)
        x+=1
    y = tf
    # Pass 2: rolling sum of MFVs normalised by a period volume.
    while y < len(MFVs):
        PeriodVolume = 0
        # NOTE(review): this window reuses x, which pass 1 left at
        # len(df['Date']) — every iteration therefore sums the SAME final
        # volume window instead of one tracking y.  Looks like a stale
        # index bug; confirm intended semantics before changing.
        volRange = df['Volume'][x-tf:x]
        for eachVol in volRange:
            PeriodVolume += eachVol
        consider = MFVs[y-tf:y]
        tfsMFV = 0
        for eachMFV in consider:
            tfsMFV += eachMFV
        tfsCMF = tfsMFV/PeriodVolume
        CHMF.append(tfsCMF)
        y+=1
    return CHMF
def psar(df, iaf = 0.02, maxaf = 0.2):
    """Parabolic SAR (stop-and-reverse).

    Adds a 'psar' column to *df*.  The working series below aliases
    df['Close'] and is written in place, so an untouched deep copy is
    taken first and 'Close' is restored at the end.

    Parameters
    ----------
    df : DataFrame with 'Date', 'High', 'Low', 'Close'
    iaf : float
        initial acceleration factor (also the per-step increment)
    maxaf : float
        cap for the acceleration factor
    """
    length = len(df)
    dates = (df['Date'])
    high = (df['High'])
    low = (df['Low'])
    # Keep an untouched copy: psar below is a view of df['Close'] and is
    # mutated element-wise in the loop.
    orig_close = copy.deepcopy(df['Close'])
    close = (df['Close'])
    psar = df['Close'][0:len(df['Close'])]
    psarbull = [None] * length
    psarbear = [None] * length
    bull = True  # current trend: True = uptrend (SAR trails below price)
    af = iaf  # acceleration factor, grows while the trend extends
    ep = df['Low'][0]  # NOTE(review): assigned but never used below
    hp = df['High'][0]  # highest point seen in the current uptrend
    lp = df['Low'][0]  # lowest point seen in the current downtrend
    for i in range(2,length):
        # Advance the SAR toward the extreme point of the current trend.
        if bull:
            psar[i] = psar[i - 1] + af * (hp - psar[i - 1])
        else:
            psar[i] = psar[i - 1] + af * (lp - psar[i - 1])
        reverse = False
        # Price crossing the SAR flips the trend and resets af.
        if bull:
            if df['Low'][i] < psar[i]:
                bull = False
                reverse = True
                psar[i] = hp
                lp = df['Low'][i]
                af = iaf
        else:
            if df['High'][i] > psar[i]:
                bull = True
                reverse = True
                psar[i] = lp
                hp = df['High'][i]
                af = iaf
        if not reverse:
            if bull:
                # New high extends the trend: accelerate, then clamp the
                # SAR so it never enters the prior two bars' range.
                if df['High'][i] > hp:
                    hp = df['High'][i]
                    af = min(af + iaf, maxaf)
                if df['Low'][i - 1] < psar[i]:
                    psar[i] = df['Low'][i - 1]
                if df['Low'][i - 2] < psar[i]:
                    psar[i] = df['Low'][i - 2]
            else:
                # Mirror image for the downtrend.
                if df['Low'][i] < lp:
                    lp = df['Low'][i]
                    af = min(af + iaf, maxaf)
                if df['High'][i - 1] > psar[i]:
                    psar[i] = df['High'][i - 1]
                if df['High'][i - 2] > psar[i]:
                    psar[i] = df['High'][i - 2]
        if bull:
            psarbull[i] = psar[i]
        else:
            psarbear[i] = psar[i]
    # return {"dates":dates, "high":high, "low":low, "close":close, "psar":psar, "psarbear":psarbear, "psarbull":psarbull}
    # return psar, psarbear, psarbull
    df['psar'] = psar
    df['Close'] = orig_close
    # df['psarbear'] = psarbear
    # df['psarbull'] = psarbull
def CCI(df, n, constant):
    """Commodity Channel Index.

    Deviation of the typical price from its n-period rolling mean, scaled
    by *constant* times the rolling standard deviation.
    """
    typical = (df['High'] + df['Low'] + df['Close']) / 3
    window = typical.rolling(window=n, center=False)
    return pd.Series((typical - window.mean()) / (constant * window.std()))
# Keltner Channel
def KELCH(df, n):
    """Keltner Channels: returns (middle, lower, upper) n-period series."""
    mid_price = (df['High'] + df['Low'] + df['Close']) / 3
    up_price = (4 * df['High'] - 2 * df['Low'] + df['Close']) / 3
    dn_price = (-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3
    KelChM = pd.Series(mid_price.rolling(window=n, center=False).mean(), name='KelChM_' + str(n))
    KelChU = pd.Series(up_price.rolling(window=n, center=False).mean(), name='KelChU_' + str(n))
    KelChD = pd.Series(dn_price.rolling(window=n, center=False).mean(), name='KelChD_' + str(n))
    return KelChM, KelChD, KelChU
def DMI(df, period):
    """Directional Movement Index.

    Adds UpMove/DownMove/Zero/PlusDM/MinusDM helper columns plus the
    plusDI, minusDI and ADX lines.  Requires an 'ATR' column; mutates
    *df* in place.
    """
    def smooth(series):
        # Shared exponentially-weighted mean used for both DI lines and ADX.
        return series.ewm(span=period, min_periods=0, adjust=True, ignore_na=False).mean()

    df['UpMove'] = df['High'] - df['High'].shift(1)
    df['DownMove'] = df['Low'].shift(1) - df['Low']
    df['Zero'] = 0
    up_dominates = (df['UpMove'] > df['DownMove']) & (df['UpMove'] > df['Zero'])
    down_dominates = (df['UpMove'] < df['DownMove']) & (df['DownMove'] > df['Zero'])
    df['PlusDM'] = np.where(up_dominates, df['UpMove'], 0)
    df['MinusDM'] = np.where(down_dominates, df['DownMove'], 0)
    df['plusDI'] = 100 * smooth(df['PlusDM'] / df['ATR'])
    df['minusDI'] = 100 * smooth(df['MinusDM'] / df['ATR'])
    di_spread = abs((df['plusDI'] - df['minusDI']) / (df['plusDI'] + df['minusDI']))
    df['ADX'] = 100 * smooth(di_spread)
def MFI(df):
    """Money Flow Index over a fixed 14-bar window.

    Adds tp/rmf/pmf/nmf/mfr helper columns and 'Money_Flow_Index' to *df*
    in place.  NOTE(review): pmf/nmf accumulate the typical price itself
    rather than the raw money flow 'rmf' — preserved as-is.
    """
    # typical price
    tp = (df['High'] + df['Low'] + df['Close']) / 3
    df['tp'] = tp
    # raw money flow (computed but not used by the index below)
    df['rmf'] = tp * df['Volume']
    # positive and negative money flow
    prev_tp = tp.shift(1)
    df['pmf'] = np.where(tp > prev_tp, tp, 0)
    df['nmf'] = np.where(tp < prev_tp, tp, 0)
    # money flow ratio
    pos = df['pmf'].rolling(window=14, center=False).sum()
    neg = df['nmf'].rolling(window=14, center=False).sum()
    df['mfr'] = pos / neg
    df['Money_Flow_Index'] = 100 - 100 / (1 + df['mfr'])
def ichimoku(df):
    """Ichimoku cloud.

    Adds turning/standard lines, both leading spans, and the chikou
    (lagging) span to *df* in place.
    """
    def midrange(window):
        # Midpoint of the highest high and lowest low over *window* bars.
        hi = df['High'].rolling(window=window, center=False).max()
        lo = df['Low'].rolling(window=window, center=False).min()
        return (hi + lo) / 2

    # Turning Line (9) and Standard Line (26)
    df['turning_line'] = midrange(9)
    df['standard_line'] = midrange(26)
    # Leading spans, projected 26 bars forward
    df['ichimoku_span1'] = ((df['turning_line'] + df['standard_line']) / 2).shift(26)
    df['ichimoku_span2'] = midrange(52).shift(26)
    # The most current closing price plotted 22 time periods behind
    # (22 according to investopedia)
    df['chikou_span'] = df['Close'].shift(-22)
def WillR(df):
    """Williams %R over a fixed 14-bar window; adds 'WillR' to *df* in place."""
    hh = df['High'].rolling(window=14, center=False).max()
    ll = df['Low'].rolling(window=14, center=False).min()
    df['WillR'] = -100 * (hh - df['Close']) / (hh - ll)
def MINMAX(df):
    """Rolling 14-bar volume extremes; adds MIN_Volume / MAX_Volume to *df*."""
    vol = df['Volume'].rolling(window=14, center=False)
    df['MIN_Volume'] = vol.min()
    df['MAX_Volume'] = vol.max()
def KAMA(price, n=10, pow1=2, pow2=30):
    """Kaufman Adaptive Moving Average of a pandas price series.

    Returns a numpy array: NaN while the efficiency ratio is undefined
    (warm-up), then an exponential recursion whose smoothing constant
    adapts between the fast (pow1) and slow (pow2) limits.
    """
    change = abs(price - price.shift(1))
    er = abs(price - price.shift(n)) / change.rolling(window=n, center=False).sum()
    fast = 2.0 / (pow1 + 1)
    slow = 2.0 / (pow2 + 1.0)
    sc = (er * (fast - slow) + slow) ** 2.0
    kama = np.zeros(sc.size)
    seeded = False
    for i in range(len(kama)):
        smoothing = sc[i]
        if smoothing != smoothing:  # NaN check: still in the warm-up region
            kama[i] = np.nan
        elif not seeded:
            kama[i] = price[i]  # seed with the first price that has a defined sc
            seeded = True
        else:
            kama[i] = kama[i - 1] + smoothing * (price[i] - kama[i - 1])
    return kama
| [
"numpy.round",
"copy.deepcopy",
"numpy.where",
"numpy.zeros"
] | [((3558, 3584), 'copy.deepcopy', 'copy.deepcopy', (["df['Close']"], {}), "(df['Close'])\n", (3571, 3584), False, 'import copy\n'), ((6407, 6500), 'numpy.where', 'np.where', (["((df['UpMove'] > df['DownMove']) & (df['UpMove'] > df['Zero']))", "df['UpMove']", '(0)'], {}), "((df['UpMove'] > df['DownMove']) & (df['UpMove'] > df['Zero']), df[\n 'UpMove'], 0)\n", (6415, 6500), True, 'import numpy as np\n'), ((6516, 6612), 'numpy.where', 'np.where', (["((df['UpMove'] < df['DownMove']) & (df['DownMove'] > df['Zero']))", "df['DownMove']", '(0)'], {}), "((df['UpMove'] < df['DownMove']) & (df['DownMove'] > df['Zero']),\n df['DownMove'], 0)\n", (6524, 6612), True, 'import numpy as np\n'), ((9263, 9280), 'numpy.zeros', 'np.zeros', (['sc.size'], {}), '(sc.size)\n', (9271, 9280), True, 'import numpy as np\n'), ((1190, 1206), 'numpy.round', 'np.round', (['ave', '(3)'], {}), '(ave, 3)\n', (1198, 1206), True, 'import numpy as np\n'), ((1207, 1226), 'numpy.round', 'np.round', (['upband', '(3)'], {}), '(upband, 3)\n', (1215, 1226), True, 'import numpy as np\n'), ((1227, 1246), 'numpy.round', 'np.round', (['dnband', '(3)'], {}), '(dnband, 3)\n', (1235, 1246), True, 'import numpy as np\n')] |
"""
The PortOpt application is powered by multiple optimizers designed to implement theory in an elegant
and easy to use way.
This module consists all the functions required to run a portfolio optimization using parameters
that the user inputs
"""
import math
import numpy as np
from numpy import linalg as LA
import pandas as pd
import osqp
import scipy as sp
from scipy import sparse
def testFunction():
    """
    Smoke-test helper used to verify the module imports correctly.

    Parameters
    ----------
    This function has no parameters

    Returns
    ----------
    bool
        Always True.
    """
    return True
def preprocessData(data):
    """
    Helper function to create a covariance matrix and mean vector of
    100x log-returns.

    Parameters
    ----------
    data : Dictionary
        Dictionary containing Date, Ticker and Adjusted_Close price

    Returns
    -------
    meanVec : ndarray
        Per-asset mean of the 100x log-returns.
    sigMat : ndarray
        Covariance matrix of the 100x log-returns.
    """
    frame = pd.DataFrame.from_dict(data)
    df = frame[["Date", "Ticker", "Adjusted_Close"]]
    df.columns = ["date", "ticker", "price"]
    prices = df.pivot_table(index=["date"], columns="ticker", values=["price"])
    prices.columns = [col[1] for col in prices.columns.values]
    # Log-returns in percent; the first row is lost to the shift.
    # (Dead code removed: the original also built simple daily returns and
    # re-bound `data`, none of which fed the returned values.)
    df_logret = 100 * (np.log(prices) - np.log(prices.shift(1)))
    logret = np.array(df_logret[1:])
    sigMat = np.cov(logret, rowvar=False)
    meanVec = np.mean(logret, axis=0)
    return meanVec, sigMat
def SymPDcovmatrix(A, tol=None):
    """
    Correct a covariance matrix A to be symmetric positive definite.

    Symmetrizes A, shifts every eigenvalue below *tol* up to *tol* via an
    eigendecomposition, and re-symmetrizes the reconstruction.

    Parameters
    ----------
    A : Array like object
        Square covariance matrix.
    tol : float
        (optional, default tol = 1e-04) minimum value for all eigenvalues

    Returns
    -------
    A : Array
        corrected matrix A.
    e_min : float
        smallest eigenvalue of the corrected matrix (at least tol)
    """
    m, n = A.shape
    if n != m:
        print("Input matrix has to be a square matrix ")
    if not tol:
        tol = 1e-04
    A = (A + A.transpose()) / 2
    # eigh is the right routine for a symmetric matrix: guaranteed real
    # eigenvalues and orthonormal eigenvectors (the previous general eig
    # could leak tiny complex parts from round-off).
    D, V = LA.eigh(A)
    D = np.maximum(D, tol)  # clamp small/negative eigenvalues in one shot
    A = np.dot(np.dot(V, np.diag(D)), V.transpose())
    e_min = max(tol, min(D))
    A = (A + A.transpose()) / 2
    return A, e_min
def sigMatShrinkage(sigMat, lambda_l2):
    """
    Apply ridge-style shrinkage to a covariance matrix.

    Adds lambda_l2 times the average asset variance to the diagonal:
    sigMat + lambda_l2 * mean(sig**2) * I.

    The original carried an always-true `if 1 == 1` guard plus an unused
    correlation-matrix computation feeding the dead branch; both removed.

    Parameters
    ----------
    sigMat : Matrix
        Covariance matrix.
    lambda_l2 : Float
        Non-negative shrinkage intensity.

    Returns
    -------
    ndarray
        The shrunk covariance matrix.
    """
    d = sigMat.shape[0]
    sig = np.sqrt(np.diag(sigMat))
    # mean(sig ** 2) is kept (rather than mean(diag)) to reproduce the
    # original floating-point result exactly.
    return sigMat + lambda_l2 * np.mean(sig ** 2) * np.eye(d)
def Dmat(n, k):
    """
    Build the k-th order finite-difference matrix for n ordered assets.

    Parameters
    ----------
    n : int
        Number of assets.
    k : int
        Difference order (0 returns the identity).

    Returns
    -------
    D : Array
    """
    if k == 0:
        return np.eye(n)
    if k == 1:
        first_diff = np.eye(n - 1, n)
        for row in range(n - 1):
            first_diff[row, row + 1] = -1
        return first_diff
    # Higher orders: compose first differences of shrinking size.
    result = Dmat(n, 1)
    for step in range(k - 1):
        result = np.dot(Dmat(n - step - 1, 1), result)
    return result
def minimumVariancePortfolio(
    sigMat, longShort, maxAlloc=1, lambda_l1=0, lambda_l2=0, assetsOrder=None
):
    """
    Optimizes portfolio for minimum variance (QP solved with OSQP).

    Parameters
    ----------
    sigMat : Matrix
        Covariance matrix of asset returns.
    longShort : Float
        Takes value between 0 and 1; 0 means long-only, otherwise it
        bounds the total short exposure.
    maxAlloc : Float
        Takes value between 0 and 1. Specifies the maximum weight an asset can get
    lambda_l1 : Float
        Takes a value greater than 0. Specifies L1 penalty
    lambda_l2 : Float
        Takes a value greater than 0. Specifies L2 penalty
    assetsOrder : sequence, optional
        Permutation of assets; adds monotonicity constraints in that order.

    Returns
    -------
    w_opt : Array
        Returns the weights of given to each asset in form of a numpy array
    Var_opt : Float
        Returns the variance of the portfolio
    """
    d = sigMat.shape[0]
    if assetsOrder:
        # Permute the covariance into the requested asset order.
        temp = sigMat[:, assetsOrder]
        sigMat = temp[assetsOrder, :]
    if lambda_l2:
        sigMat = sigMatShrinkage(sigMat, lambda_l2)
        sigMat, e_min = SymPDcovmatrix(sigMat)
    else:
        sigMat, e_min = SymPDcovmatrix(sigMat)
    if longShort == 0:
        # Long-only QP: weights sum to 1, 0 <= w <= maxAlloc.
        Aeq = np.ones(d)
        Beq = 1
        LB = np.zeros(d)
        UB = maxAlloc * np.ones(d)
        if assetsOrder:
            # Monotonicity constraints w_i >= w_{i+1} via a first-difference matrix.
            L_ine = -np.ones(d - 1)
            D = np.eye(d - 1, d)
            for i in range(d - 1):
                D[i, i + 1] = -1
            A = -1 * D
            B = np.zeros(d - 1)
            A = np.vstack([A, Aeq, np.eye(d)])
            l = np.hstack([L_ine, Beq, LB])
            u = np.hstack([B, Beq, UB])
        else:
            A = np.vstack([Aeq, np.eye(d)])
            l = np.hstack([Beq, LB])
            u = np.hstack([Beq, UB])
        if lambda_l1:
            # L1 penalty enters the QP as a uniform linear term.
            meanVec = -lambda_l1 * np.ones(d)
        else:
            meanVec = -np.zeros(d)
        P = sparse.csc_matrix(sigMat)
        A = sparse.csc_matrix(A)
        prob = osqp.OSQP()
        # Setup workspace
        prob.setup(P, -meanVec, A, l, u, verbose=False)
        # Solve problem
        res = prob.solve()
        w_opt = res.x
        # NOTE(review): .all() is falsy whenever ANY single weight is 0,
        # not only on solver failure — the equal-weight fallback may fire
        # on legitimate sparse solutions.  Confirm intended behaviour.
        if not w_opt.all():
            w_opt = np.ones(d) / d
    elif longShort != 0:
        # Long/short case: variables are [w, u, v] with w = -u + v
        # (u = short part, v = long part); total gross exposure bounded.
        A = np.hstack([np.zeros(d), np.ones(d), np.zeros(d)])
        B = 1 + abs(longShort)
        Grenze = min(abs(longShort), maxAlloc)  # per-asset short bound
        if assetsOrder:
            L_ine = np.hstack([0, -(1 + 2 * Grenze) * np.ones(d - 1)])
            D = np.eye(d - 1, d)
            for i in range(d - 1):
                D[i, i + 1] = -1
            A = np.vstack([A, np.hstack([-1 * D, np.zeros((d - 1, 2 * d))])])
            B = np.hstack([B, np.zeros(d - 1)])
        else:
            L_ine = 0
        Aeq = np.vstack(
            [
                np.hstack([np.eye(d), -np.eye(d), np.eye(d)]),
                np.hstack([np.ones(d), np.zeros(d), np.zeros(d)]),
            ]
        )
        Beq = np.hstack([np.zeros(d), 1])
        LB = np.hstack([-Grenze * np.ones(d), np.zeros(2 * d)])
        UB = maxAlloc * np.ones(3 * d)
        # Embed sigMat in the 3d space; the diagonal perturbation keeps
        # the extended matrix usable by the solver (slightly indefinite
        # on w, positive on u/v).
        sigMat3d = np.vstack(
            [np.hstack([sigMat, np.zeros((d, 2 * d))]), np.zeros((2 * d, 3 * d))]
        )
        sigMat3d = sigMat3d + np.diag(
            np.hstack([-0.1 * e_min * np.ones(d), 0.1 * e_min * np.ones(2 * d)])
        )
        if lambda_l1:
            # Penalize the auxiliary u/v variables (i.e. gross exposure).
            meanvec3d = np.hstack([np.zeros(d), -lambda_l1 * np.ones(2 * d)])
        else:
            meanvec3d = np.hstack([np.zeros(d), np.zeros(2 * d)])
        A = np.vstack([A, Aeq, np.eye(3 * d)])
        l = np.hstack([L_ine, Beq, LB])
        u = np.hstack([B, Beq, UB])
        A = sparse.csc_matrix(A)
        sigMat3d = sparse.csc_matrix(sigMat3d)
        prob = osqp.OSQP()
        # Setup workspace
        prob.setup(sigMat3d, -meanvec3d, A, l, u, verbose=False)
        # Solve problem
        res = prob.solve()
        wuv_opt = res.x
        if not wuv_opt.all():
            w_opt = np.ones(d) / d
        else:
            w_opt = wuv_opt[:d]
    t = np.dot(w_opt, sigMat)
    Var_opt = np.dot(t, w_opt.transpose())
    if assetsOrder:
        # Undo the permutation so weights line up with the caller's order.
        w_opt = w_opt[assetsOrder]
    # if exitflag!=1:
    # print("minimumVariancePortfolio: Exitflag different than 1 in quadprog")
    return w_opt, Var_opt
def meanVariancePortfolioReturnsTarget(
    meanVec,
    sigMat,
    retTarget,
    longShort,
    maxAlloc=1,
    lambda_l1=0,
    lambda_l2=0,
    assetsOrder=None,
):
    """
    Mean-Variance portfolio for a target return (QP solved with OSQP).

    Parameters
    ----------
    meanVec : Array
        A vector of mean returns of assets
    sigMat : Matrix
        A covariance matrix of appropriate dimensions
    retTarget : Float
        Target return percentage (annualized). Values specified between 0 and 100
    longShort : Float
        Takes value between 0 and 1
    maxAlloc : Float
        Takes value between 0 and 1. Specifies the maximum weight an asset can get
    lambda_l1 : Float
        Takes a value greater than 0. Specifies L1 penalty
    lambda_l2 : Float
        Takes a value greater than 0. Specifies L2 penalty
    assetsOrder : sequence, optional
        Permutation of assets; adds monotonicity constraints in that order.

    Returns
    -------
    w_opt : Array
        Returns the weights of given to each asset in form of a numpy array
    Var_opt : Float
        Returns the variance of the portfolio
    """
    # Convert the annualized % target into a daily % target (250 trading days).
    dailyRetTarget = 100 * ((retTarget / 100 + 1) ** (1 / 250) - 1)
    minEret = min(meanVec)
    maxEret = max(meanVec)
    # Clip the target into the attainable [min, max] expected-return range.
    if (dailyRetTarget < minEret) or (maxEret < dailyRetTarget):
        part1 = minEret
        part2 = min(maxEret, dailyRetTarget)
        dailyRetTarget = max(part1, part2)
    d = sigMat.shape[0]
    if assetsOrder:
        # Permute the covariance and means into the requested asset order.
        temp = sigMat[:, assetsOrder]
        sigMat = temp[assetsOrder, :]
        meanVec = meanVec[assetsOrder]
    if lambda_l2:
        sigMat = sigMatShrinkage(sigMat, lambda_l2)
        sigMat, e_min = SymPDcovmatrix(sigMat)
    else:
        sigMat, e_min = SymPDcovmatrix(sigMat)
    if longShort == 0:
        # Long-only QP: weights sum to 1, 0 <= w <= maxAlloc, plus the
        # return-target inequality meanVec @ w >= tau (encoded as -mean <= -tau).
        Aeq = np.ones(d)
        Beq = 1
        LB = np.zeros(d)
        UB = maxAlloc * np.ones(d)
        if assetsOrder:
            L_ine = np.hstack([-np.inf, -np.ones(d - 1)])
            tau = dailyRetTarget
            A = -meanVec
            B = -tau
            # Monotonicity constraints via a first-difference matrix.
            A = np.vstack([A, -1 * Dmat(d, 1)])
            B = np.hstack([B, np.zeros(d - 1)])
        else:
            tau = dailyRetTarget
            A = -meanVec
            B = -tau
            L_ine = -np.inf
        if lambda_l1:
            # L1 penalty scaled by the means (differs from the uniform
            # penalty used in minimumVariancePortfolio).
            meanVec = -lambda_l1 * meanVec
        else:
            meanVec = -np.zeros(d)
        A = np.vstack([A, Aeq, np.eye(d)])
        l = np.hstack([L_ine, Beq, LB])
        u = np.hstack([B, Beq, UB])
        P = sparse.csc_matrix(sigMat)
        A = sparse.csc_matrix(A)
        prob = osqp.OSQP()
        # Setup workspace
        prob.setup(P, -meanVec, A, l, u, verbose=False)
        # Solve problem
        res = prob.solve()
        w_opt = res.x
        # NOTE(review): .all() is falsy whenever ANY weight is 0, not only
        # on solver failure — the equal-weight fallback may fire on
        # legitimate sparse solutions.  Confirm intended behaviour.
        if not w_opt.all():
            w_opt = np.ones(d) / d
    elif longShort != 0:
        # Long/short case: variables are [w, u, v] with w = -u + v;
        # total gross exposure bounded by 1 + |longShort|.
        A = np.hstack([np.zeros(d), np.ones(d), np.zeros(d)])
        B = 1 + abs(longShort)
        Grenze = min(abs(longShort), maxAlloc)  # per-asset short bound
        if assetsOrder:
            tau = dailyRetTarget
            A = np.vstack([A, np.hstack([-meanVec, np.zeros(2 * d)])])
            B = np.hstack([B, -tau])
            A = np.vstack([A, np.hstack([-1 * Dmat(d, 1), np.zeros((d - 1, 2 * d))])])
            B = np.hstack([B, np.zeros(d - 1)])
            L_ine = np.hstack([0, -np.inf, -(1 + 2 * Grenze) * np.ones(d - 1)])
        else:
            tau = dailyRetTarget
            A = np.vstack([A, np.hstack([-meanVec, np.zeros(2 * d)])])
            B = np.hstack([B, -tau])
            L_ine = np.hstack([0, -np.inf])
        Aeq = np.vstack(
            [
                np.hstack([np.eye(d), -np.eye(d), np.eye(d)]),
                np.hstack([np.ones((1, d)), np.zeros((1, d)), np.zeros((1, d))]),
            ]
        )
        Beq = np.hstack([np.zeros(d), 1])
        LB = np.hstack([-Grenze * np.ones(d), np.zeros(2 * d)])
        UB = maxAlloc * np.ones(3 * d)
        # Embed sigMat in the 3d space with a small diagonal perturbation.
        sigMat3d = np.vstack(
            [np.hstack([sigMat, np.zeros((d, 2 * d))]), np.zeros((2 * d, 3 * d))]
        )
        sigMat3d = sigMat3d + np.diag(
            np.hstack([-0.1 * e_min * np.ones(d), 0.1 * e_min * np.ones(2 * d)])
        )
        if lambda_l1:
            # Penalize the auxiliary u/v variables (gross exposure).
            meanvec3d = np.hstack([np.zeros(d), -lambda_l1 * np.ones(2 * d)])
        else:
            meanvec3d = np.hstack([np.zeros(d), np.zeros(2 * d)])
        A = np.vstack([A, Aeq, np.eye(3 * d)])
        l = np.hstack([L_ine, Beq, LB])
        u = np.hstack([B, Beq, UB])
        A = sparse.csc_matrix(A)
        sigMat3d = sparse.csc_matrix(sigMat3d)
        prob = osqp.OSQP()
        # Setup workspace
        prob.setup(sigMat3d, -meanvec3d, A, l, u, verbose=False)
        # Solve problem
        res = prob.solve()
        wuv_opt = res.x
        if not wuv_opt.all():
            w_opt = np.ones(d) / d
        else:
            w_opt = wuv_opt[:d]
    t = np.dot(w_opt, sigMat)
    Var_opt = np.dot(t, w_opt.transpose())
    if assetsOrder:
        # Undo the permutation so weights line up with the caller's order.
        w_opt = w_opt[assetsOrder]
    # if exitflag!=1:
    # print("minimumVariancePortfolio: Exitflag different than 1 in quadprog")
    return w_opt, Var_opt
def check_missing(df_logret):
    """
    Drop the stocks (columns) of a price window that contain any missing value.

    Parameters
    ----------
    df_logret : pandas.core.frame.DataFrame
        the price window (rows = dates, columns = stocks)

    Returns
    -------
    res : pandas.core.frame.DataFrame
        the price window restricted to fully-observed stocks

    Notes
    -----
    Bug fix: the previous implementation left its 'missing_flag' helper
    column in the returned (transposed) frame, so every window handed to
    the optimizer carried a spurious all-ones data row.  The helper column
    is now dropped before transposing back.
    """
    df_logret = df_logret.transpose()
    # One flag per stock: 1 = complete history, 0 = has at least one NaN.
    flag = np.zeros(len(df_logret))
    for i in range(len(df_logret)):
        if df_logret.iloc[i, :].isnull().any():
            flag[i] = 0
        else:
            flag[i] = 1
    df_logret["missing_flag"] = flag
    res = df_logret.loc[df_logret["missing_flag"] == 1]
    # Remove the helper column so it does not reappear as a data row.
    res = res.drop(columns=["missing_flag"])
    return res.transpose()
def rollingwindow_backtest(
    optimizerName,
    data,
    window_size,
    rebalance_time,
    maxAlloc=1,
    riskAversion=0,
    meanQuantile=0,
    retTarget=0,
    longShort=0,
    lambda_l1=0,
    lambda_l2=0,
    assetsOrder=None,
):
    """
    Run a rolling-window back test for the named optimizer.

    Parameters
    ----------
    optimizerName : String
        The name of the optimizer to use for rolling window exercise
    data : Dictionary
        Data with Ticker, Date and Adjusted Close price
    window_size : int
        parameter for the size of rolling window
    rebalance_time : int
        rebalance time of rolling window test
    maxAlloc : Float
        Takes value between 0 and 1. Specifies the maximum weight an asset can get
    riskAversion : Float
        Risk aversion for your portfolio. Takes values greater than 0
    meanQuantile : Float
        Takes values between 0 and 1
    retTarget : Float
        Target returns in percentage for optimizer. Takes values between 0 and 100
    longShort : Float
        Takes value between 0 and 1
    lambda_l1 : Float
        Takes a value greater than 0. Specifies L1 penalty
    lambda_l2 : Float
        Takes a value greater than 0. Specifies L2 penalty

    Returns
    -------
    R : 2d array
        return matrix depends on the rebalance time
    logret: 2d array
        log return matrix for each stocks
    w_all: 2d array
        optimal weight for each rebalance time
    rownames: array
        date time of rolling window test

    Notes
    -------
    Some parameters (riskAversion, meanQuantile, assetsOrder) are reserved
    for future versions of the optimizers and are currently unused.
    """
    df = pd.DataFrame(data)
    df.columns = ["date", "ticker", "price"]
    df1 = df.pivot_table(index=["date"], columns="ticker", values=["price"])
    df1.columns = [col[1] for col in df1.columns.values]
    # 100x log-returns; first row lost to the shift.
    df_logret = 100 * (np.log(df1) - np.log(df1.shift(1)))
    df_logret = df_logret[1:]
    logret = np.array(df_logret)
    n = logret.shape[0]
    d = rebalance_time
    start = window_size
    R = None
    portfolio_return = None
    w_all = None
    for i in range(start, n, d):
        k = 0
        w_opt = np.zeros(df1.shape[1])
        # import pdb; pdb.set_trace()
        window = check_missing(df_logret[i - window_size : i] / 100)
        m = window.shape[0]
        sample_stocks = window.columns
        # NOTE(review): iloc[: n - 1] uses the OUTER sample length n, not
        # the window length — confirm this slice is intentional.
        logret_window = np.array(window.iloc[: n - 1])
        sigMat = np.cov(logret_window, rowvar=False)
        meanVec = np.mean(logret_window, axis=0) / 100
        if optimizerName == "minimumVariancePortfolio":
            # NOTE(review): minimumVariancePortfolio's signature is
            # (sigMat, longShort, maxAlloc, lambda_l1, lambda_l2) — this
            # call passes maxAlloc into the longShort slot and longShort
            # into maxAlloc.  Looks like an argument-order bug; confirm.
            w_sample, _ = minimumVariancePortfolio(
                sigMat,
                float(maxAlloc),
                float(longShort),
                float(lambda_l1),
                float(lambda_l2),
            )
        elif optimizerName == "meanVariancePortfolioReturnsTarget":
            # NOTE(review): the signature here is (meanVec, sigMat,
            # retTarget, longShort, maxAlloc, ...) — maxAlloc and
            # longShort appear swapped in this call as well; confirm.
            w_sample, _ = meanVariancePortfolioReturnsTarget(
                meanVec,
                sigMat,
                float(retTarget),
                float(maxAlloc),
                float(longShort),
                float(lambda_l1),
                float(lambda_l2),
            )
        elif optimizerName == "test":
            import test
            test.displayText()
        # Scatter the optimizer's weights (over the surviving stocks)
        # back into the full asset universe; dropped stocks stay at 0.
        for j in range(df1.shape[1]):
            if df1.columns[j] in sample_stocks:
                w_opt[j] = w_sample[k]
                k += 1
        if w_all is None:
            w_all = w_opt
        else:
            w_all = np.vstack([w_all, w_opt])
        # Realized simple returns over the holding period until rebalance.
        if (i + d) < n:
            if R is None:
                logret_sample = np.nan_to_num(logret[i : i + d], nan=0)
                simple_returns = 100 * (math.exp(1) ** (logret_sample / 100) - 1)
                R = np.dot(w_opt, simple_returns.transpose())
            else:
                logret_sample = np.nan_to_num(logret[i : i + d], nan=0)
                simple_returns = 100 * (math.exp(1) ** (logret_sample / 100) - 1)
                R = np.hstack([R, np.dot(w_opt, simple_returns.transpose())])
        elif (i + d) >= n:
            # Final (possibly short) holding period.
            logret_sample = np.nan_to_num(logret[i:], nan=0)
            simple_returns = 100 * (math.exp(1) ** (logret_sample / 100) - 1)
            R = np.hstack([R, np.dot(w_opt, simple_returns.transpose())])
    rownames = df1.index[start + 1 :]
    return R, df_logret, w_all, rownames
if __name__ == "__main__":
    # Module is import-only; no command-line behaviour is defined.
    pass
| [
"numpy.nan_to_num",
"numpy.ones",
"numpy.mean",
"numpy.diag",
"pandas.DataFrame",
"numpy.linalg.eig",
"osqp.OSQP",
"test.displayText",
"numpy.cov",
"pandas.DataFrame.from_dict",
"numpy.hstack",
"numpy.dot",
"numpy.vstack",
"math.exp",
"numpy.log",
"numpy.zeros",
"scipy.sparse.csc_mat... | [((931, 959), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (953, 959), True, 'import pandas as pd\n'), ((1293, 1312), 'numpy.array', 'np.array', (['df_logret'], {}), '(df_logret)\n', (1301, 1312), True, 'import numpy as np\n'), ((1410, 1424), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1418, 1424), True, 'import numpy as np\n'), ((1445, 1471), 'numpy.array', 'np.array', (['df_daily_returns'], {}), '(df_daily_returns)\n', (1453, 1471), True, 'import numpy as np\n'), ((1509, 1537), 'numpy.cov', 'np.cov', (['logret'], {'rowvar': '(False)'}), '(logret, rowvar=False)\n', (1515, 1537), True, 'import numpy as np\n'), ((1552, 1575), 'numpy.mean', 'np.mean', (['logret'], {'axis': '(0)'}), '(logret, axis=0)\n', (1559, 1575), True, 'import numpy as np\n'), ((2253, 2262), 'numpy.linalg.eig', 'LA.eig', (['A'], {}), '(A)\n', (2259, 2262), True, 'from numpy import linalg as LA\n'), ((2346, 2356), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (2353, 2356), True, 'import numpy as np\n'), ((2365, 2377), 'numpy.dot', 'np.dot', (['V', 'D'], {}), '(V, D)\n', (2371, 2377), True, 'import numpy as np\n'), ((7730, 7751), 'numpy.dot', 'np.dot', (['w_opt', 'sigMat'], {}), '(w_opt, sigMat)\n', (7736, 7751), True, 'import numpy as np\n'), ((12719, 12740), 'numpy.dot', 'np.dot', (['w_opt', 'sigMat'], {}), '(w_opt, sigMat)\n', (12725, 12740), True, 'import numpy as np\n'), ((15389, 15407), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (15401, 15407), True, 'import pandas as pd\n'), ((15690, 15709), 'numpy.array', 'np.array', (['df_logret'], {}), '(df_logret)\n', (15698, 15709), True, 'import numpy as np\n'), ((2758, 2773), 'numpy.diag', 'np.diag', (['sigMat'], {}), '(sigMat)\n', (2765, 2773), True, 'import numpy as np\n'), ((2790, 2808), 'numpy.diag', 'np.diag', (['(sig ** -1)'], {}), '(sig ** -1)\n', (2797, 2808), True, 'import numpy as np\n'), ((2844, 2862), 'numpy.diag', 'np.diag', (['(sig ** 
-1)'], {}), '(sig ** -1)\n', (2851, 2862), True, 'import numpy as np\n'), ((3587, 3596), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (3593, 3596), True, 'import numpy as np\n'), ((4913, 4923), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (4920, 4923), True, 'import numpy as np\n'), ((4953, 4964), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (4961, 4964), True, 'import numpy as np\n'), ((5610, 5635), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['sigMat'], {}), '(sigMat)\n', (5627, 5635), False, 'from scipy import sparse\n'), ((5648, 5668), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['A'], {}), '(A)\n', (5665, 5668), False, 'from scipy import sparse\n'), ((5685, 5696), 'osqp.OSQP', 'osqp.OSQP', ([], {}), '()\n', (5694, 5696), False, 'import osqp\n'), ((9652, 9662), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (9659, 9662), True, 'import numpy as np\n'), ((9692, 9703), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (9700, 9703), True, 'import numpy as np\n'), ((10289, 10316), 'numpy.hstack', 'np.hstack', (['[L_ine, Beq, LB]'], {}), '([L_ine, Beq, LB])\n', (10298, 10316), True, 'import numpy as np\n'), ((10329, 10352), 'numpy.hstack', 'np.hstack', (['[B, Beq, UB]'], {}), '([B, Beq, UB])\n', (10338, 10352), True, 'import numpy as np\n'), ((10365, 10390), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['sigMat'], {}), '(sigMat)\n', (10382, 10390), False, 'from scipy import sparse\n'), ((10403, 10423), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['A'], {}), '(A)\n', (10420, 10423), False, 'from scipy import sparse\n'), ((10440, 10451), 'osqp.OSQP', 'osqp.OSQP', ([], {}), '()\n', (10449, 10451), False, 'import osqp\n'), ((15902, 15924), 'numpy.zeros', 'np.zeros', (['df1.shape[1]'], {}), '(df1.shape[1])\n', (15910, 15924), True, 'import numpy as np\n'), ((16123, 16152), 'numpy.array', 'np.array', (['window.iloc[:n - 1]'], {}), '(window.iloc[:n - 1])\n', (16131, 16152), True, 'import numpy as np\n'), ((16171, 16206), 'numpy.cov', 'np.cov', 
(['logret_window'], {'rowvar': '(False)'}), '(logret_window, rowvar=False)\n', (16177, 16206), True, 'import numpy as np\n'), ((1214, 1225), 'numpy.log', 'np.log', (['df1'], {}), '(df1)\n', (1220, 1225), True, 'import numpy as np\n'), ((2436, 2446), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (2443, 2446), True, 'import numpy as np\n'), ((2956, 2979), 'numpy.diag', 'np.diag', (['corrMat', '(k + 1)'], {}), '(corrMat, k + 1)\n', (2963, 2979), True, 'import numpy as np\n'), ((3626, 3642), 'numpy.eye', 'np.eye', (['(n - 1)', 'n'], {}), '(n - 1, n)\n', (3632, 3642), True, 'import numpy as np\n'), ((4989, 4999), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (4996, 4999), True, 'import numpy as np\n'), ((5076, 5092), 'numpy.eye', 'np.eye', (['(d - 1)', 'd'], {}), '(d - 1, d)\n', (5082, 5092), True, 'import numpy as np\n'), ((5200, 5215), 'numpy.zeros', 'np.zeros', (['(d - 1)'], {}), '(d - 1)\n', (5208, 5215), True, 'import numpy as np\n'), ((5279, 5306), 'numpy.hstack', 'np.hstack', (['[L_ine, Beq, LB]'], {}), '([L_ine, Beq, LB])\n', (5288, 5306), True, 'import numpy as np\n'), ((5323, 5346), 'numpy.hstack', 'np.hstack', (['[B, Beq, UB]'], {}), '([B, Beq, UB])\n', (5332, 5346), True, 'import numpy as np\n'), ((5421, 5441), 'numpy.hstack', 'np.hstack', (['[Beq, LB]'], {}), '([Beq, LB])\n', (5430, 5441), True, 'import numpy as np\n'), ((5458, 5478), 'numpy.hstack', 'np.hstack', (['[Beq, UB]'], {}), '([Beq, UB])\n', (5467, 5478), True, 'import numpy as np\n'), ((7271, 7298), 'numpy.hstack', 'np.hstack', (['[L_ine, Beq, LB]'], {}), '([L_ine, Beq, LB])\n', (7280, 7298), True, 'import numpy as np\n'), ((7311, 7334), 'numpy.hstack', 'np.hstack', (['[B, Beq, UB]'], {}), '([B, Beq, UB])\n', (7320, 7334), True, 'import numpy as np\n'), ((7348, 7368), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['A'], {}), '(A)\n', (7365, 7368), False, 'from scipy import sparse\n'), ((7388, 7415), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['sigMat3d'], {}), '(sigMat3d)\n', (7405, 
7415), False, 'from scipy import sparse\n'), ((7432, 7443), 'osqp.OSQP', 'osqp.OSQP', ([], {}), '()\n', (7441, 7443), False, 'import osqp\n'), ((9728, 9738), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (9735, 9738), True, 'import numpy as np\n'), ((12263, 12290), 'numpy.hstack', 'np.hstack', (['[L_ine, Beq, LB]'], {}), '([L_ine, Beq, LB])\n', (12272, 12290), True, 'import numpy as np\n'), ((12303, 12326), 'numpy.hstack', 'np.hstack', (['[B, Beq, UB]'], {}), '([B, Beq, UB])\n', (12312, 12326), True, 'import numpy as np\n'), ((12339, 12359), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['A'], {}), '(A)\n', (12356, 12359), False, 'from scipy import sparse\n'), ((12379, 12406), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['sigMat3d'], {}), '(sigMat3d)\n', (12396, 12406), False, 'from scipy import sparse\n'), ((12422, 12433), 'osqp.OSQP', 'osqp.OSQP', ([], {}), '()\n', (12431, 12433), False, 'import osqp\n'), ((15611, 15622), 'numpy.log', 'np.log', (['df1'], {}), '(df1)\n', (15617, 15622), True, 'import numpy as np\n'), ((16225, 16255), 'numpy.mean', 'np.mean', (['logret_window'], {'axis': '(0)'}), '(logret_window, axis=0)\n', (16232, 16255), True, 'import numpy as np\n'), ((17248, 17273), 'numpy.vstack', 'np.vstack', (['[w_all, w_opt]'], {}), '([w_all, w_opt])\n', (17257, 17273), True, 'import numpy as np\n'), ((3131, 3140), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3137, 3140), True, 'import numpy as np\n'), ((3183, 3195), 'numpy.mean', 'np.mean', (['sig'], {}), '(sig)\n', (3190, 3195), True, 'import numpy as np\n'), ((3198, 3207), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3204, 3207), True, 'import numpy as np\n'), ((3221, 3230), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3227, 3230), True, 'import numpy as np\n'), ((3819, 3832), 'numpy.dot', 'np.dot', (['Dn', 'D'], {}), '(Dn, D)\n', (3825, 3832), True, 'import numpy as np\n'), ((5045, 5059), 'numpy.ones', 'np.ones', (['(d - 1)'], {}), '(d - 1)\n', (5052, 5059), True, 'import numpy as np\n'), 
((5537, 5547), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (5544, 5547), True, 'import numpy as np\n'), ((5585, 5596), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (5593, 5596), True, 'import numpy as np\n'), ((5900, 5910), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (5907, 5910), True, 'import numpy as np\n'), ((6192, 6208), 'numpy.eye', 'np.eye', (['(d - 1)', 'd'], {}), '(d - 1, d)\n', (6198, 6208), True, 'import numpy as np\n'), ((6762, 6776), 'numpy.ones', 'np.ones', (['(3 * d)'], {}), '(3 * d)\n', (6769, 6776), True, 'import numpy as np\n'), ((10221, 10232), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (10229, 10232), True, 'import numpy as np\n'), ((10265, 10274), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (10271, 10274), True, 'import numpy as np\n'), ((10655, 10665), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (10662, 10665), True, 'import numpy as np\n'), ((10981, 11001), 'numpy.hstack', 'np.hstack', (['[B, -tau]'], {}), '([B, -tau])\n', (10990, 11001), True, 'import numpy as np\n'), ((11351, 11371), 'numpy.hstack', 'np.hstack', (['[B, -tau]'], {}), '([B, -tau])\n', (11360, 11371), True, 'import numpy as np\n'), ((11392, 11415), 'numpy.hstack', 'np.hstack', (['[0, -np.inf]'], {}), '([0, -np.inf])\n', (11401, 11415), True, 'import numpy as np\n'), ((11755, 11769), 'numpy.ones', 'np.ones', (['(3 * d)'], {}), '(3 * d)\n', (11762, 11769), True, 'import numpy as np\n'), ((17357, 17394), 'numpy.nan_to_num', 'np.nan_to_num', (['logret[i:i + d]'], {'nan': '(0)'}), '(logret[i:i + d], nan=0)\n', (17370, 17394), True, 'import numpy as np\n'), ((17591, 17628), 'numpy.nan_to_num', 'np.nan_to_num', (['logret[i:i + d]'], {'nan': '(0)'}), '(logret[i:i + d], nan=0)\n', (17604, 17628), True, 'import numpy as np\n'), ((17846, 17878), 'numpy.nan_to_num', 'np.nan_to_num', (['logret[i:]'], {'nan': '(0)'}), '(logret[i:], nan=0)\n', (17859, 17878), True, 'import numpy as np\n'), ((3032, 3055), 'numpy.diag', 'np.diag', (['corrMat', '(k + 1)'], {}), '(corrMat, k 
+ 1)\n', (3039, 3055), True, 'import numpy as np\n'), ((3111, 3128), 'numpy.mean', 'np.mean', (['(sig ** 2)'], {}), '(sig ** 2)\n', (3118, 3128), True, 'import numpy as np\n'), ((3263, 3277), 'numpy.mean', 'np.mean', (['corrs'], {}), '(corrs)\n', (3270, 3277), True, 'import numpy as np\n'), ((5251, 5260), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5257, 5260), True, 'import numpy as np\n'), ((5393, 5402), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5399, 5402), True, 'import numpy as np\n'), ((5964, 5975), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (5972, 5975), True, 'import numpy as np\n'), ((5977, 5987), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (5984, 5987), True, 'import numpy as np\n'), ((5989, 6000), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (5997, 6000), True, 'import numpy as np\n'), ((6657, 6668), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (6665, 6668), True, 'import numpy as np\n'), ((6720, 6735), 'numpy.zeros', 'np.zeros', (['(2 * d)'], {}), '(2 * d)\n', (6728, 6735), True, 'import numpy as np\n'), ((6863, 6887), 'numpy.zeros', 'np.zeros', (['(2 * d, 3 * d)'], {}), '((2 * d, 3 * d))\n', (6871, 6887), True, 'import numpy as np\n'), ((7243, 7256), 'numpy.eye', 'np.eye', (['(3 * d)'], {}), '(3 * d)\n', (7249, 7256), True, 'import numpy as np\n'), ((7660, 7670), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (7667, 7670), True, 'import numpy as np\n'), ((9979, 9994), 'numpy.zeros', 'np.zeros', (['(d - 1)'], {}), '(d - 1)\n', (9987, 9994), True, 'import numpy as np\n'), ((10719, 10730), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (10727, 10730), True, 'import numpy as np\n'), ((10732, 10742), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (10739, 10742), True, 'import numpy as np\n'), ((10744, 10755), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (10752, 10755), True, 'import numpy as np\n'), ((11650, 11661), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (11658, 11661), True, 'import numpy as np\n'), ((11713, 11728), 
'numpy.zeros', 'np.zeros', (['(2 * d)'], {}), '(2 * d)\n', (11721, 11728), True, 'import numpy as np\n'), ((11856, 11880), 'numpy.zeros', 'np.zeros', (['(2 * d, 3 * d)'], {}), '((2 * d, 3 * d))\n', (11864, 11880), True, 'import numpy as np\n'), ((12235, 12248), 'numpy.eye', 'np.eye', (['(3 * d)'], {}), '(3 * d)\n', (12241, 12248), True, 'import numpy as np\n'), ((12650, 12660), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (12657, 12660), True, 'import numpy as np\n'), ((16981, 16999), 'test.displayText', 'test.displayText', ([], {}), '()\n', (16997, 16999), False, 'import test\n'), ((3234, 3247), 'numpy.ones', 'np.ones', (['d', 'd'], {}), '(d, d)\n', (3241, 3247), True, 'import numpy as np\n'), ((3250, 3259), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3256, 3259), True, 'import numpy as np\n'), ((3337, 3349), 'numpy.mean', 'np.mean', (['sig'], {}), '(sig)\n', (3344, 3349), True, 'import numpy as np\n'), ((3352, 3361), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3358, 3361), True, 'import numpy as np\n'), ((6385, 6400), 'numpy.zeros', 'np.zeros', (['(d - 1)'], {}), '(d - 1)\n', (6393, 6400), True, 'import numpy as np\n'), ((6708, 6718), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (6715, 6718), True, 'import numpy as np\n'), ((7088, 7099), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (7096, 7099), True, 'import numpy as np\n'), ((7180, 7191), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (7188, 7191), True, 'import numpy as np\n'), ((7193, 7208), 'numpy.zeros', 'np.zeros', (['(2 * d)'], {}), '(2 * d)\n', (7201, 7208), True, 'import numpy as np\n'), ((9805, 9819), 'numpy.ones', 'np.ones', (['(d - 1)'], {}), '(d - 1)\n', (9812, 9819), True, 'import numpy as np\n'), ((11119, 11134), 'numpy.zeros', 'np.zeros', (['(d - 1)'], {}), '(d - 1)\n', (11127, 11134), True, 'import numpy as np\n'), ((11701, 11711), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (11708, 11711), True, 'import numpy as np\n'), ((12080, 12091), 'numpy.zeros', 'np.zeros', (['d'], {}), 
'(d)\n', (12088, 12091), True, 'import numpy as np\n'), ((12172, 12183), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (12180, 12183), True, 'import numpy as np\n'), ((12185, 12200), 'numpy.zeros', 'np.zeros', (['(2 * d)'], {}), '(2 * d)\n', (12193, 12200), True, 'import numpy as np\n'), ((6159, 6173), 'numpy.ones', 'np.ones', (['(d - 1)'], {}), '(d - 1)\n', (6166, 6173), True, 'import numpy as np\n'), ((6505, 6514), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (6511, 6514), True, 'import numpy as np\n'), ((6528, 6537), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (6534, 6537), True, 'import numpy as np\n'), ((6568, 6578), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (6575, 6578), True, 'import numpy as np\n'), ((6580, 6591), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (6588, 6591), True, 'import numpy as np\n'), ((6593, 6604), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (6601, 6604), True, 'import numpy as np\n'), ((6839, 6859), 'numpy.zeros', 'np.zeros', (['(d, 2 * d)'], {}), '((d, 2 * d))\n', (6847, 6859), True, 'import numpy as np\n'), ((7114, 7128), 'numpy.ones', 'np.ones', (['(2 * d)'], {}), '(2 * d)\n', (7121, 7128), True, 'import numpy as np\n'), ((11200, 11214), 'numpy.ones', 'np.ones', (['(d - 1)'], {}), '(d - 1)\n', (11207, 11214), True, 'import numpy as np\n'), ((11483, 11492), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (11489, 11492), True, 'import numpy as np\n'), ((11506, 11515), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (11512, 11515), True, 'import numpy as np\n'), ((11546, 11561), 'numpy.ones', 'np.ones', (['(1, d)'], {}), '((1, d))\n', (11553, 11561), True, 'import numpy as np\n'), ((11563, 11579), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (11571, 11579), True, 'import numpy as np\n'), ((11581, 11597), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (11589, 11597), True, 'import numpy as np\n'), ((11832, 11852), 'numpy.zeros', 'np.zeros', (['(d, 2 * d)'], {}), '((d, 2 * d))\n', (11840, 11852), True, 
'import numpy as np\n'), ((12106, 12120), 'numpy.ones', 'np.ones', (['(2 * d)'], {}), '(2 * d)\n', (12113, 12120), True, 'import numpy as np\n'), ((17437, 17448), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (17445, 17448), False, 'import math\n'), ((17671, 17682), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (17679, 17682), False, 'import math\n'), ((17915, 17926), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (17923, 17926), False, 'import math\n'), ((6326, 6350), 'numpy.zeros', 'np.zeros', (['(d - 1, 2 * d)'], {}), '((d - 1, 2 * d))\n', (6334, 6350), True, 'import numpy as np\n'), ((6517, 6526), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (6523, 6526), True, 'import numpy as np\n'), ((6977, 6987), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (6984, 6987), True, 'import numpy as np\n'), ((7003, 7017), 'numpy.ones', 'np.ones', (['(2 * d)'], {}), '(2 * d)\n', (7010, 7017), True, 'import numpy as np\n'), ((10945, 10960), 'numpy.zeros', 'np.zeros', (['(2 * d)'], {}), '(2 * d)\n', (10953, 10960), True, 'import numpy as np\n'), ((11060, 11084), 'numpy.zeros', 'np.zeros', (['(d - 1, 2 * d)'], {}), '((d - 1, 2 * d))\n', (11068, 11084), True, 'import numpy as np\n'), ((11315, 11330), 'numpy.zeros', 'np.zeros', (['(2 * d)'], {}), '(2 * d)\n', (11323, 11330), True, 'import numpy as np\n'), ((11495, 11504), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (11501, 11504), True, 'import numpy as np\n'), ((11969, 11979), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (11976, 11979), True, 'import numpy as np\n'), ((11995, 12009), 'numpy.ones', 'np.ones', (['(2 * d)'], {}), '(2 * d)\n', (12002, 12009), True, 'import numpy as np\n')] |
# Originally by adamb70 from https://github.com/adamb70/Python-Spherical-Projection
# Modified to be used with Source Engine cubemaps.
# Converted to numpy to achieve reasonable performance.
import numpy
from numpy import ndarray
from enum import IntEnum
from typing import Tuple
def spherical_coordinates(i: ndarray, j: ndarray, w: float, h: float) -> Tuple[ndarray, ndarray]:
    """Map output-image pixel indices to (latitude, longitude) on the sphere.

    ``i``/``j`` are column/row indices into the equirectangular output image
    of size ``w`` x ``h``; the returned pair is ``(phi, theta)`` = (lat, long).
    """
    # Normalize both axes into [-1, 1], then scale to the angle ranges
    # [-pi/2, pi/2] for latitude and [-pi, pi] for longitude.
    lat = (2 * j / h - 1) * (numpy.pi / 2)
    lon = (2 * i / w - 1) * numpy.pi
    return lat, lon
def vector_coordinates(phi: ndarray, theta: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
    """Convert sphere angles (phi, theta) into a unit direction vector (x, y, z)."""
    cos_phi = numpy.cos(phi)
    x = cos_phi * numpy.cos(theta)
    y = numpy.sin(phi)
    z = cos_phi * numpy.sin(theta)
    return x, y, z
class CubemapFace(IntEnum):
    """Face indices of the cubemap (Source Engine ordering)."""
    LEFT = 0
    RIGHT = 1
    TOP = 2
    BOTTOM = 3
    FRONT = 4
    BACK = 5


def get_face(x: ndarray, y: ndarray, z: ndarray) -> ndarray:
    """Return, per pixel, the cube face that the direction vector (x, y, z) hits."""
    abs_x, abs_y, abs_z = numpy.abs(x), numpy.abs(y), numpy.abs(z)
    # The dominant axis (largest magnitude, within a small tolerance) decides
    # which pair of opposing faces applies; the axis sign picks one of the two.
    dominant = numpy.maximum.reduce((abs_x, abs_y, abs_z))
    x_is_max: ndarray = dominant - abs_x < 1e-9
    y_is_max: ndarray = dominant - abs_y < 1e-9
    z_is_max: ndarray = dominant - abs_z < 1e-9
    x_is_neg: ndarray = x < 0
    y_is_neg: ndarray = y < 0
    z_is_neg: ndarray = z < 0
    conditions = (
        x_is_max & x_is_neg, x_is_max & ~x_is_neg,
        y_is_max & y_is_neg, y_is_max & ~y_is_neg,
        z_is_max & z_is_neg, z_is_max & ~z_is_neg,
    )
    faces = (
        CubemapFace.LEFT, CubemapFace.RIGHT,
        CubemapFace.TOP, CubemapFace.BOTTOM,
        CubemapFace.BACK, CubemapFace.FRONT,
    )
    # numpy.select takes the FIRST matching condition, so ties between axes
    # resolve in x, y, z order.
    return numpy.select(conditions, faces)
def raw_face_coordinates(face: ndarray, x: ndarray, y: ndarray, z: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
    """
    Select the per-face signed coordinates (sc, tc) and the major-axis value ma,
    following the OpenGL cube-map convention (spec chapter 3.8.10)
    https://www.opengl.org/registry/doc/glspec41.core.20100725.pdf
    """
    # One boolean mask per face, in a fixed order shared by all three selects.
    masks = tuple(face == which for which in (
        CubemapFace.FRONT, CubemapFace.BACK, CubemapFace.BOTTOM,
        CubemapFace.TOP, CubemapFace.LEFT, CubemapFace.RIGHT,
    ))
    neg_x = -x
    xc = numpy.select(masks, (neg_x, x, z, z, -z, z))
    yc = numpy.select(masks, (y, y, neg_x, x, y, y))
    ma = numpy.select(masks, (z, z, y, y, x, x))
    return xc, yc, ma
def raw_coordinates(xc: ndarray, yc: ndarray, ma: ndarray) -> Tuple[ndarray, ndarray]:
    """
    Map signed face coordinates into [0, 1], measured from the bottom-left
    corner of the face (OpenGL cube-map convention).
    """
    magnitude = numpy.abs(ma)
    u = (xc / magnitude + 1) / 2
    v = (yc / magnitude + 1) / 2
    return u, v
def normalized_coordinates(face: ndarray, x: ndarray, y: ndarray, n: int) -> Tuple[ndarray, ndarray]:
    """Scale face-relative [0, 1] coordinates to pixel indices of an n x n face."""
    # `face` is kept for interface compatibility; the mapping only depends on
    # the in-face coordinates and the face resolution n.
    col = numpy.clip(x * n, 0, n - 1)
    row = numpy.clip(y * n, 0, n - 1)
    return col, row
def find_corresponding_pixels(width: int, height: int, out_dim: int) -> Tuple[ndarray, Tuple[ndarray, ndarray]]:
    """ Returns face index, pixel coordinates for the input image that a specified pixel in the output image maps to."""
    rows, cols = numpy.mgrid[0:height, 0:width]
    # Image rows grow downwards; flip so that row 0 is the bottom of the image.
    rows = rows[::-1]
    phi, theta = spherical_coordinates(cols, rows, width, height)
    vx, vy, vz = vector_coordinates(phi, theta)
    face = get_face(vx, vy, vz)
    xc, yc, ma = raw_face_coordinates(face, vx, vy, vz)
    u, v = raw_coordinates(xc, yc, ma)
    return face, normalized_coordinates(face, u, v, out_dim)
| [
"numpy.abs",
"numpy.sin",
"numpy.select",
"numpy.cos",
"numpy.maximum.reduce"
] | [((759, 773), 'numpy.cos', 'numpy.cos', (['phi'], {}), '(phi)\n', (768, 773), False, 'import numpy\n'), ((1165, 1177), 'numpy.abs', 'numpy.abs', (['x'], {}), '(x)\n', (1174, 1177), False, 'import numpy\n'), ((1190, 1202), 'numpy.abs', 'numpy.abs', (['y'], {}), '(y)\n', (1199, 1202), False, 'import numpy\n'), ((1215, 1227), 'numpy.abs', 'numpy.abs', (['z'], {}), '(z)\n', (1224, 1227), False, 'import numpy\n'), ((1252, 1295), 'numpy.maximum.reduce', 'numpy.maximum.reduce', (['(abs_x, abs_y, abs_z)'], {}), '((abs_x, abs_y, abs_z))\n', (1272, 1295), False, 'import numpy\n'), ((1585, 1889), 'numpy.select', 'numpy.select', (['(x_selector & x_specifier, x_selector & ~x_specifier, y_selector &\n y_specifier, y_selector & ~y_specifier, z_selector & z_specifier, \n z_selector & ~z_specifier)', '(CubemapFace.LEFT, CubemapFace.RIGHT, CubemapFace.TOP, CubemapFace.BOTTOM,\n CubemapFace.BACK, CubemapFace.FRONT)'], {}), '((x_selector & x_specifier, x_selector & ~x_specifier, \n y_selector & y_specifier, y_selector & ~y_specifier, z_selector &\n z_specifier, z_selector & ~z_specifier), (CubemapFace.LEFT, CubemapFace\n .RIGHT, CubemapFace.TOP, CubemapFace.BOTTOM, CubemapFace.BACK,\n CubemapFace.FRONT))\n', (1597, 1889), False, 'import numpy\n'), ((2569, 2647), 'numpy.select', 'numpy.select', (['(front, back, bottom, top, left, right)', '(x_neg, x, z, z, -z, z)'], {}), '((front, back, bottom, top, left, right), (x_neg, x, z, z, -z, z))\n', (2581, 2647), False, 'import numpy\n'), ((2725, 2802), 'numpy.select', 'numpy.select', (['(front, back, bottom, top, left, right)', '(y, y, x_neg, x, y, y)'], {}), '((front, back, bottom, top, left, right), (y, y, x_neg, x, y, y))\n', (2737, 2802), False, 'import numpy\n'), ((2880, 2953), 'numpy.select', 'numpy.select', (['(front, back, bottom, top, left, right)', '(z, z, y, y, x, x)'], {}), '((front, back, bottom, top, left, right), (z, z, y, y, x, x))\n', (2892, 2953), False, 'import numpy\n'), ((831, 845), 'numpy.sin', 'numpy.sin', (['phi'], 
{}), '(phi)\n', (840, 845), False, 'import numpy\n'), ((796, 812), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (805, 812), False, 'import numpy\n'), ((886, 902), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (895, 902), False, 'import numpy\n'), ((3289, 3302), 'numpy.abs', 'numpy.abs', (['ma'], {}), '(ma)\n', (3298, 3302), False, 'import numpy\n'), ((3317, 3330), 'numpy.abs', 'numpy.abs', (['ma'], {}), '(ma)\n', (3326, 3330), False, 'import numpy\n')] |
import pickle
import torch.utils.data as data
import torch.nn.parallel
import os
import torch
import numpy as np
class _OneHotIterator:
"""
>>> it_1 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1)
>>> it_2 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1)
>>> list(it_1)[0][0].allclose(list(it_2)[0][0])
True
>>> it = _OneHotIterator(n_features=8, n_batches_per_epoch=1, batch_size=4)
>>> data = list(it)
>>> len(data)
1
>>> batch = data[0]
>>> x, y = batch
>>> x.size()
torch.Size([4, 8])
>>> x.sum(dim=1)
tensor([1., 1., 1., 1.])
"""
def __init__(self, n_features, n_batches_per_epoch, batch_size, seed=None):
self.n_batches_per_epoch = n_batches_per_epoch
self.n_features = n_features
self.batch_size = batch_size
self.probs = np.ones(n_features) / n_features
self.batches_generated = 0
self.random_state = np.random.RandomState(seed)
def __iter__(self):
return self
def __next__(self):
if self.batches_generated >= self.n_batches_per_epoch:
raise StopIteration()
# batch_data = self.random_state.multinomial(1, self.probs, size=self.batch_size)
# one input
dims = []
for idx in range(2):
# pick properties from scale(set) of values
dimension = np.random.choice(2048,
2048, replace=False)
print(dimension)
# put min value in 0, 2, 4 at the current dimension?
# the index of min properties
where_min = np.argmin(dimension)
# min_idx = double the dim
min_idx = idx * 2
dimension[[where_min, min_idx]] = dimension[[min_idx, where_min]] # swap values at the two positions
# put max value in 1, 3, 5 at the current dimension? (target place?)
where_max = np.argmax(dimension)
max_idx = min_idx + 1
dimension[[where_max, max_idx]] = dimension[[max_idx, where_max]]
dims.append(dimension)
batch_data = np.array([np.array(dims).flatten(), np.array(dims).flatten(), np.array(dims).flatten()])
# batch_data = np.expand_dims(batch_data, axis = 0)
print("batch shape: " + str(batch_data.shape))
self.batches_generated += 1
return torch.from_numpy(batch_data).float(), torch.zeros(1)
class OneHotLoader(torch.utils.data.DataLoader):
    """
    Loader that hands out a fresh _OneHotIterator each epoch.

    >>> data_loader = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2, seed=1)
    >>> epoch_1 = []
    >>> for batch in data_loader:
    ...     epoch_1.append(batch)
    >>> [b[0].size() for b in epoch_1]
    [torch.Size([2, 8]), torch.Size([2, 8]), torch.Size([2, 8])]
    >>> data_loader_other = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2)
    >>> all_equal = True
    >>> for a, b in zip(data_loader, data_loader_other):
    ...     all_equal = all_equal and (a[0] == b[0]).all()
    >>> all_equal.item()
    0
    """

    def __init__(self, n_features, batches_per_epoch, batch_size, seed=None):
        # NOTE(review): super().__init__() is never called -- the base
        # DataLoader machinery is unused; confirm this is intended.
        self.seed = seed
        self.batches_per_epoch = batches_per_epoch
        self.n_features = n_features
        self.batch_size = batch_size

    def __iter__(self):
        # Without an explicit seed, every epoch reseeds at random, so two
        # loaders (or two epochs of one loader) generally differ.
        if self.seed is None:
            seed = np.random.randint(0, 2 ** 32)
        else:
            seed = self.seed
        return _OneHotIterator(n_features=self.n_features, n_batches_per_epoch=self.batches_per_epoch,
batch_size=self.batch_size, seed=seed) | [
"numpy.argmax",
"numpy.ones",
"numpy.argmin",
"numpy.random.RandomState",
"numpy.random.randint",
"numpy.array",
"numpy.random.choice",
"torch.zeros",
"torch.from_numpy"
] | [((991, 1018), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1012, 1018), True, 'import numpy as np\n'), ((895, 914), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (902, 914), True, 'import numpy as np\n'), ((1425, 1468), 'numpy.random.choice', 'np.random.choice', (['(2048)', '(2048)'], {'replace': '(False)'}), '(2048, 2048, replace=False)\n', (1441, 1468), True, 'import numpy as np\n'), ((1673, 1693), 'numpy.argmin', 'np.argmin', (['dimension'], {}), '(dimension)\n', (1682, 1693), True, 'import numpy as np\n'), ((1984, 2004), 'numpy.argmax', 'np.argmax', (['dimension'], {}), '(dimension)\n', (1993, 2004), True, 'import numpy as np\n'), ((2476, 2490), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (2487, 2490), False, 'import torch\n'), ((3411, 3440), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (3428, 3440), True, 'import numpy as np\n'), ((2438, 2466), 'torch.from_numpy', 'torch.from_numpy', (['batch_data'], {}), '(batch_data)\n', (2454, 2466), False, 'import torch\n'), ((2185, 2199), 'numpy.array', 'np.array', (['dims'], {}), '(dims)\n', (2193, 2199), True, 'import numpy as np\n'), ((2211, 2225), 'numpy.array', 'np.array', (['dims'], {}), '(dims)\n', (2219, 2225), True, 'import numpy as np\n'), ((2237, 2251), 'numpy.array', 'np.array', (['dims'], {}), '(dims)\n', (2245, 2251), True, 'import numpy as np\n')] |
# Copy the first `t_cut` seconds of an evolution run from one result
# directory into another, then optionally submit the continuation job.
#
# Usage: python <script> FROM_DIR TO_DIR T_CUT [--submit]
import shutil, os, sys, json
import numpy as np

# Batch system used for the optional job submission ("slurm" or SGE-style qsub).
system = "slurm"

from_dir = sys.argv[1]
to_dir = sys.argv[2]
t_cut = int(sys.argv[3])
submit_job = len(sys.argv) == 5 and str(sys.argv[4]) == "--submit"

# Both runs must share the same time step, otherwise the cut index computed
# below would not line up between the two parameter sets.
with open(from_dir + "/evo_params.json", "r") as f:
    fromevo_params = json.load(f)
with open(to_dir + "/evo_params.json", "r") as f:
    toevo_params = json.load(f)
from_dt = fromevo_params["dt"]
to_dt = toevo_params["dt"]
if from_dt != to_dt:
    raise ValueError("dt's do not match!")

# Index of the last evolution step to keep (zero-based).
stepnum_cut = int(t_cut / from_dt) - 1
print("Copying up to step:", stepnum_cut)

# Truncate each per-step record to the first stepnum_cut + 1 entries.
expvals_evo = list(np.load(from_dir + "/expvals_evo.npy", allow_pickle=True))[:stepnum_cut+1]
Ds_evo = list(np.load(from_dir + "/Ds_evo.npy", allow_pickle=True))[:stepnum_cut+1]
eta_sqs = list(np.load(from_dir + "/eta_sqs_evo.npy", allow_pickle=True))[:stepnum_cut+1]
schmidts = list(np.load(from_dir + "/schmidts_evo.npy", allow_pickle=True))[:stepnum_cut+1]
proj_errs = list(np.load(from_dir + "/proj_errs_evo.npy", allow_pickle=True))[:stepnum_cut+1]

# NOTE(review): expvals/Ds are re-stacked into 2D arrays while the other
# three are saved as lists -- confirm the downstream reader expects this
# asymmetry.
np.save(to_dir + "/expvals_evo.npy", np.vstack(expvals_evo))
np.save(to_dir + "/Ds_evo.npy", np.vstack(Ds_evo))
np.save(to_dir + "/eta_sqs_evo.npy", eta_sqs)
np.save(to_dir + "/schmidts_evo.npy", schmidts)
np.save(to_dir + "/proj_errs_evo.npy", proj_errs)

# Copy the state snapshot at the cut step, and keep a record of the source
# run's parameters alongside the new run.
statefn = "/state_evo_step{}.npy".format(stepnum_cut)
shutil.copyfile(from_dir + statefn, to_dir + statefn)
shutil.copyfile(from_dir + "/evo_params.json", to_dir + "/evo_params_upto_t{}.json".format(t_cut))

if submit_job:
    jobpath = os.path.join(os.getcwd(), to_dir)
    import subprocess
    if system == "slurm":
        subprocess.run(["sbatch", os.path.join(jobpath, "./jobscript.sh")])
    else:
        subprocess.run(["qsub", os.path.join(jobpath, "./jobscript.sh")])
| [
"numpy.load",
"numpy.save",
"json.load",
"os.getcwd",
"shutil.copyfile",
"os.path.join",
"numpy.vstack"
] | [((1160, 1205), 'numpy.save', 'np.save', (["(to_dir + '/eta_sqs_evo.npy')", 'eta_sqs'], {}), "(to_dir + '/eta_sqs_evo.npy', eta_sqs)\n", (1167, 1205), True, 'import numpy as np\n'), ((1206, 1253), 'numpy.save', 'np.save', (["(to_dir + '/schmidts_evo.npy')", 'schmidts'], {}), "(to_dir + '/schmidts_evo.npy', schmidts)\n", (1213, 1253), True, 'import numpy as np\n'), ((1254, 1303), 'numpy.save', 'np.save', (["(to_dir + '/proj_errs_evo.npy')", 'proj_errs'], {}), "(to_dir + '/proj_errs_evo.npy', proj_errs)\n", (1261, 1303), True, 'import numpy as np\n'), ((1359, 1412), 'shutil.copyfile', 'shutil.copyfile', (['(from_dir + statefn)', '(to_dir + statefn)'], {}), '(from_dir + statefn, to_dir + statefn)\n', (1374, 1412), False, 'import shutil, os, sys, json\n'), ((278, 290), 'json.load', 'json.load', (['f'], {}), '(f)\n', (287, 290), False, 'import shutil, os, sys, json\n'), ((365, 377), 'json.load', 'json.load', (['f'], {}), '(f)\n', (374, 377), False, 'import shutil, os, sys, json\n'), ((1085, 1107), 'numpy.vstack', 'np.vstack', (['expvals_evo'], {}), '(expvals_evo)\n', (1094, 1107), True, 'import numpy as np\n'), ((1141, 1158), 'numpy.vstack', 'np.vstack', (['Ds_evo'], {}), '(Ds_evo)\n', (1150, 1158), True, 'import numpy as np\n'), ((612, 669), 'numpy.load', 'np.load', (["(from_dir + '/expvals_evo.npy')"], {'allow_pickle': '(True)'}), "(from_dir + '/expvals_evo.npy', allow_pickle=True)\n", (619, 669), True, 'import numpy as np\n'), ((701, 753), 'numpy.load', 'np.load', (["(from_dir + '/Ds_evo.npy')"], {'allow_pickle': '(True)'}), "(from_dir + '/Ds_evo.npy', allow_pickle=True)\n", (708, 753), True, 'import numpy as np\n'), ((786, 843), 'numpy.load', 'np.load', (["(from_dir + '/eta_sqs_evo.npy')"], {'allow_pickle': '(True)'}), "(from_dir + '/eta_sqs_evo.npy', allow_pickle=True)\n", (793, 843), True, 'import numpy as np\n'), ((877, 935), 'numpy.load', 'np.load', (["(from_dir + '/schmidts_evo.npy')"], {'allow_pickle': '(True)'}), "(from_dir + '/schmidts_evo.npy', 
allow_pickle=True)\n", (884, 935), True, 'import numpy as np\n'), ((970, 1029), 'numpy.load', 'np.load', (["(from_dir + '/proj_errs_evo.npy')"], {'allow_pickle': '(True)'}), "(from_dir + '/proj_errs_evo.npy', allow_pickle=True)\n", (977, 1029), True, 'import numpy as np\n'), ((1556, 1567), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1565, 1567), False, 'import shutil, os, sys, json\n'), ((1660, 1699), 'os.path.join', 'os.path.join', (['jobpath', '"""./jobscript.sh"""'], {}), "(jobpath, './jobscript.sh')\n", (1672, 1699), False, 'import shutil, os, sys, json\n'), ((1744, 1783), 'os.path.join', 'os.path.join', (['jobpath', '"""./jobscript.sh"""'], {}), "(jobpath, './jobscript.sh')\n", (1756, 1783), False, 'import shutil, os, sys, json\n')] |
import subprocess as sp
import pandas as pd
import numpy as np
import h5py
import click
import os
import sys
from tqdm import tqdm
from astropy.io import fits
from joblib import Parallel, delayed
from fact.credentials import create_factdb_engine
import config as config
from constants import NRCHID
@click.command()
@click.argument('source_folder_path',
                default=('/net/big-tank/POOL/projects/fact/' +
                         'drs4_calibration_data/'),
                type=click.Path(exists=True))
@click.argument('store_folder_path',
                default=('/net/big-tank/POOL/projects/fact/' +
                         'drs4_calibration_data/calibration/validating/version_0/meanAndStd/interval2/'),
                type=click.Path(exists=True))
@click.argument('facttools_file_path',
                default=('/home/fschulz/git/fact-tools_drs/' +
                         'target/fact-tools-0.18.1-SNAPSHOT.jar'),
                type=click.Path(exists=True))
@click.argument('facttools_xml_path',
                default=('/home/fschulz/git/fact-tools_drs/' +
                         'examples/drsCalibration/drsCalibrationMeanAndStd.xml'),
                type=click.Path(exists=True))
@click.argument('fitparameter_file_path_temp',
                default=('/net/big-tank/POOL/projects/fact/' +
                         'drs4_calibration_data/calibration/calculation/' +
                         'version_0/drsFitParameter_interval2.fits'),
                type=click.Path(exists=True))
@click.argument('time_interval',
                default=['2014-05-20', '2015-05-26'])  # ['2014-05-20', '2015-05-26'], ['2015-05-26', '2017-10-01']
###############################################################################
def drs_pedestal_run_mean_and_std(source_folder_path, store_folder_path,
                                  facttools_file_path, facttools_xml_path,
                                  fitparameter_file_path_temp, time_interval):
    """
    For every night in `time_interval`: pick one drs-run (middle of the
    night) and the night's pedestal runs, compute per-run temperature
    differences relative to the drs-run, and join the per-run stats files
    produced by fact-tools into one 'pedestelStats_<date>_.fits' table.
    (The fact-tools invocation itself is currently commented out.)
    """
    jobs = 15
    verbosity = 10
    pool = Parallel(n_jobs=jobs, verbose=verbosity, max_nbytes='50G')

    with fits.open(fitparameter_file_path_temp) as tab:
        interval_limits = [tab[0].header['LowLimit'], tab[0].header['UppLimit']]

    # if ((pd.to_datetime(time_interval[0]) < pd.to_datetime(interval_limits[0]).replace(hour=0)) or
    #         (pd.to_datetime(time_interval[1]) > pd.to_datetime(interval_limits[1]).replace(hour=0))):
    #     print('Input range [{}, {}] '.format(time_interval[0], time_interval[1]) +
    #           'is out of interval range [{}, {}]'.format(interval_limits[0],
    #                                                      interval_limits[1]))
    #     sys.exit()

    print('Loading Database ...')
    # TODO maybe query start_date an end_date with fNight
    db_table = pd.read_sql(
        'RunInfo',
        create_factdb_engine(),
        columns=[
            'fNight', 'fRunID',
            'fRunTypeKey', 'fDrsStep',
            'fNumEvents', 'fBiasVoltageMedian'])

    # loop over the start_date to end_date interval
    pre_filename = 'pedestelStats_'
    for date in tqdm(pd.date_range(start=time_interval[0], end=time_interval[1], freq='D')):
        date_str = date.strftime('%Y%m%d')
        date_path = date.strftime('%Y/%m/%d/')

        pre_aux_path = source_folder_path+'aux/'+date_path
        pre_drs_path = source_folder_path+'raw/'+date_path
        temp_file = (pre_aux_path+date_str+'.FAD_CONTROL_TEMPERATURE.fits')

        # skip calculation if no temperatur file exist
        if(not os.path.isfile(temp_file)):
            print('Date: ', date_str, ' has no temp file')  # TODO maybe log
            continue

        # drs-runs of the night: fRunTypeKey == 2, fDrsStep == 2, 1000 events
        selected_drs_infos = db_table.query('fNight =='+str(date_str)+'&' +
                                            'fRunTypeKey == 2 &' +
                                            'fDrsStep == 2 &' +
                                            'fNumEvents == 1000').copy()
        selected_drs_infos['date'] = pd.to_datetime(
            selected_drs_infos.fNight.astype(str),
            format='%Y%m%d')

        drs_run_id_list = selected_drs_infos['fRunID'].tolist()
        existing_drs_run_ids = []
        existing_drs_run_files = []
        for run_id in drs_run_id_list:
            drs_run_filename = (pre_drs_path+date_str +
                                '_'+str('{:03d}'.format(run_id))+'.drs.fits.gz')
            if(os.path.isfile(drs_run_filename)):
                existing_drs_run_ids.append(run_id)
                existing_drs_run_files.append(drs_run_filename)
            else:
                print(drs_run_filename, ' not found')

        if(len(existing_drs_run_files) == 0):
            print('Date: ', date_str, ' no drs files found')  # TODO maybe log
            continue

        # just use one drs-Run for the calculations
        # to afford larger temperature differences
        # (all pedestal-runs follow right after
        # the drs-run taking, so there are just small
        # temperature differences)
        # so we take the drs-Run of the middle of the night
        # keep im mind other influences in time can now appear
        # and distort the results
        drs_run_index = int(len(existing_drs_run_files)/2)
        drs_run_id = existing_drs_run_ids[drs_run_index]
        drs_file = existing_drs_run_files[drs_run_index]

        # Searching pedestal_runs
        # fDrsStep == NaN and fBiasVoltageMedian == NaN dosent work
        nr_of_pedestal_events = 1000
        selected_drs_infos = db_table.query(
            'fNight == '+str(date_str)+'&' +
            'fRunTypeKey == 2 &' +
            'fDrsStep != fDrsStep &' +
            'fBiasVoltageMedian != fBiasVoltageMedian &'
            'fNumEvents == '+str(nr_of_pedestal_events)
        ).copy()
        selected_drs_infos['date'] = pd.to_datetime(selected_drs_infos.fNight.astype(str), format='%Y%m%d')
        pedestal_run_id_list = selected_drs_infos['fRunID'].tolist()

        existing_pedestal_run_ids = []
        existing_pedestal_run_files = []
        for run_id in pedestal_run_id_list:
            pedestal_run_filename = (pre_drs_path+date_str+'_'+str('{:03d}'.format(run_id))+'.fits.fz')
            if(os.path.isfile(pedestal_run_filename)):
                existing_pedestal_run_ids.append(run_id)
                existing_pedestal_run_files.append(pedestal_run_filename)
            else:
                print(pedestal_run_filename, ' not found')

        if(existing_pedestal_run_files == []):
            continue

        # Camera temperature time series of the night.
        with fits.open(temp_file) as tempTab:
            timeList = np.array(tempTab[1].data['Time'])
            temp_list = np.array(tempTab[1].data['temp'])
            # 'Time' is stored in days; convert to nanoseconds for pandas.
            tempDatetime = pd.to_datetime(timeList * 24 * 3600 * 1e9)

        with fits.open(drs_file) as drsTab:
            drsStart = pd.to_datetime(drsTab[1].header['DATE-OBS'])
            drsEnd = pd.to_datetime(drsTab[1].header['DATE-END'])

        # mean ignore patches -->, axis=0 <--
        drsTempMean = np.mean(temp_list[np.where((tempDatetime > drsStart) & (tempDatetime < drsEnd))])

        store_folder_path_tmp = store_folder_path+date_str+'/'
        if not os.path.exists(store_folder_path_tmp):
            os.makedirs(store_folder_path_tmp)

        # Per pedestal run: temperature difference to the drs-run and the
        # path of the per-run stats file fact-tools would produce.
        nr_runs_of_the_day = len(existing_pedestal_run_files)
        temp_diff_list = [np.nan] * nr_runs_of_the_day
        store_file_list = [np.nan] * nr_runs_of_the_day
        for run_index in range(nr_runs_of_the_day):
            run_file = existing_pedestal_run_files[run_index]
            run_id = existing_pedestal_run_ids[run_index]
            with fits.open(run_file) as run_tab:
                run_start = run_tab[2].header['DATE-OBS']
                run_end = run_tab[2].header['DATE-END']

            run_temp = temp_list[np.where((tempDatetime > run_start) & (tempDatetime < run_end))[0]]
            if(len(run_temp) == 0):
                # No temperature sample inside the run window; take the
                # samples bracketing the run instead.
                run_temp = temp_list[np.where((tempDatetime < run_start))[0][-1]:
                                     np.where((tempDatetime > run_end))[0][0]+1]

            temp_diff_list[run_index] = np.mean(run_temp) - drsTempMean
            store_file_path = (store_folder_path_tmp+pre_filename +
                               date_str+'_{0:03d}'.format(run_id)+'_tmp.fits')
            store_file_list[run_index] = store_file_path

        # pool(delayed(run_fact_tools)(
        #      facttools_file_path,
        #      facttools_xml_path,
        #      existing_pedestal_run_files[run_index],
        #      store_file_list[run_index],
        #      drs_file,
        #      pre_aux_path,
        #      fitparameter_file_path_temp
        #      ) for run_index in range(nr_runs_of_the_day))

        print('Join stats.fits of ', date_str)
        drs_calibrated_data_mean = []
        drs_calibrated_data_std = []
        drs_calibrated_data_mean_temp = []
        drs_calibrated_data_std_temp = []
        used_pedestal_run_ids = []
        used_temperature_differences = []
        data_shape = NRCHID * nr_of_pedestal_events
        for run_id in existing_pedestal_run_ids:
            print('Try to add run ID: ', run_id)
            source_file = (store_folder_path_tmp+pre_filename +
                           date_str+'_{0:03d}'.format(run_id)+'_tmp.fits')
            try:
                with fits.open(source_file) as stats_tab:
                    data_mean = stats_tab[1].data['DRSCalibratedData_Mean'].flatten()
                    data_std = stats_tab[1].data['DRSCalibratedData_Std'].flatten()
                    data_mean_temp = stats_tab[1].data['DRSCalibratedData_Temp_Mean'].flatten()
                    data_std_temp = stats_tab[1].data['DRSCalibratedData_Temp_Std'].flatten()

                # Only accept runs where every column has the full
                # NRCHID * nr_of_pedestal_events length.
                if((len(data_mean) == data_shape) and (len(data_std) == data_shape) and
                   (len(data_mean_temp) == data_shape) and (len(data_std_temp) == data_shape)):
                    drs_calibrated_data_mean.append(data_mean)
                    drs_calibrated_data_std.append(data_std)
                    drs_calibrated_data_mean_temp.append(data_mean_temp)
                    drs_calibrated_data_std_temp.append(data_std_temp)
                    used_pedestal_run_ids.append(run_id)
                    run_id_index = existing_pedestal_run_ids.index(run_id)
                    used_temperature_differences.append(temp_diff_list[run_id_index])
                else:
                    error_str = ("Incomplete run")
                    # logging.error(error_str)
                    raise Exception(error_str)

            except Exception as errInfos:
                # Best-effort join: a broken/missing per-run file is reported
                # and skipped rather than aborting the whole night.
                print('Unable to add run ID: ', run_id, '-', str(errInfos))

        # os.remove(source_file)
        # os.rmdir(store_folder_path_tmp)

        # if(len(used_pedestal_run_ids) == 0):
        #     continue

        print('Write Data to Table')
        tbhduStats = fits.BinTableHDU.from_columns(
            [fits.Column(
                name='PedestelRunId', format='1E',  # format='1I' for int dosent work
                unit='1', array=used_pedestal_run_ids),
             fits.Column(
                name='TempDiff', format='1E',
                unit='Degree C', array=used_temperature_differences),
             fits.Column(
                name='DRSCalibratedData_Mean', format='{}E'.format(data_shape),
                unit='mV', array=drs_calibrated_data_mean),
             fits.Column(
                name='DrsCalibratedData_Std', format='{}E'.format(data_shape),
                unit='mV', array=drs_calibrated_data_std),
             fits.Column(
                name='DrsCalibratedData_Temp_Mean', format='{}E'.format(data_shape),
                unit='mV', array=drs_calibrated_data_mean_temp),
             fits.Column(
                name='DrsCalibratedData_Temp_Std', format='{}E'.format(data_shape),
                unit='mV', array=drs_calibrated_data_std_temp)])

        tbhduStats.header.insert('TFIELDS', ('EXTNAME', 'StatsPerChid'), after=True)
        commentStr = ('-')
        # NOTE(review): after='True' is a *string* here, unlike the boolean
        # True used in the surrounding insert() calls -- probably a typo;
        # verify against astropy's Header.insert signature.
        tbhduStats.header.insert('EXTNAME', ('comment', commentStr), after='True')
        tbhduStats.header.insert('comment', ('Date', date_str, 'Date yyyy-mm-dd'), after=True)
        tbhduStats.header.insert('Date', ('DrsRunId', drs_run_id, 'RunID of the based drsrun_file'), after=True)

        store_file_path = store_folder_path+pre_filename+date_str+'_.fits'
        print('Save Table')
        thdulist = fits.HDUList([fits.PrimaryHDU(), tbhduStats])
        thdulist.writeto(store_file_path, overwrite=True, checksum=True)
        print('Verify Checksum')
        # Open the File verifying the checksum values for all HDUs
        try:
            hdul = fits.open(store_file_path, checksum=True)
            print(hdul['StatsPerChid'].header)

        except Exception as errInfos:
            errorStr = str(errInfos)
            print(errorStr)
###############################################################################
def run_fact_tools(facttools_file_path, facttools_xml_path, run_file,
                   store_file_path, drs_file, pre_aux_path,
                   fitparameter_file_path_temp):
    """Run the fact-tools JAR on one raw run file.

    Builds the java command line (input/output/DRS/aux paths are handed to
    fact-tools as -D properties with file: URIs) and executes it, blocking
    until the process finishes.
    """
    command = [
        'java',
        '-jar',
        f'{facttools_file_path}',
        f'{facttools_xml_path}',
        f'-Dinfile=file:{run_file}',
        f'-Doutfile=file:{store_file_path}',
        f'-Ddrsfile=file:{drs_file}',
        f'-DauxFolder=file:{pre_aux_path}',
        f'-DfitParameterFile_Temp=file:{fitparameter_file_path_temp}',
        '-j8',  # run fact-tools with 8 worker threads
    ]
    sp.run(command)
| [
"pandas.date_range",
"click.argument",
"os.makedirs",
"astropy.io.fits.PrimaryHDU",
"os.path.exists",
"click.command",
"fact.credentials.create_factdb_engine",
"os.path.isfile",
"numpy.mean",
"click.Path",
"astropy.io.fits.open",
"numpy.array",
"joblib.Parallel",
"pandas.to_datetime",
"a... | [((305, 320), 'click.command', 'click.command', ([], {}), '()\n', (318, 320), False, 'import click\n'), ((1519, 1588), 'click.argument', 'click.argument', (['"""time_interval"""'], {'default': "['2014-05-20', '2015-05-26']"}), "('time_interval', default=['2014-05-20', '2015-05-26'])\n", (1533, 1588), False, 'import click\n'), ((2019, 2077), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'jobs', 'verbose': 'verbosity', 'max_nbytes': '"""50G"""'}), "(n_jobs=jobs, verbose=verbosity, max_nbytes='50G')\n", (2027, 2077), False, 'from joblib import Parallel, delayed\n'), ((2087, 2125), 'astropy.io.fits.open', 'fits.open', (['fitparameter_file_path_temp'], {}), '(fitparameter_file_path_temp)\n', (2096, 2125), False, 'from astropy.io import fits\n'), ((2854, 2876), 'fact.credentials.create_factdb_engine', 'create_factdb_engine', ([], {}), '()\n', (2874, 2876), False, 'from fact.credentials import create_factdb_engine\n'), ((3174, 3243), 'pandas.date_range', 'pd.date_range', ([], {'start': 'time_interval[0]', 'end': 'time_interval[1]', 'freq': '"""D"""'}), "(start=time_interval[0], end=time_interval[1], freq='D')\n", (3187, 3243), True, 'import pandas as pd\n'), ((495, 518), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (505, 518), False, 'import click\n'), ((747, 770), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (757, 770), False, 'import click\n'), ((962, 985), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (972, 985), False, 'import click\n'), ((1191, 1214), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1201, 1214), False, 'import click\n'), ((1493, 1516), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1503, 1516), False, 'import click\n'), ((3604, 3629), 'os.path.isfile', 'os.path.isfile', (['temp_file'], {}), '(temp_file)\n', (3618, 3629), False, 'import os\n'), ((4527, 4559), 'os.path.isfile', 'os.path.isfile', 
(['drs_run_filename'], {}), '(drs_run_filename)\n', (4541, 4559), False, 'import os\n'), ((6447, 6484), 'os.path.isfile', 'os.path.isfile', (['pedestal_run_filename'], {}), '(pedestal_run_filename)\n', (6461, 6484), False, 'import os\n'), ((6778, 6798), 'astropy.io.fits.open', 'fits.open', (['temp_file'], {}), '(temp_file)\n', (6787, 6798), False, 'from astropy.io import fits\n'), ((6834, 6867), 'numpy.array', 'np.array', (["tempTab[1].data['Time']"], {}), "(tempTab[1].data['Time'])\n", (6842, 6867), True, 'import numpy as np\n'), ((6892, 6925), 'numpy.array', 'np.array', (["tempTab[1].data['temp']"], {}), "(tempTab[1].data['temp'])\n", (6900, 6925), True, 'import numpy as np\n'), ((6953, 7004), 'pandas.to_datetime', 'pd.to_datetime', (['(timeList * 24 * 3600 * 1000000000.0)'], {}), '(timeList * 24 * 3600 * 1000000000.0)\n', (6967, 7004), True, 'import pandas as pd\n'), ((7010, 7029), 'astropy.io.fits.open', 'fits.open', (['drs_file'], {}), '(drs_file)\n', (7019, 7029), False, 'from astropy.io import fits\n'), ((7064, 7108), 'pandas.to_datetime', 'pd.to_datetime', (["drsTab[1].header['DATE-OBS']"], {}), "(drsTab[1].header['DATE-OBS'])\n", (7078, 7108), True, 'import pandas as pd\n'), ((7130, 7174), 'pandas.to_datetime', 'pd.to_datetime', (["drsTab[1].header['DATE-END']"], {}), "(drsTab[1].header['DATE-END'])\n", (7144, 7174), True, 'import pandas as pd\n'), ((7412, 7449), 'os.path.exists', 'os.path.exists', (['store_folder_path_tmp'], {}), '(store_folder_path_tmp)\n', (7426, 7449), False, 'import os\n'), ((7463, 7497), 'os.makedirs', 'os.makedirs', (['store_folder_path_tmp'], {}), '(store_folder_path_tmp)\n', (7474, 7497), False, 'import os\n'), ((13033, 13074), 'astropy.io.fits.open', 'fits.open', (['store_file_path'], {'checksum': '(True)'}), '(store_file_path, checksum=True)\n', (13042, 13074), False, 'from astropy.io import fits\n'), ((7862, 7881), 'astropy.io.fits.open', 'fits.open', (['run_file'], {}), '(run_file)\n', (7871, 7881), False, 'from astropy.io 
import fits\n'), ((8351, 8368), 'numpy.mean', 'np.mean', (['run_temp'], {}), '(run_temp)\n', (8358, 8368), True, 'import numpy as np\n'), ((11211, 11301), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""PedestelRunId"""', 'format': '"""1E"""', 'unit': '"""1"""', 'array': 'used_pedestal_run_ids'}), "(name='PedestelRunId', format='1E', unit='1', array=\n used_pedestal_run_ids)\n", (11222, 11301), False, 'from astropy.io import fits\n'), ((11390, 11489), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""TempDiff"""', 'format': '"""1E"""', 'unit': '"""Degree C"""', 'array': 'used_temperature_differences'}), "(name='TempDiff', format='1E', unit='Degree C', array=\n used_temperature_differences)\n", (11401, 11489), False, 'from astropy.io import fits\n'), ((12796, 12813), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (12811, 12813), False, 'from astropy.io import fits\n'), ((7269, 7330), 'numpy.where', 'np.where', (['((tempDatetime > drsStart) & (tempDatetime < drsEnd))'], {}), '((tempDatetime > drsStart) & (tempDatetime < drsEnd))\n', (7277, 7330), True, 'import numpy as np\n'), ((8042, 8105), 'numpy.where', 'np.where', (['((tempDatetime > run_start) & (tempDatetime < run_end))'], {}), '((tempDatetime > run_start) & (tempDatetime < run_end))\n', (8050, 8105), True, 'import numpy as np\n'), ((9570, 9592), 'astropy.io.fits.open', 'fits.open', (['source_file'], {}), '(source_file)\n', (9579, 9592), False, 'from astropy.io import fits\n'), ((8184, 8218), 'numpy.where', 'np.where', (['(tempDatetime < run_start)'], {}), '(tempDatetime < run_start)\n', (8192, 8218), True, 'import numpy as np\n'), ((8266, 8298), 'numpy.where', 'np.where', (['(tempDatetime > run_end)'], {}), '(tempDatetime > run_end)\n', (8274, 8298), True, 'import numpy as np\n')] |
# Copyright 2020 <NAME> (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import abc
import codecs
import unicodedata
from multiprocessing import cpu_count
import sentencepiece as sp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tds
from ..configs.config import DecoderConfig
from ..utils import file_util
ENGLISH_CHARACTERS = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"]
class TextFeaturizer(metaclass=abc.ABCMeta):
    """Abstract base class for transcript featurizers.

    Subclasses map transcripts to sequences of integer indices (``extract``)
    and back (``iextract``), and expose the vocabulary metadata needed by
    CTC/transducer decoding (``blank``, ``num_classes``).
    """

    def __init__(self, decoder_config: dict):
        self.scorer = None  # optional external LM scorer, set via add_scorer
        self.decoder_config = DecoderConfig(decoder_config)
        self.blank = None  # blank-token index, set by subclasses
        self.tokens2indices = {}  # token -> index lookup, filled by subclasses
        self.tokens = []  # index -> token lookup, filled by subclasses
        self.num_classes = None  # vocabulary size, set by subclasses
        self.max_length = 0  # longest label length observed so far

    @property
    def shape(self) -> list:
        """Static label shape: [max_length] once known, else [None]."""
        return [self.max_length if self.max_length > 0 else None]

    @property
    def prepand_shape(self) -> list:
        """Label shape with one extra slot for a prepended blank."""
        return [self.max_length + 1 if self.max_length > 0 else None]

    def update_length(self, length: int):
        """Record a label length, keeping the running maximum."""
        self.max_length = max(self.max_length, length)

    def reset_length(self):
        """Forget the running maximum label length."""
        self.max_length = 0

    def preprocess_text(self, text):
        """Lowercase, NFC-normalize and strip the trailing newline."""
        text = unicodedata.normalize("NFC", text.lower())
        return text.strip("\n")  # remove trailing newline

    def add_scorer(self, scorer: any = None):
        """ Add scorer to this instance """
        self.scorer = scorer

    def normalize_indices(self, indices: tf.Tensor) -> tf.Tensor:
        """
        Remove -1 in indices by replacing them with blanks
        Args:
            indices (tf.Tensor): shape any
        Returns:
            tf.Tensor: normalized indices with shape same as indices
        """
        with tf.name_scope("normalize_indices"):
            minus_one = -1 * tf.ones_like(indices, dtype=tf.int32)
            blank_like = self.blank * tf.ones_like(indices, dtype=tf.int32)
            return tf.where(indices == minus_one, blank_like, indices)

    def prepand_blank(self, text: tf.Tensor) -> tf.Tensor:
        """ Prepand blank index for transducer models """
        return tf.concat([[self.blank], text], axis=0)

    # abc.abstractclassmethod is deprecated since Python 3.3 and wrongly
    # declared these as classmethods; every subclass implements them as
    # plain instance methods, so declare them with abc.abstractmethod.
    @abc.abstractmethod
    def extract(self, text):
        raise NotImplementedError()

    @abc.abstractmethod
    def iextract(self, indices):
        raise NotImplementedError()

    @abc.abstractmethod
    def indices2upoints(self, indices):
        raise NotImplementedError()
class CharFeaturizer(TextFeaturizer):
    """Character-level text featurizer.

    Each transcript is mapped to a sequence of integer indices by looking
    up single characters in the vocabulary table, and back again.
    """

    def __init__(self, decoder_config: dict):
        """
        decoder_config = {
            "vocabulary": str,
            "blank_at_zero": bool,
            "beam_width": int,
            "lm_config": {
                ...
            }
        }
        """
        super().__init__(decoder_config)
        self.__init_vocabulary()

    def __init_vocabulary(self):
        # Load vocabulary lines from file, or fall back to plain English chars.
        if self.decoder_config.vocabulary is None:
            raw_lines = ENGLISH_CHARACTERS
        else:
            with codecs.open(self.decoder_config.vocabulary, "r", "utf-8") as fin:
                raw_lines = fin.readlines()
        self.blank = 0 if self.decoder_config.blank_at_zero else None
        self.tokens2indices = {}
        self.tokens = []
        # Index 0 is reserved for the blank when blank_at_zero is set.
        index = 1 if self.blank == 0 else 0
        for raw_line in raw_lines:
            cleaned = self.preprocess_text(raw_line)
            # Skip empty lines and '#' comment lines.
            if not cleaned or cleaned.startswith("#"):
                continue
            token = cleaned[0]
            self.tokens2indices[token] = index
            self.tokens.append(token)
            index += 1
        if self.blank is None:
            self.blank = len(self.tokens)  # blank not at zero: goes last
        self.vocab_array = self.tokens.copy()
        self.tokens.insert(self.blank, "")  # blank renders as empty string
        self.num_classes = len(self.tokens)
        self.tokens = tf.convert_to_tensor(self.tokens, dtype=tf.string)
        self.upoints = tf.strings.unicode_decode(self.tokens, "UTF-8").to_tensor(shape=[None, 1])

    def extract(self, text: str) -> tf.Tensor:
        """
        Convert string to a list of integers
        Args:
            text: string (sequence of characters)
        Returns:
            sequence of ints in tf.Tensor
        """
        chars = list(self.preprocess_text(text).strip())  # drop trailing space
        indices = [self.tokens2indices[char] for char in chars]
        return tf.convert_to_tensor(indices, dtype=tf.int32)

    def iextract(self, indices: tf.Tensor) -> tf.Tensor:
        """
        Convert list of indices to string
        Args:
            indices: tf.Tensor with dim [B, None]
        Returns:
            transcripts: tf.Tensor of dtype tf.string with dim [B]
        """
        cleaned = self.normalize_indices(indices)
        chars = tf.gather_nd(self.tokens, tf.expand_dims(cleaned, axis=-1))
        with tf.device("/CPU:0"):  # string data is not supported on GPU
            joined = tf.strings.reduce_join(chars, axis=-1)
        return joined

    @tf.function(
        input_signature=[
            tf.TensorSpec([None], dtype=tf.int32)
        ]
    )
    def indices2upoints(self, indices: tf.Tensor) -> tf.Tensor:
        """
        Transform Predicted Indices to Unicode Code Points (for using tflite)
        Args:
            indices: tf.Tensor of Classes in shape [None]
        Returns:
            unicode code points transcript with dtype tf.int32 and shape [None]
        """
        with tf.name_scope("indices2upoints"):
            cleaned = self.normalize_indices(indices)
            upoints = tf.gather_nd(self.upoints, tf.expand_dims(cleaned, axis=-1))
            # Drop zero code points (padding) before returning.
            return tf.gather_nd(upoints, tf.where(tf.not_equal(upoints, 0)))
class SubwordFeaturizer(TextFeaturizer):
    """
    Extract text feature based on subword-level granularity.
    By looking up the vocabulary table, each line of transcript will be
    converted to a sequence of subword integer indexes.
    """

    def __init__(self, decoder_config: dict, subwords=None):
        """
        decoder_config = {
            "target_vocab_size": int,
            "max_subword_length": 4,
            "max_corpus_chars": None,
            "reserved_tokens": None,
            "beam_width": int,
            "lm_config": {
                ...
            }
        }
        """
        super(SubwordFeaturizer, self).__init__(decoder_config)
        # Load the encoder from the configured vocabulary unless injected.
        self.subwords = self.__load_subwords() if subwords is None else subwords
        self.blank = 0  # subword treats blank as 0
        self.num_classes = self.subwords.vocab_size
        # create upoints
        self.__init_upoints()

    def __init_upoints(self):
        # Decode every class id back to its subword string, then build a
        # padded tensor of unicode code points; index 0 is the blank ("").
        text = [""]
        for idx in np.arange(1, self.num_classes, dtype=np.int32):
            text.append(self.subwords.decode([idx]))
        self.upoints = tf.strings.unicode_decode(text, "UTF-8")
        self.upoints = self.upoints.to_tensor()  # [num_classes, max_subword_length]

    def __load_subwords(self):
        # load_from_file expects the vocabulary path without its extension.
        filename_prefix = os.path.splitext(self.decoder_config.vocabulary)[0]
        return tds.deprecated.text.SubwordTextEncoder.load_from_file(filename_prefix)

    @classmethod
    def build_from_corpus(cls, decoder_config: dict, corpus_files: list = None):
        """Train a new subword encoder from transcript corpus files."""
        dconf = DecoderConfig(decoder_config.copy())
        corpus_files = dconf.corpus_files if corpus_files is None or len(corpus_files) == 0 else corpus_files

        def corpus_generator():
            # Yield the transcript column (last tab-separated field) of every
            # line, skipping each file's header line.
            for file in corpus_files:
                with open(file, "r", encoding="utf-8") as f:
                    lines = f.read().splitlines()
                    lines = lines[1:]
                    for line in lines:
                        line = line.split("\t")
                        yield line[-1]

        subwords = tds.deprecated.text.SubwordTextEncoder.build_from_corpus(
            corpus_generator(),
            dconf.target_vocab_size,
            dconf.max_subword_length,
            dconf.max_corpus_chars,
            dconf.reserved_tokens
        )
        return cls(decoder_config, subwords)

    @classmethod
    def load_from_file(cls, decoder_config: dict, filename: str = None):
        """Create a featurizer from a saved subword vocabulary file."""
        dconf = DecoderConfig(decoder_config.copy())
        filename = dconf.vocabulary if filename is None else file_util.preprocess_paths(filename)
        filename_prefix = os.path.splitext(filename)[0]
        subwords = tds.deprecated.text.SubwordTextEncoder.load_from_file(filename_prefix)
        return cls(decoder_config, subwords)

    def save_to_file(self, filename: str = None):
        """Persist the subword vocabulary (path defaults to the config's)."""
        filename = self.decoder_config.vocabulary if filename is None else file_util.preprocess_paths(filename)
        filename_prefix = os.path.splitext(filename)[0]
        return self.subwords.save_to_file(filename_prefix)

    def extract(self, text: str) -> tf.Tensor:
        """
        Convert string to a list of integers
        Args:
            text: string (sequence of characters)
        Returns:
            sequence of ints in tf.Tensor
        """
        text = self.preprocess_text(text)
        text = text.strip()  # remove trailing space
        indices = self.subwords.encode(text)
        return tf.convert_to_tensor(indices, dtype=tf.int32)

    def iextract(self, indices: tf.Tensor) -> tf.Tensor:
        """
        Convert list of indices to string
        Args:
            indices: tf.Tensor with dim [B, None]
        Returns:
            transcripts: tf.Tensor of dtype tf.string with dim [B]
        """
        with tf.device("/CPU:0"):  # string data is not supported on GPU
            total = tf.shape(indices)[0]
            batch = tf.constant(0, dtype=tf.int32)
            transcripts = tf.TensorArray(
                dtype=tf.string, size=total, dynamic_size=False, infer_shape=False,
                clear_after_read=False, element_shape=tf.TensorShape([])
            )

            # Graph-mode loop: decode one batch element per iteration because
            # subwords.decode is a Python function (wrapped in numpy_function).
            def cond(batch, total, _): return tf.less(batch, total)

            def body(batch, total, transcripts):
                norm_indices = self.normalize_indices(indices[batch])
                # Drop zeros (blank/padding) before decoding.
                norm_indices = tf.gather_nd(norm_indices, tf.where(tf.not_equal(norm_indices, 0)))
                decoded = tf.numpy_function(self.subwords.decode, inp=[norm_indices], Tout=tf.string)
                transcripts = transcripts.write(batch, decoded)
                return batch + 1, total, transcripts

            _, _, transcripts = tf.while_loop(cond, body, loop_vars=[batch, total, transcripts])

            return transcripts.stack()

    @tf.function(
        input_signature=[
            tf.TensorSpec([None], dtype=tf.int32)
        ]
    )
    def indices2upoints(self, indices: tf.Tensor) -> tf.Tensor:
        """
        Transform Predicted Indices to Unicode Code Points (for using tflite)
        Args:
            indices: tf.Tensor of Classes in shape [None]
        Returns:
            unicode code points transcript with dtype tf.int32 and shape [None]
        """
        with tf.name_scope("indices2upoints"):
            indices = self.normalize_indices(indices)
            # Map each class id to its code points, then drop zero padding.
            upoints = tf.gather_nd(self.upoints, tf.expand_dims(indices, axis=-1))
            return tf.gather_nd(upoints, tf.where(tf.not_equal(upoints, 0)))
class SentencePieceFeaturizer(TextFeaturizer):
    """
    Extract text feature based on sentence piece package.

    Transcripts are encoded to sentence-piece ids with ``extract`` and
    decoded back with ``iextract``; index 0 (the pad token) is the blank.
    """
    UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 1
    BOS_TOKEN, BOS_TOKEN_ID = "<s>", 2
    EOS_TOKEN, EOS_TOKEN_ID = "</s>", 3
    PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 0  # unused, by default

    def __init__(self, decoder_config: dict, model=None):
        super(SentencePieceFeaturizer, self).__init__(decoder_config)
        self.model = self.__load_model() if model is None else model
        self.blank = 0  # treats blank as 0 (pad)
        # vocab size
        self.num_classes = self.model.get_piece_size()
        # Code-point table; built lazily by __init_upoints on the first
        # indices2upoints call.  (The original assigned None twice.)
        self.upoints = None

    def __load_model(self):
        """Load the trained .model file next to the configured vocabulary."""
        filename_prefix = os.path.splitext(self.decoder_config.vocabulary)[0]
        processor = sp.SentencePieceProcessor()
        processor.load(filename_prefix + ".model")
        return processor

    def __init_upoints(self):
        # Decode every class id back to text, then build a padded tensor of
        # unicode code points; index 0 stays the empty (blank) entry.
        text = [""]
        for idx in range(1, self.num_classes):
            text.append(self.model.decode_ids([idx]))
        self.upoints = tf.strings.unicode_decode(text, "UTF-8")
        self.upoints = self.upoints.to_tensor()  # [num_classes, max_subword_length]

    @classmethod
    def build_from_corpus(cls, decoder_config: dict):
        """
        --model_prefix: output model name prefix. <model_name>.model and <model_name>.vocab are generated.
        --vocab_size: vocabulary size, e.g., 8000, 16000, or 32000
        --model_type: model type. Choose from unigram (default), bpe, char, or word.
        The input sentence must be pretokenized when using word type."""
        decoder_cfg = DecoderConfig(decoder_config)

        # Train SentencePiece Model
        def corpus_iterator():
            # Yield the transcript column (last tab-separated field) of every
            # line, skipping each file's header line.
            for file in decoder_cfg.corpus_files:
                with open(file, "r", encoding="utf-8") as f:
                    lines = f.read().splitlines()
                    lines = lines[1:]
                    for line in lines:
                        line = line.split("\t")
                        yield line[-1]

        sp.SentencePieceTrainer.Train(
            sentence_iterator=corpus_iterator(),
            model_prefix=decoder_cfg.output_path_prefix,
            model_type=decoder_cfg.model_type,
            vocab_size=decoder_cfg.target_vocab_size,
            num_threads=cpu_count(),
            unk_id=cls.UNK_TOKEN_ID,
            bos_id=cls.BOS_TOKEN_ID,
            eos_id=cls.EOS_TOKEN_ID,
            pad_id=cls.PAD_TOKEN_ID,
            unk_surface='__UNKNOWN__'  # change default unk surface U+2047("⁇") by "__UNKNOWN__"
        )
        # Export fairseq dictionary
        processor = sp.SentencePieceProcessor()
        processor.Load(decoder_cfg.output_path_prefix + ".model")
        vocab = {i: processor.IdToPiece(i) for i in range(processor.GetPieceSize())}
        # Sanity-check that the special-token ids came out as requested.
        assert (
            vocab.get(cls.UNK_TOKEN_ID) == cls.UNK_TOKEN
            and vocab.get(cls.BOS_TOKEN_ID) == cls.BOS_TOKEN
            and vocab.get(cls.EOS_TOKEN_ID) == cls.EOS_TOKEN
        )
        vocab = {
            i: s
            for i, s in vocab.items()
            if s not in {cls.UNK_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.PAD_TOKEN}
        }
        with open(decoder_cfg.output_path_prefix + ".txt", "w") as f_out:
            for _, s in sorted(vocab.items(), key=lambda x: x[0]):
                f_out.write(f"{s} 1\n")

        return cls(decoder_config, processor)

    @classmethod
    def load_from_file(cls, decoder_config: dict, filename: str = None):
        """Create a featurizer from a trained sentence-piece .model file."""
        if filename is not None:
            filename_prefix = os.path.splitext(file_util.preprocess_paths(filename))[0]
        else:
            filename_prefix = decoder_config.get("output_path_prefix", None)
        processor = sp.SentencePieceProcessor()
        processor.load(filename_prefix + ".model")
        return cls(decoder_config, processor)

    def extract(self, text: str) -> tf.Tensor:
        """
        Convert string to a list of integers
        # encode: text => id
        sp.encode_as_pieces('This is a test') --> ['▁This', '▁is', '▁a', '▁t', 'est']
        sp.encode_as_ids('This is a test') --> [209, 31, 9, 375, 586]
        Args:
            text: string (sequence of characters)
        Returns:
            sequence of ints in tf.Tensor
        """
        text = self.preprocess_text(text)
        text = text.strip()  # remove trailing space
        indices = self.model.encode_as_ids(text)
        return tf.convert_to_tensor(indices, dtype=tf.int32)

    def iextract(self, indices: tf.Tensor) -> tf.Tensor:
        """
        Convert list of indices to string
        # decode: id => text
        sp.decode_pieces(['▁This', '▁is', '▁a', '▁t', 'est']) --> This is a test
        sp.decode_ids([209, 31, 9, 375, 586]) --> This is a test
        Args:
            indices: tf.Tensor with dim [B, None]
        Returns:
            transcripts: tf.Tensor of dtype tf.string with dim [B]
        """
        indices = self.normalize_indices(indices)
        with tf.device("/CPU:0"):  # string data is not supported on GPU

            def decode(x):
                # Skip a leading blank, then decode via the Python model.
                if x[0] == self.blank: x = x[1:]
                return self.model.decode_ids(x.tolist())

            text = tf.map_fn(
                lambda x: tf.numpy_function(decode, inp=[x], Tout=tf.string),
                indices,
                fn_output_signature=tf.TensorSpec([], dtype=tf.string)
            )
        return text

    @tf.function(
        input_signature=[
            tf.TensorSpec([None], dtype=tf.int32)
        ]
    )
    def indices2upoints(self, indices: tf.Tensor) -> tf.Tensor:
        """
        Transform Predicted Indices to Unicode Code Points (for using tflite)
        Args:
            indices: tf.Tensor of Classes in shape [None]
        Returns:
            unicode code points transcript with dtype tf.int32 and shape [None]
        """
        # Build the code-point table on first use (it decodes the whole vocab).
        if self.upoints is None:
            self.__init_upoints()
        with tf.name_scope("indices2upoints"):
            indices = self.normalize_indices(indices)
            upoints = tf.gather_nd(self.upoints, tf.expand_dims(indices, axis=-1))
            return tf.gather_nd(upoints, tf.where(tf.not_equal(upoints, 0)))
| [
"sentencepiece.SentencePieceProcessor",
"tensorflow_datasets.deprecated.text.SubwordTextEncoder.load_from_file",
"numpy.arange",
"tensorflow.numpy_function",
"multiprocessing.cpu_count",
"tensorflow.not_equal",
"tensorflow.strings.unicode_decode",
"codecs.open",
"tensorflow.less",
"tensorflow.Tens... | [((2714, 2753), 'tensorflow.concat', 'tf.concat', (['[[self.blank], text]'], {'axis': '(0)'}), '([[self.blank], text], axis=0)\n', (2723, 2753), True, 'import tensorflow as tf\n'), ((4619, 4669), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.tokens'], {'dtype': 'tf.string'}), '(self.tokens, dtype=tf.string)\n', (4639, 4669), True, 'import tensorflow as tf\n'), ((5190, 5235), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['indices'], {'dtype': 'tf.int32'}), '(indices, dtype=tf.int32)\n', (5210, 5235), True, 'import tensorflow as tf\n'), ((7476, 7522), 'numpy.arange', 'np.arange', (['(1)', 'self.num_classes'], {'dtype': 'np.int32'}), '(1, self.num_classes, dtype=np.int32)\n', (7485, 7522), True, 'import numpy as np\n'), ((7600, 7640), 'tensorflow.strings.unicode_decode', 'tf.strings.unicode_decode', (['text', '"""UTF-8"""'], {}), "(text, 'UTF-8')\n", (7625, 7640), True, 'import tensorflow as tf\n'), ((7851, 7921), 'tensorflow_datasets.deprecated.text.SubwordTextEncoder.load_from_file', 'tds.deprecated.text.SubwordTextEncoder.load_from_file', (['filename_prefix'], {}), '(filename_prefix)\n', (7904, 7921), True, 'import tensorflow_datasets as tds\n'), ((9145, 9215), 'tensorflow_datasets.deprecated.text.SubwordTextEncoder.load_from_file', 'tds.deprecated.text.SubwordTextEncoder.load_from_file', (['filename_prefix'], {}), '(filename_prefix)\n', (9198, 9215), True, 'import tensorflow_datasets as tds\n'), ((9935, 9980), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['indices'], {'dtype': 'tf.int32'}), '(indices, dtype=tf.int32)\n', (9955, 9980), True, 'import tensorflow as tf\n'), ((12798, 12825), 'sentencepiece.SentencePieceProcessor', 'sp.SentencePieceProcessor', ([], {}), '()\n', (12823, 12825), True, 'import sentencepiece as sp\n'), ((13077, 13117), 'tensorflow.strings.unicode_decode', 'tf.strings.unicode_decode', (['text', '"""UTF-8"""'], {}), "(text, 'UTF-8')\n", (13102, 13117), True, 'import tensorflow as 
tf\n'), ((14647, 14674), 'sentencepiece.SentencePieceProcessor', 'sp.SentencePieceProcessor', ([], {}), '()\n', (14672, 14674), True, 'import sentencepiece as sp\n'), ((15751, 15778), 'sentencepiece.SentencePieceProcessor', 'sp.SentencePieceProcessor', ([], {}), '()\n', (15776, 15778), True, 'import sentencepiece as sp\n'), ((16461, 16506), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['indices'], {'dtype': 'tf.int32'}), '(indices, dtype=tf.int32)\n', (16481, 16506), True, 'import tensorflow as tf\n'), ((2331, 2365), 'tensorflow.name_scope', 'tf.name_scope', (['"""normalize_indices"""'], {}), "('normalize_indices')\n", (2344, 2365), True, 'import tensorflow as tf\n'), ((2529, 2580), 'tensorflow.where', 'tf.where', (['(indices == minus_one)', 'blank_like', 'indices'], {}), '(indices == minus_one, blank_like, indices)\n', (2537, 2580), True, 'import tensorflow as tf\n'), ((5602, 5634), 'tensorflow.expand_dims', 'tf.expand_dims', (['indices'], {'axis': '(-1)'}), '(indices, axis=-1)\n', (5616, 5634), True, 'import tensorflow as tf\n'), ((5649, 5668), 'tensorflow.device', 'tf.device', (['"""/CPU:0"""'], {}), "('/CPU:0')\n", (5658, 5668), True, 'import tensorflow as tf\n'), ((5730, 5769), 'tensorflow.strings.reduce_join', 'tf.strings.reduce_join', (['tokens'], {'axis': '(-1)'}), '(tokens, axis=-1)\n', (5752, 5769), True, 'import tensorflow as tf\n'), ((6252, 6284), 'tensorflow.name_scope', 'tf.name_scope', (['"""indices2upoints"""'], {}), "('indices2upoints')\n", (6265, 6284), True, 'import tensorflow as tf\n'), ((7784, 7832), 'os.path.splitext', 'os.path.splitext', (['self.decoder_config.vocabulary'], {}), '(self.decoder_config.vocabulary)\n', (7800, 7832), False, 'import os\n'), ((9096, 9122), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (9112, 9122), False, 'import os\n'), ((9450, 9476), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (9466, 9476), False, 'import os\n'), ((10267, 10286), 
'tensorflow.device', 'tf.device', (['"""/CPU:0"""'], {}), "('/CPU:0')\n", (10276, 10286), True, 'import tensorflow as tf\n'), ((10388, 10418), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (10399, 10418), True, 'import tensorflow as tf\n'), ((11172, 11236), 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body'], {'loop_vars': '[batch, total, transcripts]'}), '(cond, body, loop_vars=[batch, total, transcripts])\n', (11185, 11236), True, 'import tensorflow as tf\n'), ((11737, 11769), 'tensorflow.name_scope', 'tf.name_scope', (['"""indices2upoints"""'], {}), "('indices2upoints')\n", (11750, 11769), True, 'import tensorflow as tf\n'), ((12726, 12774), 'os.path.splitext', 'os.path.splitext', (['self.decoder_config.vocabulary'], {}), '(self.decoder_config.vocabulary)\n', (12742, 12774), False, 'import os\n'), ((17019, 17038), 'tensorflow.device', 'tf.device', (['"""/CPU:0"""'], {}), "('/CPU:0')\n", (17028, 17038), True, 'import tensorflow as tf\n'), ((17978, 18010), 'tensorflow.name_scope', 'tf.name_scope', (['"""indices2upoints"""'], {}), "('indices2upoints')\n", (17991, 18010), True, 'import tensorflow as tf\n'), ((2396, 2433), 'tensorflow.ones_like', 'tf.ones_like', (['indices'], {'dtype': 'tf.int32'}), '(indices, dtype=tf.int32)\n', (2408, 2433), True, 'import tensorflow as tf\n'), ((2472, 2509), 'tensorflow.ones_like', 'tf.ones_like', (['indices'], {'dtype': 'tf.int32'}), '(indices, dtype=tf.int32)\n', (2484, 2509), True, 'import tensorflow as tf\n'), ((3773, 3830), 'codecs.open', 'codecs.open', (['self.decoder_config.vocabulary', '"""r"""', '"""utf-8"""'], {}), "(self.decoder_config.vocabulary, 'r', 'utf-8')\n", (3784, 3830), False, 'import codecs\n'), ((4693, 4740), 'tensorflow.strings.unicode_decode', 'tf.strings.unicode_decode', (['self.tokens', '"""UTF-8"""'], {}), "(self.tokens, 'UTF-8')\n", (4718, 4740), True, 'import tensorflow as tf\n'), ((6389, 6421), 'tensorflow.expand_dims', 'tf.expand_dims', 
(['indices'], {'axis': '(-1)'}), '(indices, axis=-1)\n', (6403, 6421), True, 'import tensorflow as tf\n'), ((5849, 5886), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None]'], {'dtype': 'tf.int32'}), '([None], dtype=tf.int32)\n', (5862, 5886), True, 'import tensorflow as tf\n'), ((10347, 10364), 'tensorflow.shape', 'tf.shape', (['indices'], {}), '(indices)\n', (10355, 10364), True, 'import tensorflow as tf\n'), ((10679, 10700), 'tensorflow.less', 'tf.less', (['batch', 'total'], {}), '(batch, total)\n', (10686, 10700), True, 'import tensorflow as tf\n'), ((10946, 11021), 'tensorflow.numpy_function', 'tf.numpy_function', (['self.subwords.decode'], {'inp': '[norm_indices]', 'Tout': 'tf.string'}), '(self.subwords.decode, inp=[norm_indices], Tout=tf.string)\n', (10963, 11021), True, 'import tensorflow as tf\n'), ((11874, 11906), 'tensorflow.expand_dims', 'tf.expand_dims', (['indices'], {'axis': '(-1)'}), '(indices, axis=-1)\n', (11888, 11906), True, 'import tensorflow as tf\n'), ((11334, 11371), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None]'], {'dtype': 'tf.int32'}), '([None], dtype=tf.int32)\n', (11347, 11371), True, 'import tensorflow as tf\n'), ((14323, 14334), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (14332, 14334), False, 'from multiprocessing import cpu_count\n'), ((18115, 18147), 'tensorflow.expand_dims', 'tf.expand_dims', (['indices'], {'axis': '(-1)'}), '(indices, axis=-1)\n', (18129, 18147), True, 'import tensorflow as tf\n'), ((17508, 17545), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None]'], {'dtype': 'tf.int32'}), '([None], dtype=tf.int32)\n', (17521, 17545), True, 'import tensorflow as tf\n'), ((6473, 6497), 'tensorflow.not_equal', 'tf.not_equal', (['upoints', '(0)'], {}), '(upoints, 0)\n', (6485, 6497), True, 'import tensorflow as tf\n'), ((10599, 10617), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (10613, 10617), True, 'import tensorflow as tf\n'), ((11958, 11982), 'tensorflow.not_equal', 
'tf.not_equal', (['upoints', '(0)'], {}), '(upoints, 0)\n', (11970, 11982), True, 'import tensorflow as tf\n'), ((17269, 17319), 'tensorflow.numpy_function', 'tf.numpy_function', (['decode'], {'inp': '[x]', 'Tout': 'tf.string'}), '(decode, inp=[x], Tout=tf.string)\n', (17286, 17319), True, 'import tensorflow as tf\n'), ((17382, 17416), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[]'], {'dtype': 'tf.string'}), '([], dtype=tf.string)\n', (17395, 17416), True, 'import tensorflow as tf\n'), ((18199, 18223), 'tensorflow.not_equal', 'tf.not_equal', (['upoints', '(0)'], {}), '(upoints, 0)\n', (18211, 18223), True, 'import tensorflow as tf\n'), ((10888, 10917), 'tensorflow.not_equal', 'tf.not_equal', (['norm_indices', '(0)'], {}), '(norm_indices, 0)\n', (10900, 10917), True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.