hexsha
stringlengths
40
40
size
int64
4
996k
ext
stringclasses
8 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
4
996k
avg_line_length
float64
1.33
58.2k
max_line_length
int64
2
323k
alphanum_fraction
float64
0
0.97
content_no_comment
stringlengths
0
946k
is_comment_constant_removed
bool
2 classes
is_sharp_comment_removed
bool
1 class
f7f6660a04bab9e209e2f7758b0cca0b182bfa35
6,426
py
Python
maskrcnn_benchmark/modeling/backbone/resnet_fast.py
amsword/maskrcnn-benchmark
660457d5f28c5d7d7887829486a20c60976b1dd8
[ "MIT" ]
2
2020-08-18T05:14:58.000Z
2020-08-20T05:13:36.000Z
maskrcnn_benchmark/modeling/backbone/resnet_fast.py
jacobswan1/maskrcnn-benchmark
660457d5f28c5d7d7887829486a20c60976b1dd8
[ "MIT" ]
null
null
null
maskrcnn_benchmark/modeling/backbone/resnet_fast.py
jacobswan1/maskrcnn-benchmark
660457d5f28c5d7d7887829486a20c60976b1dd8
[ "MIT" ]
1
2020-08-18T05:15:08.000Z
2020-08-18T05:15:08.000Z
# This file cames from Xiyang Dai import torch.nn as nn from torch.nn import BatchNorm2d from maskrcnn_benchmark.modeling.make_layers import frozen_batch_norm from maskrcnn_benchmark.layers.batch_norm import FrozenBatchNorm2d def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, normf=frozen_batch_norm): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = normf(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = normf(planes) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, cfg, block=BasicBlock): super(ResNet, self).__init__() layers = [3, 4, 6, 3] if cfg.MODEL.BACKBONE.USE_BN: normf = BatchNorm2d else: normf = frozen_batch_norm self.inplanes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = normf(16) self.relu = nn.ReLU(inplace=True) self.conv2 = self._make_layer(block, 16, 1) if True: self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: self.maxpool = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1, bias=False), nn.ReLU(inplace=True)) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 
512, layers[3], stride=2) self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), frozen_batch_norm(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _freeze_backbone(self, freeze_at): if freeze_at < 0: return for stage_index in range(freeze_at): if stage_index == 0: m = self.conv1 # stage 0 is the stem else: m = getattr(self, "layer" + str(stage_index)) for p in m.parameters(): p.requires_grad = False def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.maxpool(x) res = [] x = self.layer1(x) res.append(x) x = self.layer2(x) res.append(x) x = self.layer3(x) res.append(x) x = self.layer4(x) res.append(x) return res class ResNet_XX(nn.Module): def __init__(self, cfg, block=BasicBlock): super().__init__() layers = cfg.MODEL.RESNETS.LAYERS in_channels = cfg.MODEL.RESNETS.IN_CHANNELS if cfg.MODEL.BACKBONE.USE_BN: self.normf = BatchNorm2d else: self.normf = FrozenBatchNorm2d self.inplanes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = self.normf(16) self.relu = nn.ReLU(inplace=True) self.conv2 = self._make_layer(block, 16, 1) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, in_channels[0], layers[0]) self.layer2 = self._make_layer(block, in_channels[1], layers[1], stride=2) self.layer3 = self._make_layer(block, in_channels[2], layers[2], stride=2) self.layer4 = self._make_layer(block, in_channels[3], layers[3], stride=2) self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) def _make_layer(self, 
block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), self.normf(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, normf=self.normf)) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append(block(self.inplanes, planes, normf=self.normf)) return nn.Sequential(*layers) def _freeze_backbone(self, freeze_at): if freeze_at < 0: return for stage_index in range(freeze_at): if stage_index == 0: self._no_grad(self.conv1) self._no_grad(self.conv2) else: m = getattr(self, "layer" + str(stage_index)) self._no_grad(m) def _no_grad(self, m): for p in m.parameters(): p.requires_grad = False def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.maxpool(x) res = [] x = self.layer1(x) res.append(x) x = self.layer2(x) res.append(x) x = self.layer3(x) res.append(x) x = self.layer4(x) res.append(x) return res
32.619289
107
0.589169
import torch.nn as nn from torch.nn import BatchNorm2d from maskrcnn_benchmark.modeling.make_layers import frozen_batch_norm from maskrcnn_benchmark.layers.batch_norm import FrozenBatchNorm2d def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, normf=frozen_batch_norm): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = normf(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = normf(planes) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, cfg, block=BasicBlock): super(ResNet, self).__init__() layers = [3, 4, 6, 3] if cfg.MODEL.BACKBONE.USE_BN: normf = BatchNorm2d else: normf = frozen_batch_norm self.inplanes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = normf(16) self.relu = nn.ReLU(inplace=True) self.conv2 = self._make_layer(block, 16, 1) if True: self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: self.maxpool = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1, bias=False), nn.ReLU(inplace=True)) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) 
def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), frozen_batch_norm(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _freeze_backbone(self, freeze_at): if freeze_at < 0: return for stage_index in range(freeze_at): if stage_index == 0: m = self.conv1 else: m = getattr(self, "layer" + str(stage_index)) for p in m.parameters(): p.requires_grad = False def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.maxpool(x) res = [] x = self.layer1(x) res.append(x) x = self.layer2(x) res.append(x) x = self.layer3(x) res.append(x) x = self.layer4(x) res.append(x) return res class ResNet_XX(nn.Module): def __init__(self, cfg, block=BasicBlock): super().__init__() layers = cfg.MODEL.RESNETS.LAYERS in_channels = cfg.MODEL.RESNETS.IN_CHANNELS if cfg.MODEL.BACKBONE.USE_BN: self.normf = BatchNorm2d else: self.normf = FrozenBatchNorm2d self.inplanes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = self.normf(16) self.relu = nn.ReLU(inplace=True) self.conv2 = self._make_layer(block, 16, 1) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, in_channels[0], layers[0]) self.layer2 = self._make_layer(block, in_channels[1], layers[1], stride=2) self.layer3 = self._make_layer(block, in_channels[2], layers[2], stride=2) self.layer4 = self._make_layer(block, in_channels[3], layers[3], stride=2) self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: 
downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), self.normf(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, normf=self.normf)) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append(block(self.inplanes, planes, normf=self.normf)) return nn.Sequential(*layers) def _freeze_backbone(self, freeze_at): if freeze_at < 0: return for stage_index in range(freeze_at): if stage_index == 0: self._no_grad(self.conv1) self._no_grad(self.conv2) else: m = getattr(self, "layer" + str(stage_index)) self._no_grad(m) def _no_grad(self, m): for p in m.parameters(): p.requires_grad = False def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.maxpool(x) res = [] x = self.layer1(x) res.append(x) x = self.layer2(x) res.append(x) x = self.layer3(x) res.append(x) x = self.layer4(x) res.append(x) return res
true
true
f7f666ddc6762d62eacf13a073aad06d68118588
1,809
py
Python
python/complete/no335.py
dhermes/project-euler
2aabf89b28f033a3e59b7411a9c9c261117818cf
[ "Apache-2.0" ]
7
2015-03-24T21:30:33.000Z
2021-03-13T14:01:23.000Z
python/complete/no335.py
dhermes/project-euler
2aabf89b28f033a3e59b7411a9c9c261117818cf
[ "Apache-2.0" ]
2
2015-03-25T22:53:54.000Z
2016-08-15T16:07:48.000Z
python/complete/no335.py
dhermes/project-euler
2aabf89b28f033a3e59b7411a9c9c261117818cf
[ "Apache-2.0" ]
1
2015-03-25T22:34:04.000Z
2015-03-25T22:34:04.000Z
#!/usr/bin/env python # Since M(2**n + 1) = 4**n + 3**n - 2**(n + 1) (empirically), # we find sum_{n=0}^{P} M(2**n + 1) is equal to # (4**(P + 1) - 1)/3 + (3**(P + 1) - 1)/2 + 2*(2**(P + 1) - 1) # = (4*(4**P) - 1)*(3**(-1)) + (3*(3**P) - 1)*(2**(-1)) + 4*(2**P) - 2 # (This is because (r - 1)*(r**P + ... + r + 1) = r**(P + 1) - 1 from python.decorators import euler_timer from python.functions import inverse_mod_n def moves(n): if n < 3: return n goal_state = [1] * n state = [0, 2] + [1] * (n - 2) num_moves = 1 last_placed = 1 while state != goal_state: beans = state[last_placed] state[last_placed] = 0 for bean in range(1, beans + 1): next_index = (last_placed + bean) % n state[next_index] += 1 last_placed = (last_placed + beans) % n num_moves += 1 return num_moves def check_formula(n): return (moves(2 ** n + 1) == 4 ** n - 3 ** n + 2 ** (n + 1)) # Since (a**(n**k))**n = a**(n*(n**k)) = a**(n**(k + 1)), # We can easily compute X**(P + 1) = X*(X**P) for P = 10**18 def modular_exponentiate(val, exp_base, exp_power, modulus): result = val for i in xrange(exp_power): result = (result ** exp_base) % modulus return result def main(verbose=False): for n in range(10): if not check_formula(n): raise Exception("Proposed formula for M(2**k + 1) incorrect.") modulus = 7 ** 9 p_2 = 4 * modular_exponentiate(2, 10, 18, modulus) - 2 p_3 = 3 * modular_exponentiate(3, 10, 18, modulus) - 1 p_4 = 4 * modular_exponentiate(4, 10, 18, modulus) - 1 return (p_4 * inverse_mod_n(3, modulus) - p_3 * inverse_mod_n(2, modulus) + p_2) % (modulus) if __name__ == '__main__': print euler_timer(335)(main)(verbose=True)
29.655738
74
0.537866
from python.decorators import euler_timer from python.functions import inverse_mod_n def moves(n): if n < 3: return n goal_state = [1] * n state = [0, 2] + [1] * (n - 2) num_moves = 1 last_placed = 1 while state != goal_state: beans = state[last_placed] state[last_placed] = 0 for bean in range(1, beans + 1): next_index = (last_placed + bean) % n state[next_index] += 1 last_placed = (last_placed + beans) % n num_moves += 1 return num_moves def check_formula(n): return (moves(2 ** n + 1) == 4 ** n - 3 ** n + 2 ** (n + 1)) def modular_exponentiate(val, exp_base, exp_power, modulus): result = val for i in xrange(exp_power): result = (result ** exp_base) % modulus return result def main(verbose=False): for n in range(10): if not check_formula(n): raise Exception("Proposed formula for M(2**k + 1) incorrect.") modulus = 7 ** 9 p_2 = 4 * modular_exponentiate(2, 10, 18, modulus) - 2 p_3 = 3 * modular_exponentiate(3, 10, 18, modulus) - 1 p_4 = 4 * modular_exponentiate(4, 10, 18, modulus) - 1 return (p_4 * inverse_mod_n(3, modulus) - p_3 * inverse_mod_n(2, modulus) + p_2) % (modulus) if __name__ == '__main__': print euler_timer(335)(main)(verbose=True)
false
true
f7f6681b43a764564e2b8ac5d6512ab35bd02bf7
97,646
py
Python
test/distributed/test_c10d_nccl.py
FloCF/pytorch
383a33a0eb28ae454c0c8965650aea8ce1608943
[ "Intel" ]
1
2021-08-18T08:59:49.000Z
2021-08-18T08:59:49.000Z
test/distributed/test_c10d_nccl.py
FloCF/pytorch
383a33a0eb28ae454c0c8965650aea8ce1608943
[ "Intel" ]
null
null
null
test/distributed/test_c10d_nccl.py
FloCF/pytorch
383a33a0eb28ae454c0c8965650aea8ce1608943
[ "Intel" ]
null
null
null
import copy import math import os import random import signal import sys import tempfile import threading import time from contextlib import contextmanager from datetime import timedelta from itertools import product from unittest import mock import torch import torch.distributed as c10d if not c10d.is_available(): print("c10d not available, skipping tests", file=sys.stderr) sys.exit(0) import test_c10d_common import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_nccl, requires_nccl_version, skip_if_lt_x_gpu, get_timeout, skip_if_rocm, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( IS_WINDOWS, TestCase, run_tests, retry_on_connect_failures, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_TSAN, sandcastle_skip, sandcastle_skip_if, ) from torch.utils.checkpoint import checkpoint from torch.distributed.optim import functional_optim_map if not IS_WINDOWS: from torch.distributed.optim.functional_sgd import _FunctionalSGD from torch.distributed.optim.functional_adam import _FunctionalAdam from torch.distributed.optim.functional_adamw import _FunctionalAdamW if TEST_WITH_TSAN: print( "Skip as TSAN is not fork-safe since we're forking in a multi-threaded environment", file=sys.stderr, ) sys.exit(0) if TEST_WITH_DEV_DBG_ASAN: print( "Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr ) sys.exit(0) class RendezvousEnvTest(TestCase): @retry_on_connect_failures @requires_nccl() @sandcastle_skip_if( torch.cuda.device_count() == 0, "No GPUs available, 
skipping test" ) def test_common_errors(self): vars = { "WORLD_SIZE": "1", "RANK": "0", "MASTER_ADDR": "127.0.0.1", "MASTER_PORT": str(common.find_free_port()), } class Env(object): def __init__(self, vars): self.env_patcher = mock.patch.dict(os.environ, vars, clear=True) def __enter__(self): self.env_patcher.start() def __exit__(self, type, value, traceback): self.env_patcher.stop() def without(d, key): d = d.copy() d.pop(key) return d def withouts(d, keys): d = d.copy() for key in keys: d.pop(key) return d with Env(without(vars, "WORLD_SIZE")): self.assertEqual(None, os.environ.get("WORLD_SIZE")) with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"): gen = c10d.rendezvous("env://") next(gen) c10d.init_process_group(backend="nccl", world_size=1) self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(without(vars, "RANK")): self.assertEqual(None, os.environ.get("RANK")) with self.assertRaisesRegex(ValueError, "RANK expected"): gen = c10d.rendezvous("env://") next(gen) c10d.init_process_group(backend="nccl", rank=0) self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(withouts(vars, ["RANK", "WORLD_SIZE"])): self.assertEqual(None, os.environ.get("RANK")) self.assertEqual(None, os.environ.get("WORLD_SIZE")) c10d.init_process_group(backend="nccl", rank=0, world_size=1) self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(vars): c10d.init_process_group(backend="nccl") self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(without(vars, "MASTER_ADDR")): self.assertEqual(None, os.environ.get("MASTER_ADDR")) with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"): gen = c10d.rendezvous("env://") next(gen) with Env(without(vars, "MASTER_PORT")): self.assertEqual(None, os.environ.get("MASTER_PORT")) with 
self.assertRaisesRegex(ValueError, "MASTER_PORT expected"): gen = c10d.rendezvous("env://") next(gen) with Env(without(vars, "WORLD_SIZE")): self.assertEqual(None, os.environ.get("WORLD_SIZE")) gen = c10d.rendezvous("env://?world_size={}".format(1)) _, _, size = next(gen) self.assertEqual(size, 1) with Env(without(vars, "RANK")): self.assertEqual(None, os.environ.get("RANK")) gen = c10d.rendezvous("env://?rank={}".format(0)) _, rank, _ = next(gen) self.assertEqual(rank, 0) with Env(withouts(vars, ["RANK", "WORLD_SIZE"])): self.assertEqual(None, os.environ.get("RANK")) self.assertEqual(None, os.environ.get("WORLD_SIZE")) gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1)) _, rank, size = next(gen) self.assertEqual(rank, 0) self.assertEqual(size, 1) class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase): @requires_nccl() @retry_on_connect_failures @sandcastle_skip_if( torch.cuda.device_count() == 0, "No GPUs available, skipping test" ) def test_default_store_timeout_nccl(self): self._test_default_store_timeout("nccl") class ProcessGroupNCCLNoGPUTest(TestCase): MAIN_PROCESS_RANK = 0 def setUp(self): self.rank = self.MAIN_PROCESS_RANK self.world_size = 1 self.file = tempfile.NamedTemporaryFile(delete=False) def tearDown(self): pass @requires_nccl() @sandcastle_skip_if( torch.cuda.device_count() > 0, "GPUs are available, skipping test" ) def test_init_no_gpus(self): store = c10d.FileStore(self.file.name, self.world_size) with self.assertRaisesRegex( RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!" ): c10d.ProcessGroupNCCL(store, self.rank, self.world_size) class ProcessGroupNCCLTest(TestCase): MAIN_PROCESS_RANK = 0 def setUp(self): self.rank = self.MAIN_PROCESS_RANK self.world_size = 1 self.file = tempfile.NamedTemporaryFile(delete=False) # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests # that use NCCL_BLOCKING_WAIT will test it as expected. 
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" self.num_gpus = torch.cuda.device_count() def tearDown(self): pass @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_empty_tensors(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) xs = [torch.cuda.FloatTensor([])] pg.broadcast(xs).wait() self.assertEqual(0, xs[0].numel()) pg.allreduce(xs).wait() self.assertEqual(0, xs[0].numel()) pg.reduce(xs).wait() self.assertEqual(0, xs[0].numel()) ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]] pg.allgather(ys, xs).wait() for y in ys[0]: self.assertEqual(0, y.numel()) ys = [torch.cuda.FloatTensor([])] xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]] pg.reduce_scatter(ys, xs).wait() self.assertEqual(0, ys[0].numel()) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_broadcast_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def broadcast(xs, rootRank, rootTensor): opts = c10d.BroadcastOptions() opts.rootRank = rootRank opts.rootTensor = rootTensor work = pg.broadcast(xs, opts) work.wait() # for every root tensor for rt in range(self.num_gpus): tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i]).cuda(i)) broadcast(tensors, self.rank, rt) for i in range(self.num_gpus): self.assertEqual(tensors[i], tensors[rt]) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allreduce_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allreduce(tensors, op): opts = c10d.AllreduceOptions() opts.reduceOp = op work = pg.allreduce(tensors, opts) work.wait() # Sum tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 
1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.SUM) for i in range(self.num_gpus): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]), tensors[i], ) # Product tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.PRODUCT) for i in range(self.num_gpus): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i] ) # Min tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.MIN) for i in range(self.num_gpus): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i]) # Max tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.MAX) for i in range(self.num_gpus): self.assertEqual(torch.tensor([self.num_gpus]), tensors[i]) for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR): with self.assertRaisesRegex( RuntimeError, "Cannot use " + str(op) + " with NCCL" ): allreduce(tensors, op) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce(xs, rootRank, rootTensor, op=None): opts = c10d.ReduceOptions() opts.rootRank = rootRank opts.rootTensor = rootTensor if op: opts.reduceOp = op work = pg.reduce(xs, opts) work.wait() # for every root tensor for rt in range(self.num_gpus): tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) reduce(tensors, self.rank, rt) # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]), tensors[rt], ) for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR): with self.assertRaisesRegex( RuntimeError, "Cannot use " + str(op) + " with NCCL" ): reduce(tensors, self.rank, rt, op) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allgather_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allgather(output_ts, input_ts): work = pg.allgather(output_ts, input_ts) work.wait() tensors = [] output_ts = [[] for _ in range(self.num_gpus)] for idx, ls in enumerate(output_ts): for _ in range(self.world_size * self.num_gpus): ls.append(torch.tensor([0]).cuda(idx)) for i in range(self.num_gpus): tensors.append(torch.tensor([i]).cuda(i)) allgather(output_ts, tensors) # Verification for device_ts in output_ts: for s_idx, t in enumerate(device_ts): self.assertEqual(torch.tensor([s_idx]), t) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allgather_base_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allgather_base(output_t, input_t): work = pg._allgather_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # allgather_base is GPU number agnostic. 
# Each rank contribute one tensor regardless of GPU counts tensor = torch.tensor([self.rank]).cuda(device_id) output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id) allgather_base(output_t, tensor) # Verification self.assertEqual(torch.arange(self.world_size), output_t) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allgather_base_basics(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allgather_base(output_t, input_t): work = pg._allgather_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # anticpate an error with self.assertRaisesRegex( RuntimeError, "output tensor size must be equal to world_size times input tensor size", ): tensor = torch.tensor([self.rank]).cuda(device_id) output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda( device_id ) # fails the check because output_t is not correctly sized allgather_base(output_t, tensor) # anticpate an error with self.assertRaisesRegex( RuntimeError, "output tensor must have the same type as input tensor" ): tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id) output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda( device_id ) # fails the check because the dtype is different allgather_base(output_t, tensor) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_scatter_base_basics(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce_scatter_base(output_t, input_t): work = pg._reduce_scatter_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # anticpate an error with self.assertRaisesRegex( RuntimeError, "input tensor must be the same size as output size times world size", ): input_t = torch.tensor([self.rank]).cuda(device_id) 
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda( device_id ) # fails the check because output_t is not correctly sized reduce_scatter_base(output_t, input_t) # anticpate an error with self.assertRaisesRegex( RuntimeError, "input tensor must be the same type as the outut tensor." ): tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id) output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda( device_id ) # fails the check because the dtype is different reduce_scatter_base(output_t, tensor) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_scatter_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce_scatter(outputs, input_lists, op): opts = c10d.ReduceScatterOptions() opts.reduceOp = op work = pg.reduce_scatter(outputs, input_lists, opts) work.wait() virtual_rank = self.rank * self.world_size virtual_world_size = self.num_gpus * self.world_size output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)] # 0 1 2 # 0 [0..11] [1..12] # 1 [3..14] # 2 # 3 # Sum tensor_lists = [ [ torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i) for j in range(virtual_world_size) ] for i in range(self.num_gpus) ] reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM) for i in range(self.num_gpus): expected = torch.tensor( [ float(self.num_gpus * (self.num_gpus - 1) / 2) + (virtual_rank + i) * virtual_world_size ] ) # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095 self.assertEqualIgnoreType(expected, output[i]) # Min reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN) for i in range(self.num_gpus): expected = torch.tensor([self.rank * self.world_size + i]) self.assertEqual(expected, output[i]) # Max reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX) for i in range(self.num_gpus): expected = torch.tensor( [self.rank * self.world_size + i + virtual_world_size - 1] ) self.assertEqual(expected, output[i]) # Product tensor_lists = [ [ torch.tensor( [(self.rank * self.num_gpus + i + j) % virtual_world_size + 1] ).cuda(i) for j in range(virtual_world_size) ] for i in range(self.num_gpus) ] reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT) for i in range(self.num_gpus): expected = torch.tensor([float(math.factorial(virtual_world_size))]) # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType(expected, output[i]) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_scatter_base_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce_scatter_base(output_t, input_t): work = pg._reduce_scatter_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # reduce_scatter_base is GPU number agnostic. 
# Each rank contribute one tensor regardless of GPU counts output_t = torch.empty([1]).cuda(device_id) tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id) reduce_scatter_base(output_t, tensor) # Verification self.assertEqual(output_t[0], self.rank * self.world_size) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_barrier(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allreduce(tensors): opts = c10d.AllreduceOptions() work = pg.allreduce(tensors, opts) return work # Making the collective to operate on # 1, 2, 3, 4, .... self.num_gpus GPUs tensors_list = [[] for _ in range(2, self.num_gpus + 1)] for i in range(2, self.num_gpus + 1): for j in range(i): tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j)) works = [] for tensors in tensors_list: work = allreduce(tensors) works.append(work) # Barrier will ensure that all previous work is completed pg.barrier().wait() for i in range(2, self.num_gpus + 1): for j in range(i): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j] ) class DistributedDataParallelTest( test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super(DistributedDataParallelTest, self).setUp() # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests # that use NCCL_BLOCKING_WAIT will test it as expected. 
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" self._spawn_processes() def _test_nccl_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_multi_device_ids_not_allowed(self): int_devices = list(range(torch.cuda.device_count())) devices = [torch.device("cuda:" + str(i)) for i in int_devices] with self.assertRaisesRegex( ValueError, "device_ids can only be None or contain a single element." ): self._test_nccl_backend(devices, int_devices) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_single_device_module_device_ids_None(self): self._test_nccl_backend(None, None) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_single_device_module_empty_device_ids(self): # This tests the backward compatibility of accepting an empty list as `device_ids`, # although we no longer document this in favor of the default value of `None`, # which is consistent with multi-device modules and CPU modules. 
self._test_nccl_backend(None, []) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_backend_multi_device_module_device_ids_None(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, None, multi_device=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, int_devices) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, devices) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, None, multi_device=True) @requires_nccl() @skip_if_lt_x_gpu(8) def test_nccl_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, None, multi_device=True) @requires_nccl() @skip_if_lt_x_gpu(4) def test_ddp_multi_device_module_config(self): gpus = gpus_for_rank(self.world_size)[self.rank] self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process") store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) gpus = gpus[:2] model = DoubleGpuNet(gpus) with self.assertRaisesRegex( ValueError, "DistributedDataParallel device_ids and output_device arguments only work with " "single-device/multiple-device GPU modules or CPU modules", ): ddp_model = DistributedDataParallel( model, 
output_device=gpus[1], process_group=process_group ) with self.assertRaisesRegex( ValueError, "device_ids can only be None or contain a single element." ): ddp_model = DistributedDataParallel( model, device_ids=gpus, process_group=process_group ) with self.assertRaisesRegex( ValueError, "input module must be on the same type of devices" ): model.fc1 = model.fc1.cpu() ddp_model = DistributedDataParallel(model, process_group=process_group) model = model.cpu() with self.assertRaisesRegex( ValueError, "device_ids can only be None or contain a single element." ): ddp_model = DistributedDataParallel( model, device_ids=gpus, process_group=process_group ) def _test_fp16(self, gradient_as_bucket_view=False): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) gpus = gpus_for_rank(self.world_size)[self.rank] model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half() nn.init.constant_(model.weight, 1) ddp_model = DistributedDataParallel( model, device_ids=[gpus[0]], process_group=process_group, bucket_cap_mb=0.001, gradient_as_bucket_view=gradient_as_bucket_view, ) # Input 2**15, so that the gradients will overflow with a # world_size of 2, unless we normalize the gradient by the # world_size before the reduction input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half() # Step model ddp_model.train() output = ddp_model(input) loss = output.sum() loss.backward() self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters())) @requires_nccl() @skip_if_lt_x_gpu(2) def test_fp16(self): self._test_fp16() @requires_nccl() @skip_if_lt_x_gpu(2) def test_fp16_grad_is_view(self): self._test_fp16(gradient_as_bucket_view=True) def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False): """ Note: this test can be sped up by only running it on a CPU module once DistributedDataParallel supports them. 
""" store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) class ForwardReturnValueModule(nn.Module): def __init__(self): super(ForwardReturnValueModule, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.fc3 = nn.Linear(4, 4, bias=False) self.relu = nn.ReLU() def forward(self, x, fn): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) # The first softmax does NOT include fc3 in its autograd graph # whereas the second softmax DOES. If we pass only the first # tensor we see in the output to the reducer, it marks the # gradient for fc3 as ready (because it doesn't show up). If # downstream uses of this return value choose to differentiate # against the second output tensor, it would still receive a # gradient and a callback for this tensor, resulting in a crash. return fn( F.softmax(x, dim=1), F.softmax(self.fc3(x), dim=1), ) device_id = gpus_for_rank(self.world_size)[self.rank][0] model = DistributedDataParallel( ForwardReturnValueModule().float().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) # Always run "backward" to ensure the reducer is called by autograd. # If we don't correctly capture the output tensors from the return value, # the reducer won't see a hook for the unused parameter, and throw an error. # The correct capture is what we're testing in this function. 
def test(box, unbox): output = model(input, fn=box) loss = criterion(unbox(output), target) loss.backward() # Test with identity return value test( box=lambda x, y: (x, y), unbox=lambda obj: obj[1], ) # Test with list return value test( box=lambda x, y: ["foo", x, "bar", y], unbox=lambda obj: obj[3], ) # Test with tuple return value test( box=lambda x, y: ("foo", x, "bar", y), unbox=lambda obj: obj[3], ) # Test with dict return value test( box=lambda x, y: {"foo": "bar", "a": x, "b": y}, unbox=lambda obj: obj["b"], ) # Test with list with dict return value test( box=lambda x, y: ["foo", "bar", {"a": x, "b": y}], unbox=lambda obj: obj[2]["b"], ) # Test with dict with list return value test( box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]}, unbox=lambda obj: obj["list"][3], ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_arbitrary_forward_return_value(self): self._test_arbitrary_forward_return_value() @requires_nccl() @skip_if_lt_x_gpu(2) def test_arbitrary_forward_return_value_grad_is_view(self): self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_with_lazy_parameters(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) with self.assertRaisesRegex( RuntimeError, "Modules with uninitialized parameters" ): DistributedDataParallel( torch.nn.LazyLinear(10), process_group=process_group ) def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False): """ Note: this test can be sped up by only running it on a CPU module once DistributedDataParallel supports them. 
""" torch.cuda.set_device(self.rank) dist.init_process_group( backend="nccl", world_size=self.world_size, rank=self.rank, init_method=f"file://{self.file_name}", ) process_group = c10d.distributed_c10d._get_default_group() class FindUnusedParametersModule(nn.Module): def __init__(self): super(FindUnusedParametersModule, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.fc3 = nn.Linear(4, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) # Return the fc3 module so that the caller can invoke it # outside of the forward function. While this is bad practice, # we can use it to trigger a reducer error. return (F.softmax(x, dim=1), self.fc3) device_id = gpus_for_rank(self.world_size)[self.rank][0] batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) ddp_model = None def test_find_unused_parameters( find_unused_parameters, test_default=False, gradient_as_bucket_view=False ): if test_default: model = DistributedDataParallel( FindUnusedParametersModule().float().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) else: model = DistributedDataParallel( FindUnusedParametersModule().float().to(device_id), device_ids=[device_id], process_group=process_group, find_unused_parameters=find_unused_parameters, gradient_as_bucket_view=gradient_as_bucket_view, ) nonlocal ddp_model ddp_model = model output, fc3 = model(input) output = fc3(output) loss = criterion(output, target) loss.backward() # First test that finding unused params under these conditions is to # trigger an error when `backward` is called (because fc3 is an unused # parameter and will therefore be marked ready twice). 
try: test_find_unused_parameters( True, gradient_as_bucket_view=gradient_as_bucket_view ) except Exception as ex: self.assertTrue( str(ex).startswith( "Expected to mark a variable ready only once.", ) ) unused_index = 2 unused_index_str = f"Parameter at index {unused_index}" model = ddp_model.module for module_name, module in model.named_modules(): if module == model.fc3: for parameter_name, _ in module.named_parameters(recurse=False): unused_fqn = f"{module_name}.{parameter_name}" # Only one such parameter in model.fc3, since bias=False break if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF: unused_index_str += f" with name {unused_fqn}" self.assertTrue(unused_index_str in str(ex)) else: self.fail("Expected exception") dist.barrier(process_group) # Then test that the default behavior can be overridden by setting # `find_unused_parameters=False`. try: test_find_unused_parameters( False, gradient_as_bucket_view=gradient_as_bucket_view ) except Exception as ex: self.fail("Unexpected exception: %s" % ex) # Test find_unused_parameters defaults to False try: test_find_unused_parameters( True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view ) except Exception as ex: self.fail("Unexpected exception: %s" % ex) # TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967 # is resolved. 
    # Each variant below runs the same find_unused_parameters scenario under a
    # different torch.distributed debug level (DETAIL / INFO / OFF), with and
    # without gradient_as_bucket_view.
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["DETAIL"])
    def test_find_unused_parameters_kwarg_debug_detail(self):
        self._test_find_unused_parameters_kwarg()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["INFO"])
    def test_find_unused_parameters_kwarg_debug_info(self):
        self._test_find_unused_parameters_kwarg()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["OFF"])
    def test_find_unused_parameters_kwarg_debug_off(self):
        self._test_find_unused_parameters_kwarg()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["DETAIL"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["INFO"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["OFF"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)

    def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
        """
        Note: this test can be sped up by only running it on a CPU module
        once DistributedDataParallel supports them.
""" store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) class MultipleOutputModule(nn.Module): def __init__(self): super(MultipleOutputModule, self).__init__() def define_module(): return nn.Sequential( nn.Linear(2, 10, bias=False), nn.ReLU(), nn.Linear(10, 4, bias=False), nn.ReLU(), ) self.module0 = define_module() self.module1 = define_module() def forward(self, x): return ( F.softmax(self.module0(x), dim=1), F.softmax(self.module1(x), dim=1), ) device_id = gpus_for_rank(self.world_size)[self.rank][0] model = DistributedDataParallel( MultipleOutputModule().float().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) # Compute loss and gradients for both outputs output1, output2 = model(input) loss1 = criterion(output1, target) loss1.backward() loss2 = criterion(output2, target) loss2.backward() @requires_nccl() @skip_if_lt_x_gpu(2) def test_multiple_outputs_multiple_backward(self): self._test_multiple_outputs_multiple_backward() @requires_nccl() @skip_if_lt_x_gpu(2) def test_multiple_outputs_multiple_backward_grad_is_view(self): self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_no_grad(self): """ Note: this test can be sped up by only running it on a CPU module once DistributedDataParallel supports them. 
""" store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) class NoGradModule(nn.Module): def __init__(self): super(NoGradModule, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) return F.softmax(x, dim=1) device_id = gpus_for_rank(self.world_size)[self.rank][0] model = DistributedDataParallel( NoGradModule().float().to(device_id), device_ids=[device_id], process_group=process_group, ) batch_size = 4 input = torch.rand([batch_size, 2], dtype=torch.float) def check_no_grads(): for p in model.parameters(): self.assertTrue(p.requires_grad) self.assertIsNone(p.grad) # After initialization, no parameter has their gradient set. check_no_grads() # Run `forward` function with torch.no_grad() with torch.no_grad(): output = model(input) self.assertTrue(isinstance(output, torch.Tensor)) # No parameter should have their gradient set. check_no_grads() def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False): # This is NOT the recommended way to implement accumulating grads, but # we would like to make sure DDP does not mess up with the underlying # module. 
int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) global_batch_size = self.world_size model, ddp_model, input, target = self._prepare_single_device_module( process_group, devices, devices, global_batch_size, gradient_as_bucket_view ) def step_model(model, input, target): model.train() output = model(input) loss = F.mse_loss(output, target.to(output.device)) loss.backward() # ensure accumulate grads works with no_grad with torch.no_grad(): ddp_model.train() ddp_model.module(input) # Check two model parameters over 4 iterations. # Use 4 iterations because we alternate between reducing and # not reducing and want to make sure we switch both ways. for iteration in range(4): step_model(model, input, target) if iteration % 2 == 0: # Skip gradients sync without calling prepare_for_backward step_model( ddp_model.module, input[self.rank : (self.rank + 1)], target[self.rank : (self.rank + 1)], ) for i, j in zip(model.parameters(), ddp_model.parameters()): self.assertNotEqual(i.grad, j.grad) else: step_model( ddp_model, input[self.rank : (self.rank + 1)], target[self.rank : (self.rank + 1)], ) for i, j in zip(model.parameters(), ddp_model.parameters()): # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095 self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5) # Shuffle the input so that DDP input is different torch.manual_seed(1337 + iteration) input = input[torch.randperm(global_batch_size)] @requires_nccl() @skip_if_lt_x_gpu(2) def test_accumulate_gradients_module(self): self._test_accumulate_gradients_module() @requires_nccl() @skip_if_lt_x_gpu(2) def test_accumulate_gradients_module_with_grad_is_view(self): self._test_accumulate_gradients_module(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_failure_recovery(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) # need to create a separate file for the recovered FileStore, because # the original one will be deleted when destructing the first FileStore. recovery_filename = self.file_name + "_recovery" if self.rank == 0: # the file will be deleted by the recovered FileStore open(recovery_filename, "w").close() # not necessary to run barrier here, as DDP will synchronize class TestModel(nn.Module): def __init__(self): super(TestModel, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) return F.softmax(x, dim=1) device_id = gpus_for_rank(self.world_size)[self.rank][0] model = TestModel().float().to(device_id) ddp = DistributedDataParallel( model, device_ids=[device_id], process_group=process_group, ) batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) for _ in range(6): output = ddp(input) loss = criterion(output, target) loss.backward() del ddp del process_group del store # this will delete self.file_name store = c10d.FileStore(recovery_filename, self.world_size) process_group = 
c10d.ProcessGroupNCCL(store, self.rank, self.world_size) ddp = DistributedDataParallel( model, device_ids=[device_id], process_group=process_group, ) input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) for _ in range(6): output = ddp(input) loss = criterion(output, target) loss.backward() @requires_nccl() @skip_if_lt_x_gpu(2) def test_pass_default_pg(self): dist.init_process_group( "nccl", init_method=f"file://{self.file_name}", world_size=self.world_size, rank=self.rank, ) default_pg = c10d.distributed_c10d._get_default_group() dist.destroy_process_group(default_pg) self.assertFalse(dist.is_initialized()) def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) global_batch_size = local_batch_size * self.world_size # Carry out some trials with small buckets and some with big buckets. bucketsizes = (0.000001, 25) # Tuples of lists. Each list describes per-layer characteristics for one trial. layer_formats = ( [torch.contiguous_format] * 4, [torch.channels_last] * 2 + [torch.contiguous_format] * 2, [torch.channels_last] * 4, ) layer_dtypes = ( [torch.float] * 4, [torch.float] * 2 + [torch.half] * 2, [torch.half] * 4, ) input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs input = torch.randn( (global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float ) target = torch.randn( (global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float ) local_batch_start = self.rank * local_batch_size local_batch_end = (self.rank + 1) * local_batch_size # Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb" # argument. The following makes sure the initial bucket also complies. 
@contextmanager def first_bucket_size(ddp_bucket_mb): old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6) try: yield finally: dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES with torch.backends.cudnn.flags( enabled=True, deterministic=True, benchmark=False ): for formats, dtypes, bucketsize in product( layer_formats, layer_dtypes, bucketsizes ): with first_bucket_size(bucketsize): model_msg = ( "rank = {} formats = {} dtypes = {} bucketsize = {} ".format( self.rank, formats, dtypes, bucketsize ) ) try: m = ConvNet(layer_devs, formats, dtypes) m_ddp = DistributedDataParallel( copy.deepcopy(m), device_ids=replica_devices, process_group=process_group, bucket_cap_mb=bucketsize, ) opt = torch.optim.SGD(m.parameters(), lr=0.1) opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1) has_half = any(p.dtype is torch.half for p in m.parameters()) tol = 1.0e-3 if has_half else 1.0e-5 except BaseException: # Prints case-specific debugging info to narrow down failing case. print( "Caught exception during model creation for " + model_msg, flush=True, ) raise # 3 iters: First iter creates grads, second iter retests after rebucketing, # third iter tries zeroed grads. 
for it in range(3): iter_msg = "iter = {} ".format(it) + model_msg named_msg = iter_msg try: F.mse_loss(m(input).float(), target).backward() F.mse_loss( m_ddp(input[local_batch_start:local_batch_end]).float(), target[local_batch_start:local_batch_end], ).backward() for i, ((layer_name, m_child), m_ddp_child) in enumerate( zip(m.named_children(), m_ddp.module.children()) ): named_msg = layer_name + ".weight" + " " + iter_msg self.assertTrue( m_child.weight.grad.is_contiguous( memory_format=formats[i] ), named_msg, ) self.assertTrue( m_ddp_child.weight.grad.is_contiguous( memory_format=formats[i] ), named_msg, ) for j, ((param_name, p), p_ddp) in enumerate( zip( m_child.named_parameters(), m_ddp_child.parameters(), ) ): named_msg = ( layer_name + "." + param_name + " " + iter_msg ) self.assertEqual( p.grad, p_ddp.grad, rtol=tol, atol=tol ) opt.step() opt_ddp.step() if it == 0: for p, p_ddp in zip(m.parameters(), m_ddp.parameters()): p.grad = None p_ddp.grad = None else: m.zero_grad() m_ddp.zero_grad() except BaseException: # Makes sure we still get info if an error occurred somewhere other than the asserts. print( "Caught exception during iterations at " + named_msg, flush=True, ) raise @requires_nccl() @skip_if_lt_x_gpu(2) @skip_if_rocm def test_grad_layout_1devicemodule_1replicaperprocess(self): dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0])) # Tells DDP to use just one device. replica_devices = [dev0] # Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device. 
layer_devs = dev0 local_batch_size = 8 self._test_grad_layout(replica_devices, layer_devs, local_batch_size) @requires_nccl() @skip_if_lt_x_gpu(4) @skip_if_rocm def test_grad_layout_2devicemodule(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] dev0 = torch.device("cuda:" + str(int_devices[0])) dev1 = torch.device("cuda:" + str(int_devices[1])) # DDP's default behavior for a multi-device module is "don't replicate." replica_devices = None # Tells _test_grad_layout to constructs this process's ConvNet on 2 devices, with 2 layers on each device. layer_devs = [dev0] * 2 + [dev1] * 2 local_batch_size = 8 self._test_grad_layout(replica_devices, layer_devs, local_batch_size) @requires_nccl() @skip_if_lt_x_gpu(2) def test_param_layout_mismatch_error(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0])) layer_devs = dev0 layer_formats = ( [torch.contiguous_format] * 4 if self.rank == 0 else [torch.channels_last] * 4 ) layer_dtypes = [torch.float] * 4 m = ConvNet(layer_devs, layer_formats, layer_dtypes) if self.rank == 0: m_ddp = DistributedDataParallel( m, device_ids=[dev0], process_group=process_group ) else: with self.assertRaisesRegex( RuntimeError, ".* appears not to match strides of the same param in process 0", ): m_ddp = DistributedDataParallel( m, device_ids=[dev0], process_group=process_group ) def _gpu_model_with_ddp_comm_hook( self, process_group, hook=None, gradient_as_bucket_view=False, state=None, static_graph=False, ): device_id = gpus_for_rank(self.world_size)[self.rank][0] gpu_model = DistributedDataParallel( ModuleForDdpCommHook().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) if static_graph: gpu_model._set_static_graph() # Register a DDP communication hook if any. 
if hook is not None: gpu_model.register_comm_hook(state, hook) return gpu_model @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_future_passing_gpu_nccl(self): """ This unit test verifies whether the Future object is passed properly using nccl backend. The hook callback function creates a Future object and sets a value to it. """ store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) # Get GPU model with simple_hook registered. gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook) # check whether the grads are equal to what simple_hook's then callback returns. # without the comm_hook, result would be 0.25 * torch.ones(2, 2). self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2)) def _test_ddp_comm_hook_allreduce_hook_nccl( self, gradient_as_bucket_view=False, static_graph=False ): """ This unit test verifies whether a DDP communication hook that just calls allreduce gives the same result with the case of no hook registered. Without the then callback, the future_value in reducer is no longer a PyObject, and this unit test verifies future_value is properly checked. """ store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allreduce_hook( state: object, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: tensors = [bucket.buffer() / self.world_size] return ( process_group.allreduce(tensors) .get_future() .then(lambda fut: fut.value()[0]) ) # Get GPU model with allreduce_hook registered. gpu_model = self._gpu_model_with_ddp_comm_hook( process_group, allreduce_hook, gradient_as_bucket_view, static_graph ) # check whether the grads are equal to what DDP without hook would return. 
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2)) def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False): """ This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS can give the same result with the case of no hook registered. """ store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) # For these default DDP comm hooks, the only state is process group. state = process_group for hook in [default.allreduce_hook, default.fp16_compress_hook]: # Get GPU model with the hook registered. # The first arg 'process_group' is used for initializing the test environment, # so it cannot be replaced by 'state', although they have the same value. gpu_model = self._gpu_model_with_ddp_comm_hook( process_group, hook, gradient_as_bucket_view, state ) # check whether the grads are equal to what DDP without hook would return. self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2)) def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False): """ This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with the FP16_WRAPPER can give the same result as when there is no hook registered. """ store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) powerSGD_state = powerSGD.PowerSGDState(process_group=process_group) hook_args = [ (powerSGD.powerSGD_hook, powerSGD_state), (default.allreduce_hook, process_group), ] for hook, state in hook_args: gpu_model = self._gpu_model_with_ddp_comm_hook( process_group, default.fp16_compress_wrapper(hook), gradient_as_bucket_view, state, ) # check whether the grads are equal to what DDP without hook would return. 
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2)) def _test_hook_then_optimizer( self, functional_optim_cls, *functional_optim_args, gradient_as_bucket_view=False, **functional_optim_kwargs ): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) hook, hook_state = default.allreduce_hook, process_group opt_hook_state = default._OptimizerHookState( functional_optim_cls, *functional_optim_args, **functional_optim_kwargs, ) gpu_model = self._gpu_model_with_ddp_comm_hook( process_group, default._hook_then_optimizer(hook, opt_hook_state), gradient_as_bucket_view, hook_state, ) prev_params = copy.deepcopy(list(gpu_model.parameters())) # Run model with optimizer as part of hook for _ in range(8): gpu_model.zero_grad() self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2)) new_params = list(gpu_model.parameters()) # Run plain model with allreduce hook and separate optimizer step. # Verify gradients are the same. gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook( process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state ) mapping = {v: k for k, v in functional_optim_map.items()} sgd = mapping.get(functional_optim_cls)( gpu_model_allreduce.parameters(), *functional_optim_args, **functional_optim_kwargs, ) for _ in range(8): gpu_model_allreduce.zero_grad() self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2)) sgd.step() post_opt_params = list(gpu_model_allreduce.parameters()) for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params): self.assertEqual(opt_as_hook_param, post_opt_param) def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False): """ This unit test verifies whether Python DDP communication hook POWER_SGD can give the same result with the case of no hook registered. 
""" store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) # Get GPU model with the hook registered. # Test the hook with different algorithmic configs. for use_error_feedback, warm_start in product([True, False], [True, False]): state = powerSGD.PowerSGDState( process_group=process_group, matrix_approximation_rank=1, use_error_feedback=use_error_feedback, warm_start=warm_start, ) for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]: gpu_model = self._gpu_model_with_ddp_comm_hook( process_group, hook, gradient_as_bucket_view, state ) # check whether the grads are equal to what DDP without hook would return. self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2)) def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False): """ This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS can give the same result with the case of no hook registered. """ store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) for comm_hook_type in [ dist.BuiltinCommHookType.ALLREDUCE, dist.BuiltinCommHookType.FP16_COMPRESS, ]: # Get GPU model with the built-in communication hook. gpu_model = self._gpu_model_with_builtin_ddp_comm_hook( process_group, comm_hook_type, gradient_as_bucket_view ) # check whether the grads are equal to what DDP without hook would return. 
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2)) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_allreduce_hook_nccl(self): self._test_ddp_comm_hook_allreduce_hook_nccl() @requires_nccl() @skip_if_lt_x_gpu(2) def test_default_ddp_comm_hooks_nccl(self): self._test_default_ddp_comm_hooks_nccl() @requires_nccl() @skip_if_lt_x_gpu(2) def test_fp16_compress_wrapper_nccl(self): self._test_fp16_compress_wrapper() @requires_nccl() @skip_if_lt_x_gpu(2) def test_hook_then_sgd_nccl(self): sgd_lr = 1e-2 sgd_momentum = 0.9 sgd_weight_decay = 0.01 self._test_hook_then_optimizer( _FunctionalSGD, sgd_lr, momentum=sgd_momentum, weight_decay=sgd_weight_decay, ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_hook_then_sgd_nccl_grad_as_bucket_view(self): sgd_lr = 1e-2 sgd_momentum = 0.9 sgd_weight_decay = 0.01 self._test_hook_then_optimizer( _FunctionalSGD, sgd_lr, momentum=sgd_momentum, weight_decay=sgd_weight_decay, gradient_as_bucket_view=True ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_hook_then_adamw_nccl(self): adamw_lr = 1e-2 adamw_betas = (0.9, 0.99) adamw_eps = 1e-6 self._test_hook_then_optimizer( _FunctionalAdamW, adamw_lr, betas=adamw_betas, eps=adamw_eps, gradient_as_bucket_view=True ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_hook_then_adam_nccl(self): adam_lr = 1e-2 adam_betas = (0.9, 0.99) adam_eps = 1e-6 self._test_hook_then_optimizer( _FunctionalAdam, adam_lr, betas=adam_betas, eps=adam_eps, gradient_as_bucket_view=True ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_hook_then_adam_nccl_grad_as_bucket_view(self): adam_lr = 1e-2 adam_betas = (0.9, 0.99) adam_eps = 1e-6 self._test_hook_then_optimizer( _FunctionalAdam, adam_lr, betas=adam_betas, eps=adam_eps, gradient_as_bucket_view=True ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_builtin_ddp_comm_hooks_nccl(self): self._test_builtin_ddp_comm_hooks_nccl() @requires_nccl() @skip_if_lt_x_gpu(2) def test_powerSGD_ddp_comm_hook_nccl(self): self._test_powerSGD_ddp_comm_hook_nccl() 
@requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self): self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self): self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_default_ddp_comm_hooks_nccl_is_view(self): self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_fp16_compress_wrapper_is_view(self): self._test_fp16_compress_wrapper(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self): self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self): self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self): """ This unit test verifies whether a DDP communication hook that calls allreduce and then multiplies the result by ten and divides by two gives the expected result. """ store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allreduce_with_then_hook( state: object, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: tensors = [bucket.buffer() / self.world_size] fut = process_group.allreduce(tensors).get_future() def mult(fut): # Multiply the result by 10. return 10 * fut.value()[0] def div(fut): # Divide the result by 2. return 0.5 * fut.value() return fut.then(mult).then(div) # Get GPU model with allreduce_with_then_hook registered. gpu_model = self._gpu_model_with_ddp_comm_hook( process_group, allreduce_with_then_hook ) # check whether the grads are equal to what allreduce returns multuplied by 5. 
# without the comm_hook, result would be still 0.25 * torch.ones(2, 2). self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2)) class AcceptsParam(torch.nn.Module): def __init__(self, p, factor): super().__init__() self.a = p self.f = factor def forward(self, input): return input + self.a * self.f @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_weight_sharing(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) size = 2048 * 2048 dev = self.rank world = self.world_size p = torch.nn.Parameter(torch.randn(size, requires_grad=True)) for try_set_to_none, use_bucket_view in product((False, True), (False, True)): m = torch.nn.Sequential( self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1) ).cuda(dev) m = torch.nn.parallel.DistributedDataParallel( m, bucket_cap_mb=1, gradient_as_bucket_view=use_bucket_view, device_ids=[dev], process_group=process_group, ) for i in range(3): m.zero_grad(set_to_none=try_set_to_none) m(1).sum().backward() # Each param value is multiplied by "rank + 1" twice in forward, so the grad # values produced by a particular rank should be 2. * (rank + 1). # Summing these over ranks and dividing by world size gives the expected result: analytic = torch.full_like( p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev ) for name, p in m.named_parameters(): self.assertEqual( p.grad, analytic, "mismatch at " + name + ".grad for " + "set_to_none = {}, use_bucket_view = {}".format( try_set_to_none, use_bucket_view ), ) # A list of tests for ddp with activation checkpointing # when gradient_as_bucket_view=True, False. 
# Most of the tests are referred to # https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py class CheckpointOnceModule(nn.Module): def __init__(self): super().__init__() self.l1 = nn.Linear(20, 20) self.l2 = nn.Linear(20, 20) def forward(self, inp): x = self.l1(inp) x = checkpoint(self.l2, x) return x class CheckpointTwiceModule(CheckpointOnceModule): def __init__(self): super().__init__() def forward(self, inp): x = self.l1(inp) x = checkpoint(self.l2, x) x = checkpoint(self.l2, x) return x def _prepare_dummy_data(self): ddp_bs = 16 bs = ddp_bs * self.world_size input = torch.rand((bs, 20), device="cuda", requires_grad=True) target = torch.randn((bs, 20), device="cuda") offset = self.rank * ddp_bs ddp_input = input[offset : offset + ddp_bs] ddp_target = target[offset : offset + ddp_bs] return input, ddp_input, target, ddp_target def _train_model(self, model, input_var, target, loss, run_checkpoint=False): model.train() if run_checkpoint: output = checkpoint(model, input_var) else: output = model(input_var) l = loss(output, target) l.backward() def _test_ddp_checkpointing( self, input_model, process_group, use_bucket_view, find_unused_parameters=False, static_graph=False, run_checkpoint=False, ): # to reprodce the same training results torch.cuda.set_device(self.rank) torch.manual_seed(31415) model = copy.deepcopy(input_model).cuda() ddp_model = copy.deepcopy(input_model).cuda() ddp_model = nn.parallel.DistributedDataParallel( ddp_model, bucket_cap_mb=1, gradient_as_bucket_view=use_bucket_view, device_ids=[self.rank], process_group=process_group, find_unused_parameters=find_unused_parameters, ) if static_graph: ddp_model._set_static_graph() self.assertEqual( ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph ) input, ddp_input, target, ddp_target = self._prepare_dummy_data() loss = nn.MSELoss() for i in range(5): model.zero_grad(set_to_none=False) ddp_model.zero_grad(set_to_none=False) 
self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint) self._train_model( ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint ) for i, j in zip(model.parameters(), ddp_model.parameters()): self.assertTrue(i.grad is not None) self.assertTrue(j.grad is not None) self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5) # DDP works as expect when layer is checkpointed only once @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_checkpointing_once(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) for use_bucket_view, static_graph in product((False, True), (False, True)): self._test_ddp_checkpointing( self.CheckpointOnceModule(), process_group=process_group, use_bucket_view=use_bucket_view, static_graph=static_graph, ) # DDP will fail when there are unused_parameters in the model @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_checkpointing_unused_params(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) for use_bucket_view in (True, False): with self.assertRaisesRegex( RuntimeError, "Expected to mark a variable ready only once.", ): model = self._test_ddp_checkpointing( self.CheckpointOnceModule(), process_group=process_group, use_bucket_view=use_bucket_view, find_unused_parameters=True, static_graph=False, ) # test passes when static_graph is true model = self._test_ddp_checkpointing( self.CheckpointOnceModule(), process_group=process_group, use_bucket_view=use_bucket_view, find_unused_parameters=True, static_graph=True, ) # DDP will fail when the same layer is checkponted twice @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_checkpointing_twice(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) for use_bucket_view in (True, False): with self.assertRaisesRegex( 
RuntimeError, "Expected to mark a variable ready only once.", ): model = self._test_ddp_checkpointing( self.CheckpointTwiceModule(), process_group=process_group, use_bucket_view=use_bucket_view, static_graph=False, ) model = self._test_ddp_checkpointing( self.CheckpointTwiceModule(), process_group=process_group, use_bucket_view=use_bucket_view, static_graph=True, ) # DDP works as expected if there is weight sharing among layers @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_checkpointing_weight_sharing(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) torch.cuda.set_device(self.rank) for use_bucket_view, static_graph in product((False, True), (False, True)): torch.manual_seed(31415) l1 = nn.Linear(20, 20) l2 = nn.Linear(20, 20) l1.weight = l2.weight model = nn.Sequential(l1, l2) self._test_ddp_checkpointing( model, process_group=process_group, use_bucket_view=use_bucket_view, static_graph=static_graph, run_checkpoint=True, ) class NcclErrorHandlingTest(MultiProcessTestCase): def setUp(self): super(NcclErrorHandlingTest, self).setUp() # Need to skip return code checking for these tests since the child # processes don't exit cleanly. self.skip_return_code_checks = [ self.test_nccl_errors_blocking_abort.__wrapped__, self.test_nccl_errors_blocking_sigkill.__wrapped__, self.test_nccl_errors_blocking_sigterm.__wrapped__, self.test_nccl_errors_blocking_nonzero_exit.__wrapped__, ] # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests # that use NCCL_BLOCKING_WAIT will test it as expected. 
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" self._spawn_processes() def tearDown(self): super(NcclErrorHandlingTest, self).tearDown() try: os.remove(self.file_name) except OSError: pass @property def op_timeout_sec(self): return 1 @property def world_size(self): return 3 @property def blocking_wait_error_msg(self): return "Caught collective operation timeout" def _run_all_reduce(self, pg): pg.allreduce(torch.rand(10).cuda(self.rank)) @requires_nccl() @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking") @skip_if_lt_x_gpu(3) @skip_if_rocm def test_nccl_errors_nonblocking(self): # Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test # since test_c10d_common runs with async error handling by default, but this # tests behavior when it is not enabled. prev_nccl_async_error_handling = os.environ.get( "NCCL_ASYNC_ERROR_HANDLING", None ) os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0" store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) process_group.allreduce(torch.rand(10).cuda(self.rank)) if self.rank == 0: # This allreduce does not block Python thread as allreduce enqueues # the cuda operation, and then wait only blocks the current cuda # stream. work = process_group.allreduce(torch.rand(10).cuda(self.rank)) work.wait() # Now the work scheduled next should hang forever since the previous # allreduce will never complete. 
t = threading.Thread(target=self._run_all_reduce, args=(process_group,)) t.daemon = True t.start() t.join(int(get_timeout(self.id()) / 5)) self.assertTrue(t.is_alive()) if prev_nccl_async_error_handling is not None: os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling def _test_nccl_errors_blocking(self, func): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL( store, self.rank, self.world_size, timeout=timedelta(seconds=self.op_timeout_sec), ) process_group.allreduce(torch.rand(10).cuda(self.rank)) if self.rank == 0: work = process_group.allreduce(torch.rand(10).cuda(self.rank)) with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg): # Operation would time out in blocking mode. work.wait() # Run some GPU operations to make sure cuda has not gotten stuck. # It was observed cuda could get stuck if NCCL communicators were # not properly aborted before throwing RuntimeError. a = torch.rand(10).cuda(self.rank) elif self.rank == 1: # Clean up structures (ex: files for FileStore before going down) del process_group func() else: # Wait for timeout time.sleep(2 * self.op_timeout_sec) # Now verify communicators on this rank have been aborted by the watchdog thread. 
self._wait_for_comm_abort(process_group) @with_nccl_blocking_wait @requires_nccl() @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking") @skip_if_lt_x_gpu(3) @skip_if_rocm def test_nccl_errors_blocking_clean_exit(self): self._test_nccl_errors_blocking(lambda: sys.exit(0)) @with_nccl_blocking_wait @requires_nccl() @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking") @skip_if_lt_x_gpu(3) @skip_if_rocm def test_nccl_errors_blocking_nonzero_exit(self): self._test_nccl_errors_blocking(lambda: sys.exit(1)) @with_nccl_blocking_wait @requires_nccl() @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking") @skip_if_lt_x_gpu(3) @skip_if_rocm @sandcastle_skip( "Frequently times out see https://github.com/pytorch/pytorch/issues/58920" ) def test_nccl_errors_blocking_abort(self): self._test_nccl_errors_blocking(lambda: os.abort()) @with_nccl_blocking_wait @requires_nccl() @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking") @skip_if_lt_x_gpu(3) @skip_if_rocm def test_nccl_errors_blocking_sigkill(self): self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL)) @with_nccl_blocking_wait @requires_nccl() @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking") @skip_if_lt_x_gpu(3) @skip_if_rocm def test_nccl_errors_blocking_sigterm(self): self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM)) @with_nccl_blocking_wait @requires_nccl() @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking") @skip_if_lt_x_gpu(3) def test_nccl_blocking_wait_with_barrier(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL( store, self.rank, self.world_size, timeout=timedelta(seconds=self.op_timeout_sec), ) process_group.barrier().wait() if self.rank == 0: with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg): # This should timeout process_group.barrier().wait() def 
_run_invalid_nccl_blocking_wait_env(self, val): os.environ["NCCL_BLOCKING_WAIT"] = val store = c10d.FileStore(self.file_name, self.world_size) with self.assertRaises(RuntimeError): process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) @requires_nccl() @skip_if_lt_x_gpu(3) def test_invalid_nccl_blocking_wait_env(self): self._run_invalid_nccl_blocking_wait_env("abc") self._run_invalid_nccl_blocking_wait_env("-1") self._run_invalid_nccl_blocking_wait_env("2147483647") self._run_invalid_nccl_blocking_wait_env("4294967295") def _wait_for_comm_abort(self, process_group): """ Waits for the watchdog thread to abort communicators for the process group. """ while True: try: process_group.allreduce(torch.rand(10).cuda(self.rank)) except Exception as e: if "NCCL communicator was aborted" in str(e): return else: raise e time.sleep(1) @with_nccl_blocking_wait @requires_nccl() @skip_if_lt_x_gpu(3) def test_nccl_timeout(self): store = c10d.FileStore(self.file_name, self.world_size) # Initialize process_group. timeout = 1 process_group = c10d.ProcessGroupNCCL( store, self.rank, self.world_size, timeout=timedelta(seconds=timeout) ) process_group.allreduce(torch.rand(10).cuda(self.rank)).wait() if self.rank == 0: # This should timeout in about 1 second. start = time.time() # Watchdog may abort timed out work resulting in NCCL error instead of operation timed out. with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg): process_group.allreduce(torch.rand(10).cuda(self.rank)).wait() else: # Sleep to ensure timeout. time.sleep(2 * timeout) self._wait_for_comm_abort(process_group) class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase): def setUp(self): super(CommTest, self).setUp() # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests # that use NCCL_BLOCKING_WAIT will test it as expected. 
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" self._spawn_processes() def tearDown(self): super(CommTest, self).tearDown() try: os.remove(self.file_name) except OSError: pass def _test_broadcast_coalesced(self, process_group, device, root_rank): half = torch.float16 # No support for float16 for CPU tensors if device == torch.device("cpu"): half = torch.float32 target = torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float32, device=device).chunk(5) target += torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float64, device=device).chunk(5) target += torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float32, device=device).chunk(5) # The tensors to pass to broadcast are idential to the target # only on the process that is the root of the broadcast. if self.rank == root_rank: tensors = list(tensor.clone() for tensor in target) else: tensors = list(torch.zeros_like(tensor) for tensor in target) if self.rank != root_rank: self.assertNotEqual(tensors, target) c10d._broadcast_coalesced( process_group, tensors, buffer_size=256, src=root_rank ) if self.rank != root_rank: self.assertEqual(tensors, target) @requires_nccl() @skip_if_lt_x_gpu(2) def test_broadcast_coalesced_nccl(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) device = torch.device("cuda:%d" % self.rank) ranks = [0, 1] for root_rank in ranks: self._test_broadcast_coalesced(process_group, device, root_rank) @requires_nccl() @skip_if_lt_x_gpu(2) def test_sequence_num_set_default_pg_nccl(self): torch.cuda.set_device(self.rank) self._test_sequence_num_set_default_pg(backend="nccl") @skip_if_lt_x_gpu(2) @requires_nccl() def test_sequence_num_incremented_nccl_default(self): self._test_sequence_num_incremented_default_group("nccl") @skip_if_lt_x_gpu(4) @requires_nccl() def 
test_sequence_num_incremented_nccl_subgroup(self): if self.world_size < 4: return sandcastle_skip("Test requires world_size of at least 4") self._test_sequence_num_incremented_subgroup("nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_sequence_num_set_nccl_new_group(self): torch.cuda.set_device(self.rank) self._test_sequence_num_set_new_group(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_pass_nccl_options_high_priority_stream(self): pg_opts = c10d.ProcessGroupNCCL.Options() pg_opts.is_high_priority_stream = True store = c10d.FileStore(self.file_name, self.world_size) # Test init_process_group accepts options dist.init_process_group( "nccl", world_size=self.world_size, rank=self.rank, store=store, pg_options=pg_opts, ) # Test with new_group pg = c10d.new_group([0, 1], pg_options=pg_opts) # test if the process group constructed with high priority stream self.assertTrue(pg.options.is_high_priority_stream) # test the process group works as expected t = torch.tensor([self.rank + 1] * 10).cuda(self.rank) pg.allreduce(t).wait() expected_tensor = torch.tensor([3] * 10).cuda(self.rank) self.assertEqual(expected_tensor, t) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) c10d.all_reduce(t) expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank) self.assertEqual(expected_tensor, t) # Test with new_group pg = c10d.new_group([0, 1]) t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) pg = c10d.new_group([0]) if self.rank == 0: t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) pg = c10d.new_group([1]) if self.rank == 1: 
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier_timeout(self): store = c10d.FileStore(self.file_name, self.world_size) if self.rank == 0: with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store, timeout=timedelta(seconds=1), ) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier_timeout_new_group(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store, timeout=timedelta(seconds=1), ) if self.rank == 0: with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0, 1], timeout=timedelta(seconds=1)) with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0], timeout=timedelta(seconds=1)) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier_timeout_new_group_non_member(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store, timeout=timedelta(seconds=1), ) if self.rank == 1: with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0, 1], timeout=timedelta(seconds=1)) with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0], timeout=timedelta(seconds=1)) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_barrier_device_ids(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) c10d.barrier(device_ids=[self.rank]) @requires_nccl() 
@skip_if_lt_x_gpu(2) def test_nccl_barrier_device_ids_function_argument(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) with self.assertRaisesRegex(RuntimeError, "Invalid function argument"): c10d.barrier(device_ids=self.rank) if __name__ == "__main__": assert ( not torch.cuda._initialized ), "test_distributed must not have initialized CUDA context on main process" run_tests()
38.625791
114
0.608269
import copy import math import os import random import signal import sys import tempfile import threading import time from contextlib import contextmanager from datetime import timedelta from itertools import product from unittest import mock import torch import torch.distributed as c10d if not c10d.is_available(): print("c10d not available, skipping tests", file=sys.stderr) sys.exit(0) import test_c10d_common import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_nccl, requires_nccl_version, skip_if_lt_x_gpu, get_timeout, skip_if_rocm, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( IS_WINDOWS, TestCase, run_tests, retry_on_connect_failures, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_TSAN, sandcastle_skip, sandcastle_skip_if, ) from torch.utils.checkpoint import checkpoint from torch.distributed.optim import functional_optim_map if not IS_WINDOWS: from torch.distributed.optim.functional_sgd import _FunctionalSGD from torch.distributed.optim.functional_adam import _FunctionalAdam from torch.distributed.optim.functional_adamw import _FunctionalAdamW if TEST_WITH_TSAN: print( "Skip as TSAN is not fork-safe since we're forking in a multi-threaded environment", file=sys.stderr, ) sys.exit(0) if TEST_WITH_DEV_DBG_ASAN: print( "Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr ) sys.exit(0) class RendezvousEnvTest(TestCase): @retry_on_connect_failures @requires_nccl() @sandcastle_skip_if( torch.cuda.device_count() == 0, "No GPUs available, 
skipping test" ) def test_common_errors(self): vars = { "WORLD_SIZE": "1", "RANK": "0", "MASTER_ADDR": "127.0.0.1", "MASTER_PORT": str(common.find_free_port()), } class Env(object): def __init__(self, vars): self.env_patcher = mock.patch.dict(os.environ, vars, clear=True) def __enter__(self): self.env_patcher.start() def __exit__(self, type, value, traceback): self.env_patcher.stop() def without(d, key): d = d.copy() d.pop(key) return d def withouts(d, keys): d = d.copy() for key in keys: d.pop(key) return d with Env(without(vars, "WORLD_SIZE")): self.assertEqual(None, os.environ.get("WORLD_SIZE")) with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"): gen = c10d.rendezvous("env://") next(gen) c10d.init_process_group(backend="nccl", world_size=1) self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(without(vars, "RANK")): self.assertEqual(None, os.environ.get("RANK")) with self.assertRaisesRegex(ValueError, "RANK expected"): gen = c10d.rendezvous("env://") next(gen) c10d.init_process_group(backend="nccl", rank=0) self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(withouts(vars, ["RANK", "WORLD_SIZE"])): self.assertEqual(None, os.environ.get("RANK")) self.assertEqual(None, os.environ.get("WORLD_SIZE")) c10d.init_process_group(backend="nccl", rank=0, world_size=1) self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(vars): c10d.init_process_group(backend="nccl") self.assertEqual(c10d.get_rank(), 0) self.assertEqual(c10d.get_world_size(), 1) c10d.destroy_process_group() with Env(without(vars, "MASTER_ADDR")): self.assertEqual(None, os.environ.get("MASTER_ADDR")) with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"): gen = c10d.rendezvous("env://") next(gen) with Env(without(vars, "MASTER_PORT")): self.assertEqual(None, os.environ.get("MASTER_PORT")) with 
self.assertRaisesRegex(ValueError, "MASTER_PORT expected"): gen = c10d.rendezvous("env://") next(gen) with Env(without(vars, "WORLD_SIZE")): self.assertEqual(None, os.environ.get("WORLD_SIZE")) gen = c10d.rendezvous("env://?world_size={}".format(1)) _, _, size = next(gen) self.assertEqual(size, 1) with Env(without(vars, "RANK")): self.assertEqual(None, os.environ.get("RANK")) gen = c10d.rendezvous("env://?rank={}".format(0)) _, rank, _ = next(gen) self.assertEqual(rank, 0) with Env(withouts(vars, ["RANK", "WORLD_SIZE"])): self.assertEqual(None, os.environ.get("RANK")) self.assertEqual(None, os.environ.get("WORLD_SIZE")) gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1)) _, rank, size = next(gen) self.assertEqual(rank, 0) self.assertEqual(size, 1) class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase): @requires_nccl() @retry_on_connect_failures @sandcastle_skip_if( torch.cuda.device_count() == 0, "No GPUs available, skipping test" ) def test_default_store_timeout_nccl(self): self._test_default_store_timeout("nccl") class ProcessGroupNCCLNoGPUTest(TestCase): MAIN_PROCESS_RANK = 0 def setUp(self): self.rank = self.MAIN_PROCESS_RANK self.world_size = 1 self.file = tempfile.NamedTemporaryFile(delete=False) def tearDown(self): pass @requires_nccl() @sandcastle_skip_if( torch.cuda.device_count() > 0, "GPUs are available, skipping test" ) def test_init_no_gpus(self): store = c10d.FileStore(self.file.name, self.world_size) with self.assertRaisesRegex( RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!" ): c10d.ProcessGroupNCCL(store, self.rank, self.world_size) class ProcessGroupNCCLTest(TestCase): MAIN_PROCESS_RANK = 0 def setUp(self): self.rank = self.MAIN_PROCESS_RANK self.world_size = 1 self.file = tempfile.NamedTemporaryFile(delete=False) # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests # that use NCCL_BLOCKING_WAIT will test it as expected. 
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" self.num_gpus = torch.cuda.device_count() def tearDown(self): pass @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_empty_tensors(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) xs = [torch.cuda.FloatTensor([])] pg.broadcast(xs).wait() self.assertEqual(0, xs[0].numel()) pg.allreduce(xs).wait() self.assertEqual(0, xs[0].numel()) pg.reduce(xs).wait() self.assertEqual(0, xs[0].numel()) ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]] pg.allgather(ys, xs).wait() for y in ys[0]: self.assertEqual(0, y.numel()) ys = [torch.cuda.FloatTensor([])] xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]] pg.reduce_scatter(ys, xs).wait() self.assertEqual(0, ys[0].numel()) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_broadcast_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def broadcast(xs, rootRank, rootTensor): opts = c10d.BroadcastOptions() opts.rootRank = rootRank opts.rootTensor = rootTensor work = pg.broadcast(xs, opts) work.wait() # for every root tensor for rt in range(self.num_gpus): tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i]).cuda(i)) broadcast(tensors, self.rank, rt) for i in range(self.num_gpus): self.assertEqual(tensors[i], tensors[rt]) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allreduce_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allreduce(tensors, op): opts = c10d.AllreduceOptions() opts.reduceOp = op work = pg.allreduce(tensors, opts) work.wait() # Sum tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 
1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.SUM) for i in range(self.num_gpus): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]), tensors[i], ) # Product tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.PRODUCT) for i in range(self.num_gpus): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i] ) # Min tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.MIN) for i in range(self.num_gpus): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i]) # Max tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) allreduce(tensors, c10d.ReduceOp.MAX) for i in range(self.num_gpus): self.assertEqual(torch.tensor([self.num_gpus]), tensors[i]) for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR): with self.assertRaisesRegex( RuntimeError, "Cannot use " + str(op) + " with NCCL" ): allreduce(tensors, op) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce(xs, rootRank, rootTensor, op=None): opts = c10d.ReduceOptions() opts.rootRank = rootRank opts.rootTensor = rootTensor if op: opts.reduceOp = op work = pg.reduce(xs, opts) work.wait() # for every root tensor for rt in range(self.num_gpus): tensors = [] for i in range(self.num_gpus): tensors.append(torch.tensor([i + 1]).cuda(i)) reduce(tensors, self.rank, rt) # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]), tensors[rt], ) for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR): with self.assertRaisesRegex( RuntimeError, "Cannot use " + str(op) + " with NCCL" ): reduce(tensors, self.rank, rt, op) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allgather_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allgather(output_ts, input_ts): work = pg.allgather(output_ts, input_ts) work.wait() tensors = [] output_ts = [[] for _ in range(self.num_gpus)] for idx, ls in enumerate(output_ts): for _ in range(self.world_size * self.num_gpus): ls.append(torch.tensor([0]).cuda(idx)) for i in range(self.num_gpus): tensors.append(torch.tensor([i]).cuda(i)) allgather(output_ts, tensors) # Verification for device_ts in output_ts: for s_idx, t in enumerate(device_ts): self.assertEqual(torch.tensor([s_idx]), t) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allgather_base_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allgather_base(output_t, input_t): work = pg._allgather_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # allgather_base is GPU number agnostic. 
# Each rank contribute one tensor regardless of GPU counts tensor = torch.tensor([self.rank]).cuda(device_id) output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id) allgather_base(output_t, tensor) # Verification self.assertEqual(torch.arange(self.world_size), output_t) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_allgather_base_basics(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allgather_base(output_t, input_t): work = pg._allgather_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # anticpate an error with self.assertRaisesRegex( RuntimeError, "output tensor size must be equal to world_size times input tensor size", ): tensor = torch.tensor([self.rank]).cuda(device_id) output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda( device_id ) # fails the check because output_t is not correctly sized allgather_base(output_t, tensor) # anticpate an error with self.assertRaisesRegex( RuntimeError, "output tensor must have the same type as input tensor" ): tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id) output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda( device_id ) # fails the check because the dtype is different allgather_base(output_t, tensor) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_scatter_base_basics(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce_scatter_base(output_t, input_t): work = pg._reduce_scatter_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # anticpate an error with self.assertRaisesRegex( RuntimeError, "input tensor must be the same size as output size times world size", ): input_t = torch.tensor([self.rank]).cuda(device_id) 
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda( device_id ) # fails the check because output_t is not correctly sized reduce_scatter_base(output_t, input_t) # anticpate an error with self.assertRaisesRegex( RuntimeError, "input tensor must be the same type as the outut tensor." ): tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id) output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda( device_id ) # fails the check because the dtype is different reduce_scatter_base(output_t, tensor) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_scatter_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce_scatter(outputs, input_lists, op): opts = c10d.ReduceScatterOptions() opts.reduceOp = op work = pg.reduce_scatter(outputs, input_lists, opts) work.wait() virtual_rank = self.rank * self.world_size virtual_world_size = self.num_gpus * self.world_size output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)] # 0 1 2 # 0 [0..11] [1..12] # 1 [3..14] # 2 # 3 # Sum tensor_lists = [ [ torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i) for j in range(virtual_world_size) ] for i in range(self.num_gpus) ] reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM) for i in range(self.num_gpus): expected = torch.tensor( [ float(self.num_gpus * (self.num_gpus - 1) / 2) + (virtual_rank + i) * virtual_world_size ] ) # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095 self.assertEqualIgnoreType(expected, output[i]) # Min reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN) for i in range(self.num_gpus): expected = torch.tensor([self.rank * self.world_size + i]) self.assertEqual(expected, output[i]) # Max reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX) for i in range(self.num_gpus): expected = torch.tensor( [self.rank * self.world_size + i + virtual_world_size - 1] ) self.assertEqual(expected, output[i]) # Product tensor_lists = [ [ torch.tensor( [(self.rank * self.num_gpus + i + j) % virtual_world_size + 1] ).cuda(i) for j in range(virtual_world_size) ] for i in range(self.num_gpus) ] reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT) for i in range(self.num_gpus): expected = torch.tensor([float(math.factorial(virtual_world_size))]) # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType(expected, output[i]) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_reduce_scatter_base_ops(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def reduce_scatter_base(output_t, input_t): work = pg._reduce_scatter_base(output_t, input_t) work.wait() device_id = self.rank % self.num_gpus # reduce_scatter_base is GPU number agnostic. 
# Each rank contribute one tensor regardless of GPU counts output_t = torch.empty([1]).cuda(device_id) tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id) reduce_scatter_base(output_t, tensor) # Verification self.assertEqual(output_t[0], self.rank * self.world_size) @requires_nccl() @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs") def test_barrier(self): store = c10d.FileStore(self.file.name, self.world_size) pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) def allreduce(tensors): opts = c10d.AllreduceOptions() work = pg.allreduce(tensors, opts) return work # Making the collective to operate on # 1, 2, 3, 4, .... self.num_gpus GPUs tensors_list = [[] for _ in range(2, self.num_gpus + 1)] for i in range(2, self.num_gpus + 1): for j in range(i): tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j)) works = [] for tensors in tensors_list: work = allreduce(tensors) works.append(work) # Barrier will ensure that all previous work is completed pg.barrier().wait() for i in range(2, self.num_gpus + 1): for j in range(i): # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j] ) class DistributedDataParallelTest( test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super(DistributedDataParallelTest, self).setUp() # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests # that use NCCL_BLOCKING_WAIT will test it as expected. 
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" self._spawn_processes() def _test_nccl_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_multi_device_ids_not_allowed(self): int_devices = list(range(torch.cuda.device_count())) devices = [torch.device("cuda:" + str(i)) for i in int_devices] with self.assertRaisesRegex( ValueError, "device_ids can only be None or contain a single element." ): self._test_nccl_backend(devices, int_devices) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_single_device_module_device_ids_None(self): self._test_nccl_backend(None, None) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_single_device_module_empty_device_ids(self): # This tests the backward compatibility of accepting an empty list as `device_ids`, # although we no longer document this in favor of the default value of `None`, # which is consistent with multi-device modules and CPU modules. 
self._test_nccl_backend(None, []) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_backend_multi_device_module_device_ids_None(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, None, multi_device=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, int_devices) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, devices) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, None, multi_device=True) @requires_nccl() @skip_if_lt_x_gpu(8) def test_nccl_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_nccl_backend(devices, None, multi_device=True) @requires_nccl() @skip_if_lt_x_gpu(4) def test_ddp_multi_device_module_config(self): gpus = gpus_for_rank(self.world_size)[self.rank] self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process") store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) gpus = gpus[:2] model = DoubleGpuNet(gpus) with self.assertRaisesRegex( ValueError, "DistributedDataParallel device_ids and output_device arguments only work with " "single-device/multiple-device GPU modules or CPU modules", ): ddp_model = DistributedDataParallel( model, 
output_device=gpus[1], process_group=process_group ) with self.assertRaisesRegex( ValueError, "device_ids can only be None or contain a single element." ): ddp_model = DistributedDataParallel( model, device_ids=gpus, process_group=process_group ) with self.assertRaisesRegex( ValueError, "input module must be on the same type of devices" ): model.fc1 = model.fc1.cpu() ddp_model = DistributedDataParallel(model, process_group=process_group) model = model.cpu() with self.assertRaisesRegex( ValueError, "device_ids can only be None or contain a single element." ): ddp_model = DistributedDataParallel( model, device_ids=gpus, process_group=process_group ) def _test_fp16(self, gradient_as_bucket_view=False): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) gpus = gpus_for_rank(self.world_size)[self.rank] model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half() nn.init.constant_(model.weight, 1) ddp_model = DistributedDataParallel( model, device_ids=[gpus[0]], process_group=process_group, bucket_cap_mb=0.001, gradient_as_bucket_view=gradient_as_bucket_view, ) # Input 2**15, so that the gradients will overflow with a # world_size of 2, unless we normalize the gradient by the # world_size before the reduction input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half() # Step model ddp_model.train() output = ddp_model(input) loss = output.sum() loss.backward() self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters())) @requires_nccl() @skip_if_lt_x_gpu(2) def test_fp16(self): self._test_fp16() @requires_nccl() @skip_if_lt_x_gpu(2) def test_fp16_grad_is_view(self): self._test_fp16(gradient_as_bucket_view=True) def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) class ForwardReturnValueModule(nn.Module): def __init__(self): 
super(ForwardReturnValueModule, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.fc3 = nn.Linear(4, 4, bias=False) self.relu = nn.ReLU() def forward(self, x, fn): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) # The first softmax does NOT include fc3 in its autograd graph # whereas the second softmax DOES. If we pass only the first # tensor we see in the output to the reducer, it marks the # gradient for fc3 as ready (because it doesn't show up). If return fn( F.softmax(x, dim=1), F.softmax(self.fc3(x), dim=1), ) device_id = gpus_for_rank(self.world_size)[self.rank][0] model = DistributedDataParallel( ForwardReturnValueModule().float().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) # the reducer won't see a hook for the unused parameter, and throw an error. 
def test(box, unbox): output = model(input, fn=box) loss = criterion(unbox(output), target) loss.backward() # Test with identity return value test( box=lambda x, y: (x, y), unbox=lambda obj: obj[1], ) # Test with list return value test( box=lambda x, y: ["foo", x, "bar", y], unbox=lambda obj: obj[3], ) # Test with tuple return value test( box=lambda x, y: ("foo", x, "bar", y), unbox=lambda obj: obj[3], ) # Test with dict return value test( box=lambda x, y: {"foo": "bar", "a": x, "b": y}, unbox=lambda obj: obj["b"], ) # Test with list with dict return value test( box=lambda x, y: ["foo", "bar", {"a": x, "b": y}], unbox=lambda obj: obj[2]["b"], ) # Test with dict with list return value test( box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]}, unbox=lambda obj: obj["list"][3], ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_arbitrary_forward_return_value(self): self._test_arbitrary_forward_return_value() @requires_nccl() @skip_if_lt_x_gpu(2) def test_arbitrary_forward_return_value_grad_is_view(self): self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_with_lazy_parameters(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) with self.assertRaisesRegex( RuntimeError, "Modules with uninitialized parameters" ): DistributedDataParallel( torch.nn.LazyLinear(10), process_group=process_group ) def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False): torch.cuda.set_device(self.rank) dist.init_process_group( backend="nccl", world_size=self.world_size, rank=self.rank, init_method=f"file://{self.file_name}", ) process_group = c10d.distributed_c10d._get_default_group() class FindUnusedParametersModule(nn.Module): def __init__(self): super(FindUnusedParametersModule, self).__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.fc3 = nn.Linear(4, 4, bias=False) 
self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) # Return the fc3 module so that the caller can invoke it # outside of the forward function. While this is bad practice, # we can use it to trigger a reducer error. return (F.softmax(x, dim=1), self.fc3) device_id = gpus_for_rank(self.world_size)[self.rank][0] batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) ddp_model = None def test_find_unused_parameters( find_unused_parameters, test_default=False, gradient_as_bucket_view=False ): if test_default: model = DistributedDataParallel( FindUnusedParametersModule().float().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) else: model = DistributedDataParallel( FindUnusedParametersModule().float().to(device_id), device_ids=[device_id], process_group=process_group, find_unused_parameters=find_unused_parameters, gradient_as_bucket_view=gradient_as_bucket_view, ) nonlocal ddp_model ddp_model = model output, fc3 = model(input) output = fc3(output) loss = criterion(output, target) loss.backward() # First test that finding unused params under these conditions is to # trigger an error when `backward` is called (because fc3 is an unused # parameter and will therefore be marked ready twice). 
try: test_find_unused_parameters( True, gradient_as_bucket_view=gradient_as_bucket_view ) except Exception as ex: self.assertTrue( str(ex).startswith( "Expected to mark a variable ready only once.", ) ) unused_index = 2 unused_index_str = f"Parameter at index {unused_index}" model = ddp_model.module for module_name, module in model.named_modules(): if module == model.fc3: for parameter_name, _ in module.named_parameters(recurse=False): unused_fqn = f"{module_name}.{parameter_name}" # Only one such parameter in model.fc3, since bias=False break if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF: unused_index_str += f" with name {unused_fqn}" self.assertTrue(unused_index_str in str(ex)) else: self.fail("Expected exception") dist.barrier(process_group) # Then test that the default behavior can be overridden by setting # `find_unused_parameters=False`. try: test_find_unused_parameters( False, gradient_as_bucket_view=gradient_as_bucket_view ) except Exception as ex: self.fail("Unexpected exception: %s" % ex) # Test find_unused_parameters defaults to False try: test_find_unused_parameters( True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view ) except Exception as ex: self.fail("Unexpected exception: %s" % ex) # TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967 # is resolved. 
    # The following variants run the same unused-parameters scenario under each
    # distributed debug level (DETAIL / INFO / OFF), with and without
    # gradient_as_bucket_view.
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["DETAIL"])
    def test_find_unused_parameters_kwarg_debug_detail(self):
        self._test_find_unused_parameters_kwarg()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["INFO"])
    def test_find_unused_parameters_kwarg_debug_info(self):
        self._test_find_unused_parameters_kwarg()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["OFF"])
    def test_find_unused_parameters_kwarg_debug_off(self):
        self._test_find_unused_parameters_kwarg()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["DETAIL"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["INFO"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["OFF"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)

    def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
        """Verify DDP supports a module returning multiple outputs, each fed to
        its own loss and backward pass."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        class MultipleOutputModule(nn.Module):
            def __init__(self):
                super(MultipleOutputModule, self).__init__()

                def define_module():
                    return nn.Sequential(
                        nn.Linear(2, 10, bias=False),
                        nn.ReLU(),
                        nn.Linear(10, 4, bias=False),
                        nn.ReLU(),
                    )

                self.module0 = define_module()
                self.module1 = define_module()

            def forward(self, x):
                return (
                    F.softmax(self.module0(x), dim=1),
                    F.softmax(self.module1(x), dim=1),
                )

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        model = DistributedDataParallel(
            MultipleOutputModule().float().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        batch_size = 4
        criterion = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )

        # Compute loss and gradients for both outputs
        output1, output2 = model(input)
        loss1 = criterion(output1, target)
        loss1.backward()
        loss2 = criterion(output2, target)
        loss2.backward()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_multiple_outputs_multiple_backward(self):
        self._test_multiple_outputs_multiple_backward()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_multiple_outputs_multiple_backward_grad_is_view(self):
        self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_no_grad(self):
        # Verify that a forward pass under torch.no_grad() through a DDP-wrapped
        # module leaves every parameter's .grad unset.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        class NoGradModule(nn.Module):
            def __init__(self):
                super(NoGradModule, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                return F.softmax(x, dim=1)

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        model = DistributedDataParallel(
            NoGradModule().float().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
        )

        batch_size = 4
        input = torch.rand([batch_size, 2], dtype=torch.float)

        def check_no_grads():
            for p in model.parameters():
                self.assertTrue(p.requires_grad)
                self.assertIsNone(p.grad)

        # After initialization, no parameter has their gradient set.
        check_no_grads()

        # Run `forward` function with torch.no_grad()
        with torch.no_grad():
            output = model(input)
            self.assertTrue(isinstance(output, torch.Tensor))

        # No parameter should have their gradient set.
check_no_grads() def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False): # This is NOT the recommended way to implement accumulating grads, but # we would like to make sure DDP does not mess up with the underlying # module. int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) global_batch_size = self.world_size model, ddp_model, input, target = self._prepare_single_device_module( process_group, devices, devices, global_batch_size, gradient_as_bucket_view ) def step_model(model, input, target): model.train() output = model(input) loss = F.mse_loss(output, target.to(output.device)) loss.backward() # ensure accumulate grads works with no_grad with torch.no_grad(): ddp_model.train() ddp_model.module(input) # Check two model parameters over 4 iterations. # Use 4 iterations because we alternate between reducing and # not reducing and want to make sure we switch both ways. for iteration in range(4): step_model(model, input, target) if iteration % 2 == 0: # Skip gradients sync without calling prepare_for_backward step_model( ddp_model.module, input[self.rank : (self.rank + 1)], target[self.rank : (self.rank + 1)], ) for i, j in zip(model.parameters(), ddp_model.parameters()): self.assertNotEqual(i.grad, j.grad) else: step_model( ddp_model, input[self.rank : (self.rank + 1)], target[self.rank : (self.rank + 1)], ) for i, j in zip(model.parameters(), ddp_model.parameters()): # TODO(#38095): Replace assertEqualIgnoreType. 
                    # See issue #38095
                    self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)

            # Shuffle the input so that DDP input is different
            torch.manual_seed(1337 + iteration)
            input = input[torch.randperm(global_batch_size)]

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_accumulate_gradients_module(self):
        self._test_accumulate_gradients_module()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_accumulate_gradients_module_with_grad_is_view(self):
        self._test_accumulate_gradients_module(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_failure_recovery(self):
        # Train, tear down the process group/store, then rebuild both from a
        # recovery file and verify training still proceeds.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        # need to create a separate file for the recovered FileStore, because
        # the original one will be deleted when destructing the first FileStore.
        recovery_filename = self.file_name + "_recovery"

        if self.rank == 0:
            # the file will be deleted by the recovered FileStore
            open(recovery_filename, "w").close()

        # not necessary to run barrier here, as DDP will synchronize

        class TestModel(nn.Module):
            def __init__(self):
                super(TestModel, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                return F.softmax(x, dim=1)

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        model = TestModel().float().to(device_id)
        ddp = DistributedDataParallel(
            model,
            device_ids=[device_id],
            process_group=process_group,
        )

        batch_size = 4
        criterion = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )

        for _ in range(6):
            output = ddp(input)
            loss = criterion(output, target)
            loss.backward()

        del ddp
        del process_group
        del store  # this will delete self.file_name

        # Recreate the store/process group from the recovery file and retrain.
        store = c10d.FileStore(recovery_filename, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        ddp = DistributedDataParallel(
            model,
            device_ids=[device_id],
            process_group=process_group,
        )

        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )
        for _ in range(6):
            output = ddp(input)
            loss = criterion(output, target)
            loss.backward()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_pass_default_pg(self):
        # Initialize then destroy the default process group.
        dist.init_process_group(
            "nccl",
            init_method=f"file://{self.file_name}",
            world_size=self.world_size,
            rank=self.rank,
        )

        default_pg = c10d.distributed_c10d._get_default_group()
        dist.destroy_process_group(default_pg)
        self.assertFalse(dist.is_initialized())

    def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
        """Compare gradients between a plain model and its DDP copy across
        combinations of memory formats, dtypes, and bucket sizes."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        global_batch_size = local_batch_size * self.world_size

        # Carry out some trials with small buckets and some with big buckets.
        bucketsizes = (0.000001, 25)
        # Tuples of lists. Each list describes per-layer characteristics for one trial.
        layer_formats = (
            [torch.contiguous_format] * 4,
            [torch.channels_last] * 2 + [torch.contiguous_format] * 2,
            [torch.channels_last] * 4,
        )
        layer_dtypes = (
            [torch.float] * 4,
            [torch.float] * 2 + [torch.half] * 2,
            [torch.half] * 4,
        )

        input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
        target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
        input = torch.randn(
            (global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
        )
        target = torch.randn(
            (global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
        )
        local_batch_start = self.rank * local_batch_size
        local_batch_end = (self.rank + 1) * local_batch_size

        # Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
        # argument. The following makes sure the initial bucket also complies.
        @contextmanager
        def first_bucket_size(ddp_bucket_mb):
            # Temporarily override the first-bucket byte cap so the initial
            # bucket follows the trial's bucket size too.
            old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
            dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
            try:
                yield
            finally:
                dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES

        with torch.backends.cudnn.flags(
            enabled=True, deterministic=True, benchmark=False
        ):
            for formats, dtypes, bucketsize in product(
                layer_formats, layer_dtypes, bucketsizes
            ):
                with first_bucket_size(bucketsize):
                    model_msg = (
                        "rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
                            self.rank, formats, dtypes, bucketsize
                        )
                    )
                    try:
                        m = ConvNet(layer_devs, formats, dtypes)
                        m_ddp = DistributedDataParallel(
                            copy.deepcopy(m),
                            device_ids=replica_devices,
                            process_group=process_group,
                            bucket_cap_mb=bucketsize,
                        )
                        opt = torch.optim.SGD(m.parameters(), lr=0.1)
                        opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
                        has_half = any(p.dtype is torch.half for p in m.parameters())
                        tol = 1.0e-3 if has_half else 1.0e-5
                    except BaseException:
                        # Prints case-specific debugging info to narrow down failing case.
                        print(
                            "Caught exception during model creation for " + model_msg,
                            flush=True,
                        )
                        raise
                    # 3 iters: First iter creates grads, second iter retests after rebucketing,
                    # third iter tries zeroed grads.
                    for it in range(3):
                        iter_msg = "iter = {} ".format(it) + model_msg
                        named_msg = iter_msg
                        try:
                            F.mse_loss(m(input).float(), target).backward()
                            F.mse_loss(
                                m_ddp(input[local_batch_start:local_batch_end]).float(),
                                target[local_batch_start:local_batch_end],
                            ).backward()
                            for i, ((layer_name, m_child), m_ddp_child) in enumerate(
                                zip(m.named_children(), m_ddp.module.children())
                            ):
                                named_msg = layer_name + ".weight" + " " + iter_msg
                                self.assertTrue(
                                    m_child.weight.grad.is_contiguous(
                                        memory_format=formats[i]
                                    ),
                                    named_msg,
                                )
                                self.assertTrue(
                                    m_ddp_child.weight.grad.is_contiguous(
                                        memory_format=formats[i]
                                    ),
                                    named_msg,
                                )
                                for j, ((param_name, p), p_ddp) in enumerate(
                                    zip(
                                        m_child.named_parameters(),
                                        m_ddp_child.parameters(),
                                    )
                                ):
                                    named_msg = (
                                        layer_name + "." + param_name + " " + iter_msg
                                    )
                                    self.assertEqual(
                                        p.grad, p_ddp.grad, rtol=tol, atol=tol
                                    )
                            opt.step()
                            opt_ddp.step()
                            if it == 0:
                                for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
                                    p.grad = None
                                    p_ddp.grad = None
                            else:
                                m.zero_grad()
                                m_ddp.zero_grad()
                        except BaseException:
                            # Makes sure we still get info if an error occurred somewhere other than the asserts.
                            print(
                                "Caught exception during iterations at " + named_msg,
                                flush=True,
                            )
                            raise

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @skip_if_rocm
    def test_grad_layout_1devicemodule_1replicaperprocess(self):
        dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
        # Tells DDP to use just one device.
        replica_devices = [dev0]
        # Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
        layer_devs = dev0
        local_batch_size = 8
        self._test_grad_layout(replica_devices, layer_devs, local_batch_size)

    @requires_nccl()
    @skip_if_lt_x_gpu(4)
    @skip_if_rocm
    def test_grad_layout_2devicemodule(self):
        # Model split across two devices within one process.
        int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
        dev0 = torch.device("cuda:" + str(int_devices[0]))
        dev1 = torch.device("cuda:" + str(int_devices[1]))
        replica_devices = None
        layer_devs = [dev0] * 2 + [dev1] * 2
        local_batch_size = 8
        self._test_grad_layout(replica_devices, layer_devs, local_batch_size)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_param_layout_mismatch_error(self):
        # Rank 0 uses contiguous layouts while other ranks use channels_last;
        # DDP construction on the mismatching ranks should raise.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
        layer_devs = dev0
        layer_formats = (
            [torch.contiguous_format] * 4
            if self.rank == 0
            else [torch.channels_last] * 4
        )
        layer_dtypes = [torch.float] * 4

        m = ConvNet(layer_devs, layer_formats, layer_dtypes)
        if self.rank == 0:
            m_ddp = DistributedDataParallel(
                m, device_ids=[dev0], process_group=process_group
            )
        else:
            with self.assertRaisesRegex(
                RuntimeError,
                ".* appears not to match strides of the same param in process 0",
            ):
                m_ddp = DistributedDataParallel(
                    m, device_ids=[dev0], process_group=process_group
                )

    def _gpu_model_with_ddp_comm_hook(
        self,
        process_group,
        hook=None,
        gradient_as_bucket_view=False,
        state=None,
        static_graph=False,
    ):
        """Build a DDP-wrapped ModuleForDdpCommHook on this rank's first GPU,
        optionally marking the graph static and registering a comm hook."""
        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        gpu_model = DistributedDataParallel(
            ModuleForDdpCommHook().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )
        if static_graph:
            gpu_model._set_static_graph()

        # Register a DDP communication hook if any.
        if hook is not None:
            gpu_model.register_comm_hook(state, hook)
        return gpu_model

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_future_passing_gpu_nccl(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        # Get GPU model with simple_hook registered.
        gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)

        # check whether the grads are equal to what simple_hook's then callback returns.
        self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))

    def _test_ddp_comm_hook_allreduce_hook_nccl(
        self, gradient_as_bucket_view=False, static_graph=False
    ):
        # A hand-written allreduce hook should produce the same grads as DDP's
        # built-in allreduce path.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def allreduce_hook(
            state: object, bucket: dist.GradBucket
        ) -> torch.futures.Future[torch.Tensor]:
            tensors = [bucket.buffer() / self.world_size]
            return (
                process_group.allreduce(tensors)
                .get_future()
                .then(lambda fut: fut.value()[0])
            )

        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group, allreduce_hook, gradient_as_bucket_view, static_graph
        )

        self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))

    def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        state = process_group
        for hook in [default.allreduce_hook, default.fp16_compress_hook]:
            gpu_model = self._gpu_model_with_ddp_comm_hook(
                process_group, hook, gradient_as_bucket_view, state
            )
            self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))

    def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)

        hook_args = [
            (powerSGD.powerSGD_hook, powerSGD_state),
            (default.allreduce_hook, process_group),
        ]

        for hook, state in hook_args:
            gpu_model = self._gpu_model_with_ddp_comm_hook(
                process_group,
                default.fp16_compress_wrapper(hook),
                gradient_as_bucket_view,
                state,
            )
            self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))

    def _test_hook_then_optimizer(
        self,
        functional_optim_cls,
        *functional_optim_args,
        gradient_as_bucket_view=False,
        **functional_optim_kwargs
    ):
        # Compare params after running the optimizer fused into the comm hook
        # against running the hook and optimizer separately.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        hook, hook_state = default.allreduce_hook, process_group
        opt_hook_state = default._OptimizerHookState(
            functional_optim_cls,
            *functional_optim_args,
            **functional_optim_kwargs,
        )
        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group,
            default._hook_then_optimizer(hook, opt_hook_state),
            gradient_as_bucket_view,
            hook_state,
        )
        prev_params = copy.deepcopy(list(gpu_model.parameters()))
        for _ in range(8):
            gpu_model.zero_grad()
            self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
        new_params = list(gpu_model.parameters())
        gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook(
            process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state
        )
        # Map the functional optimizer class back to its eager counterpart.
        mapping = {v: k for k, v in functional_optim_map.items()}
        sgd = mapping.get(functional_optim_cls)(
            gpu_model_allreduce.parameters(),
            *functional_optim_args,
            **functional_optim_kwargs,
        )
        # NOTE(review): indentation reconstructed — sgd.step() assumed inside
        # the loop (one optimizer step per iteration); confirm against upstream.
        for _ in range(8):
            gpu_model_allreduce.zero_grad()
            self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2))
            sgd.step()
        post_opt_params = list(gpu_model_allreduce.parameters())
        for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params):
            self.assertEqual(opt_as_hook_param, post_opt_param)

    def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        for use_error_feedback, warm_start in product([True, False], [True, False]):
            state = powerSGD.PowerSGDState(
                process_group=process_group,
                matrix_approximation_rank=1,
                use_error_feedback=use_error_feedback,
                warm_start=warm_start,
            )
            for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
                gpu_model = self._gpu_model_with_ddp_comm_hook(
                    process_group, hook, gradient_as_bucket_view, state
                )
                self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))

    def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        for comm_hook_type in [
            dist.BuiltinCommHookType.ALLREDUCE,
            dist.BuiltinCommHookType.FP16_COMPRESS,
        ]:
            gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
                process_group, comm_hook_type, gradient_as_bucket_view
            )
            self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_hook_nccl(self):
        self._test_ddp_comm_hook_allreduce_hook_nccl()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_default_ddp_comm_hooks_nccl(self):
        self._test_default_ddp_comm_hooks_nccl()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_fp16_compress_wrapper_nccl(self):
        self._test_fp16_compress_wrapper()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_sgd_nccl(self):
        sgd_lr = 1e-2
        sgd_momentum = 0.9
        sgd_weight_decay = 0.01
        self._test_hook_then_optimizer(
            _FunctionalSGD,
            sgd_lr,
            momentum=sgd_momentum,
            weight_decay=sgd_weight_decay,
        )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_sgd_nccl_grad_as_bucket_view(self):
        sgd_lr = 1e-2
        sgd_momentum = 0.9
        sgd_weight_decay = 0.01
        self._test_hook_then_optimizer(
            _FunctionalSGD,
            sgd_lr,
            momentum=sgd_momentum,
            weight_decay=sgd_weight_decay,
            gradient_as_bucket_view=True
        )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_adamw_nccl(self):
        adamw_lr = 1e-2
        adamw_betas = (0.9, 0.99)
        adamw_eps = 1e-6
        self._test_hook_then_optimizer(
            _FunctionalAdamW,
            adamw_lr,
            betas=adamw_betas,
            eps=adamw_eps,
            gradient_as_bucket_view=True
        )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_adam_nccl(self):
        adam_lr = 1e-2
        adam_betas = (0.9, 0.99)
        adam_eps = 1e-6
        self._test_hook_then_optimizer(
            _FunctionalAdam,
            adam_lr,
            betas=adam_betas,
            eps=adam_eps,
            gradient_as_bucket_view=True
        )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_adam_nccl_grad_as_bucket_view(self):
        adam_lr = 1e-2
        adam_betas = (0.9, 0.99)
        adam_eps = 1e-6
        self._test_hook_then_optimizer(
            _FunctionalAdam,
            adam_lr,
            betas=adam_betas,
            eps=adam_eps,
            gradient_as_bucket_view=True
        )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_builtin_ddp_comm_hooks_nccl(self):
        self._test_builtin_ddp_comm_hooks_nccl()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_powerSGD_ddp_comm_hook_nccl(self):
        self._test_powerSGD_ddp_comm_hook_nccl()

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
        self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
        self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_default_ddp_comm_hooks_nccl_is_view(self):
        self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_fp16_compress_wrapper_is_view(self):
        self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
        self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
        self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
        # A hook whose future is chained with two `then` callbacks (x10, /2)
        # should yield 1.25 * ones after averaging.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def allreduce_with_then_hook(
            state: object, bucket: dist.GradBucket
        ) -> torch.futures.Future[torch.Tensor]:
            tensors = [bucket.buffer() / self.world_size]
            fut = process_group.allreduce(tensors).get_future()

            def mult(fut):
                return 10 * fut.value()[0]

            def div(fut):
                return 0.5 * fut.value()

            return fut.then(mult).then(div)

        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group, allreduce_with_then_hook
        )

        self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))

    class AcceptsParam(torch.nn.Module):
        # Adds a scaled shared parameter to its input.
        def __init__(self, p, factor):
            super().__init__()
            self.a = p
            self.f = factor

        def forward(self, input):
            return input + self.a * self.f

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_weight_sharing(self):
        # Two modules share the same Parameter; verify grads match the analytic
        # value under all set_to_none / bucket-view combinations.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        size = 2048 * 2048
        dev = self.rank
        world = self.world_size

        p = torch.nn.Parameter(torch.randn(size, requires_grad=True))

        for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
            m = torch.nn.Sequential(
                self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
            ).cuda(dev)

            m = torch.nn.parallel.DistributedDataParallel(
                m,
                bucket_cap_mb=1,
                gradient_as_bucket_view=use_bucket_view,
                device_ids=[dev],
                process_group=process_group,
            )

            for i in range(3):
                m.zero_grad(set_to_none=try_set_to_none)
                m(1).sum().backward()

                analytic = torch.full_like(
                    p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
                )
                for name, p in m.named_parameters():
                    self.assertEqual(
                        p.grad,
                        analytic,
                        "mismatch at " + name + ".grad for "
                        + "set_to_none = {}, use_bucket_view = {}".format(
                            try_set_to_none, use_bucket_view
                        ),
                    )

    class CheckpointOnceModule(nn.Module):
        # l2 runs under activation checkpointing once per forward.
        def __init__(self):
            super().__init__()
            self.l1 = nn.Linear(20, 20)
            self.l2 = nn.Linear(20, 20)

        def forward(self, inp):
            x = self.l1(inp)
            x = checkpoint(self.l2, x)
            return x

    class CheckpointTwiceModule(CheckpointOnceModule):
        # Same layers, but the checkpointed l2 is invoked twice per forward.
        def __init__(self):
            super().__init__()

        def forward(self, inp):
            x = self.l1(inp)
            x = checkpoint(self.l2, x)
            x = checkpoint(self.l2, x)
            return x

    def _prepare_dummy_data(self):
        # Global batch plus this rank's shard of it.
        ddp_bs = 16
        bs = ddp_bs * self.world_size
        input = torch.rand((bs, 20), device="cuda", requires_grad=True)
        target = torch.randn((bs, 20), device="cuda")
        offset = self.rank * ddp_bs
        ddp_input = input[offset : offset + ddp_bs]
        ddp_target = target[offset : offset + ddp_bs]
        return input, ddp_input, target, ddp_target

    def _train_model(self, model, input_var, target, loss, run_checkpoint=False):
        model.train()
        if run_checkpoint:
            output = checkpoint(model, input_var)
        else:
            output = model(input_var)
        l = loss(output, target)
        l.backward()

    def _test_ddp_checkpointing(
        self,
        input_model,
        process_group,
        use_bucket_view,
        find_unused_parameters=False,
        static_graph=False,
        run_checkpoint=False,
    ):
        """Train a model and its DDP copy for 5 iterations and check gradients
        agree, optionally with activation checkpointing / static graph."""
        torch.cuda.set_device(self.rank)
        torch.manual_seed(31415)
        model = copy.deepcopy(input_model).cuda()
        ddp_model = copy.deepcopy(input_model).cuda()
        ddp_model = nn.parallel.DistributedDataParallel(
            ddp_model,
            bucket_cap_mb=1,
            gradient_as_bucket_view=use_bucket_view,
            device_ids=[self.rank],
            process_group=process_group,
            find_unused_parameters=find_unused_parameters,
        )
        if static_graph:
            ddp_model._set_static_graph()
        self.assertEqual(
            ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
        )
        input, ddp_input, target, ddp_target = self._prepare_dummy_data()
        loss = nn.MSELoss()
        for i in range(5):
            model.zero_grad(set_to_none=False)
            ddp_model.zero_grad(set_to_none=False)
            self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)
            self._train_model(
                ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint
            )
            for i, j in zip(model.parameters(), ddp_model.parameters()):
                self.assertTrue(i.grad is not None)
                self.assertTrue(j.grad is not None)
                self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_once(self):
        # Checkpointing a layer used once works with or without static graph.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        for use_bucket_view, static_graph in product((False, True), (False, True)):
            self._test_ddp_checkpointing(
                self.CheckpointOnceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                static_graph=static_graph,
            )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_unused_params(self):
        # With find_unused_parameters=True checkpointing fails unless the graph
        # is marked static.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        for use_bucket_view in (True, False):
            with self.assertRaisesRegex(
                RuntimeError,
                "Expected to mark a variable ready only once.",
            ):
                model = self._test_ddp_checkpointing(
                    self.CheckpointOnceModule(),
                    process_group=process_group,
                    use_bucket_view=use_bucket_view,
                    find_unused_parameters=True,
                    static_graph=False,
                )
            model = self._test_ddp_checkpointing(
                self.CheckpointOnceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                find_unused_parameters=True,
                static_graph=True,
            )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_twice(self):
        # Checkpointing the same layer twice fails without static graph.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        for use_bucket_view in (True, False):
            with self.assertRaisesRegex(
                RuntimeError,
                "Expected to mark a variable ready only once.",
            ):
                model = self._test_ddp_checkpointing(
                    self.CheckpointTwiceModule(),
                    process_group=process_group,
                    use_bucket_view=use_bucket_view,
                    static_graph=False,
                )
            model = self._test_ddp_checkpointing(
                self.CheckpointTwiceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                static_graph=True,
            )

    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_weight_sharing(self):
        # Two linear layers sharing one weight, trained under checkpointing.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        torch.cuda.set_device(self.rank)
        for use_bucket_view, static_graph in product((False,
                True), (False, True)):
            torch.manual_seed(31415)
            l1 = nn.Linear(20, 20)
            l2 = nn.Linear(20, 20)
            l1.weight = l2.weight
            model = nn.Sequential(l1, l2)
            self._test_ddp_checkpointing(
                model,
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                static_graph=static_graph,
                run_checkpoint=True,
            )


class NcclErrorHandlingTest(MultiProcessTestCase):
    # Tests exercising NCCL error propagation and timeout behavior; some tests
    # intentionally kill child processes, so their return codes are not checked.
    def setUp(self):
        super(NcclErrorHandlingTest, self).setUp()
        self.skip_return_code_checks = [
            self.test_nccl_errors_blocking_abort.__wrapped__,
            self.test_nccl_errors_blocking_sigkill.__wrapped__,
            self.test_nccl_errors_blocking_sigterm.__wrapped__,
            self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
        ]
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
        # that use NCCL_BLOCKING_WAIT will test it as expected.
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
        self._spawn_processes()

    def tearDown(self):
        super(NcclErrorHandlingTest, self).tearDown()
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    @property
    def op_timeout_sec(self):
        return 1

    @property
    def world_size(self):
        return 3

    @property
    def blocking_wait_error_msg(self):
        return "Caught collective operation timeout"

    def _run_all_reduce(self, pg):
        pg.allreduce(torch.rand(10).cuda(self.rank))

    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_nonblocking(self):
        # Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
        # since test_c10d_common runs with async error handling by default, but this
        # tests behavior when it is not enabled.
        prev_nccl_async_error_handling = os.environ.get(
            "NCCL_ASYNC_ERROR_HANDLING", None
        )
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        process_group.allreduce(torch.rand(10).cuda(self.rank))
        if self.rank == 0:
            # This allreduce does not block Python thread as allreduce enqueues
            # the cuda operation, and then wait only blocks the current cuda
            # stream.
            work = process_group.allreduce(torch.rand(10).cuda(self.rank))
            work.wait()

            # Now the work scheduled next should hang forever since the previous
            # allreduce will never complete.
            t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
            t.daemon = True
            t.start()
            t.join(int(get_timeout(self.id()) / 5))
            self.assertTrue(t.is_alive())

        if prev_nccl_async_error_handling is not None:
            os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling

    def _test_nccl_errors_blocking(self, func):
        # Rank 1 exits via `func` mid-collective; rank 0's pending allreduce
        # must then time out with a RuntimeError in blocking mode.
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(
            store,
            self.rank,
            self.world_size,
            timeout=timedelta(seconds=self.op_timeout_sec),
        )
        process_group.allreduce(torch.rand(10).cuda(self.rank))
        if self.rank == 0:
            work = process_group.allreduce(torch.rand(10).cuda(self.rank))
            with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
                # Operation would time out in blocking mode.
                work.wait()
            # Run some GPU operations to make sure cuda has not gotten stuck.
            # It was observed cuda could get stuck if NCCL communicators were
            # not properly aborted before throwing RuntimeError.
            a = torch.rand(10).cuda(self.rank)
        elif self.rank == 1:
            # Clean up structures (ex: files for FileStore before going down)
            del process_group
            func()
        else:
            # Wait for timeout
            time.sleep(2 * self.op_timeout_sec)

            # Now verify communicators on this rank have been aborted by the watchdog thread.
            self._wait_for_comm_abort(process_group)

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_clean_exit(self):
        self._test_nccl_errors_blocking(lambda: sys.exit(0))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_nonzero_exit(self):
        self._test_nccl_errors_blocking(lambda: sys.exit(1))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    @sandcastle_skip(
        "Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
    )
    def test_nccl_errors_blocking_abort(self):
        self._test_nccl_errors_blocking(lambda: os.abort())

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_sigkill(self):
        self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_sigterm(self):
        self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    def test_nccl_blocking_wait_with_barrier(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(
            store,
            self.rank,
            self.world_size,
            timeout=timedelta(seconds=self.op_timeout_sec),
        )
        process_group.barrier().wait()
        if self.rank == 0:
            with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
                # This should timeout
                process_group.barrier().wait()

    def _run_invalid_nccl_blocking_wait_env(self, val):
        # An invalid NCCL_BLOCKING_WAIT value must make group creation raise.
        os.environ["NCCL_BLOCKING_WAIT"] = val
        store = c10d.FileStore(self.file_name, self.world_size)
        with self.assertRaises(RuntimeError):
            process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    @requires_nccl()
    @skip_if_lt_x_gpu(3)
    def test_invalid_nccl_blocking_wait_env(self):
        self._run_invalid_nccl_blocking_wait_env("abc")
        self._run_invalid_nccl_blocking_wait_env("-1")
        self._run_invalid_nccl_blocking_wait_env("2147483647")
        self._run_invalid_nccl_blocking_wait_env("4294967295")

    def _wait_for_comm_abort(self, process_group):
        # Poll with allreduce until the watchdog has aborted the communicator.
        while True:
            try:
                process_group.allreduce(torch.rand(10).cuda(self.rank))
            except Exception as e:
                if "NCCL communicator was aborted" in str(e):
                    return
                else:
                    raise e
            time.sleep(1)

    @with_nccl_blocking_wait
    @requires_nccl()
    @skip_if_lt_x_gpu(3)
    def test_nccl_timeout(self):
        store = c10d.FileStore(self.file_name, self.world_size)

        # Initialize process_group.
        timeout = 1
        process_group = c10d.ProcessGroupNCCL(
            store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
        )
        process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()

        if self.rank == 0:
            # This should timeout in about 1 second.
            start = time.time()
            # Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
            with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
                process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
        else:
            # Sleep to ensure timeout.
            time.sleep(2 * timeout)
            self._wait_for_comm_abort(process_group)


class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
    def setUp(self):
        super(CommTest, self).setUp()
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
        # that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" self._spawn_processes() def tearDown(self): super(CommTest, self).tearDown() try: os.remove(self.file_name) except OSError: pass def _test_broadcast_coalesced(self, process_group, device, root_rank): half = torch.float16 # No support for float16 for CPU tensors if device == torch.device("cpu"): half = torch.float32 target = torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float32, device=device).chunk(5) target += torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float64, device=device).chunk(5) target += torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float32, device=device).chunk(5) # The tensors to pass to broadcast are idential to the target # only on the process that is the root of the broadcast. if self.rank == root_rank: tensors = list(tensor.clone() for tensor in target) else: tensors = list(torch.zeros_like(tensor) for tensor in target) if self.rank != root_rank: self.assertNotEqual(tensors, target) c10d._broadcast_coalesced( process_group, tensors, buffer_size=256, src=root_rank ) if self.rank != root_rank: self.assertEqual(tensors, target) @requires_nccl() @skip_if_lt_x_gpu(2) def test_broadcast_coalesced_nccl(self): store = c10d.FileStore(self.file_name, self.world_size) process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size) device = torch.device("cuda:%d" % self.rank) ranks = [0, 1] for root_rank in ranks: self._test_broadcast_coalesced(process_group, device, root_rank) @requires_nccl() @skip_if_lt_x_gpu(2) def test_sequence_num_set_default_pg_nccl(self): torch.cuda.set_device(self.rank) self._test_sequence_num_set_default_pg(backend="nccl") @skip_if_lt_x_gpu(2) @requires_nccl() def test_sequence_num_incremented_nccl_default(self): self._test_sequence_num_incremented_default_group("nccl") @skip_if_lt_x_gpu(4) @requires_nccl() def 
test_sequence_num_incremented_nccl_subgroup(self): if self.world_size < 4: return sandcastle_skip("Test requires world_size of at least 4") self._test_sequence_num_incremented_subgroup("nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_sequence_num_set_nccl_new_group(self): torch.cuda.set_device(self.rank) self._test_sequence_num_set_new_group(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_pass_nccl_options_high_priority_stream(self): pg_opts = c10d.ProcessGroupNCCL.Options() pg_opts.is_high_priority_stream = True store = c10d.FileStore(self.file_name, self.world_size) # Test init_process_group accepts options dist.init_process_group( "nccl", world_size=self.world_size, rank=self.rank, store=store, pg_options=pg_opts, ) # Test with new_group pg = c10d.new_group([0, 1], pg_options=pg_opts) # test if the process group constructed with high priority stream self.assertTrue(pg.options.is_high_priority_stream) # test the process group works as expected t = torch.tensor([self.rank + 1] * 10).cuda(self.rank) pg.allreduce(t).wait() expected_tensor = torch.tensor([3] * 10).cuda(self.rank) self.assertEqual(expected_tensor, t) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) c10d.all_reduce(t) expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank) self.assertEqual(expected_tensor, t) # Test with new_group pg = c10d.new_group([0, 1]) t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) pg = c10d.new_group([0]) if self.rank == 0: t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) pg = c10d.new_group([1]) if self.rank == 1: 
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier_timeout(self): store = c10d.FileStore(self.file_name, self.world_size) if self.rank == 0: with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store, timeout=timedelta(seconds=1), ) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier_timeout_new_group(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store, timeout=timedelta(seconds=1), ) if self.rank == 0: with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0, 1], timeout=timedelta(seconds=1)) with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0], timeout=timedelta(seconds=1)) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier_timeout_new_group_non_member(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store, timeout=timedelta(seconds=1), ) if self.rank == 1: with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0, 1], timeout=timedelta(seconds=1)) with self.assertRaisesRegex( RuntimeError, "Timed out initializing process group" ): c10d.new_group([0], timeout=timedelta(seconds=1)) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_barrier_device_ids(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) c10d.barrier(device_ids=[self.rank]) @requires_nccl() 
@skip_if_lt_x_gpu(2) def test_nccl_barrier_device_ids_function_argument(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) with self.assertRaisesRegex(RuntimeError, "Invalid function argument"): c10d.barrier(device_ids=self.rank) if __name__ == "__main__": assert ( not torch.cuda._initialized ), "test_distributed must not have initialized CUDA context on main process" run_tests()
true
true
f7f66a14deffaee4c0d6bb03f92171b850ac9ba0
16,008
py
Python
gosecure_app.py
larrycameron80/goSecure
8d2b8179e63d26640a46aa6475c4e13300ade55f
[ "CC0-1.0" ]
790
2016-05-01T17:18:22.000Z
2018-04-26T18:16:02.000Z
gosecure_app.py
rev10d/goSecure
8d2b8179e63d26640a46aa6475c4e13300ade55f
[ "CC0-1.0" ]
6
2016-06-22T14:16:37.000Z
2017-10-23T16:33:20.000Z
gosecure_app.py
rev10d/goSecure
8d2b8179e63d26640a46aa6475c4e13300ade55f
[ "CC0-1.0" ]
140
2016-05-01T04:50:46.000Z
2018-04-21T19:51:26.000Z
#!/usr/bin/env python import hashlib import os import pickle import time from functools import wraps import flask.ext.login as flask_login from flask import ( Flask, render_template, request, Response, flash, redirect, url_for) from forms import ( loginForm, initialSetupForm, userForm, wifiForm, vpnPskForm, resetToDefaultForm, statusForm) from scripts.pi_mgmt import ( pi_reboot, pi_shutdown, start_ssh_service, update_client) from scripts.rpi_network_conn import add_wifi, internet_status, reset_wifi from scripts.vpn_server_conn import ( set_vpn_params, reset_vpn_params, start_vpn, stop_vpn, restart_vpn, vpn_status, vpn_configuration_status) app = Flask(__name__) login_manager = flask_login.LoginManager() login_manager.init_app(app) # Flask-Login functions (for regular Pages) # users = {"admin":{"password":"2074ad04839ae517751e5948ae13f0e3c90d186c9c9bbd29c3c88b9c6000dba5", "salt":"uOMbInZTYYpiCGvEaH8Byw==\n"}} # default username=admin password=gosecure, user is prompted to change if default is being used. with open("/home/pi/goSecure_Web_GUI/users_db.p", "rb") as fin: users = pickle.load(fin) class User(flask_login.UserMixin): pass @login_manager.user_loader def user_loader(username): if username not in users: return user = User() user.id = username return user @login_manager.request_loader def request_loader(request): username = request.form.get('username') if username not in users: return user = User() user.id = username # DO NOT ever store passwords in plaintext and always compare password # hashes using constant-time comparison! 
if user_validate_credentials(request.form['username'], request.form['password']): user.is_authenticated = True return user @login_manager.unauthorized_handler def unauthorized_handler(): flash("Unauthorized, please log in.", "error") return redirect(url_for("login")) # Flask HTTP Basic Auth (for API) def authenticate(): """Sends a 401 response that enables basic auth""" return Response( "Unauthorized", 401, {'WWW-Authenticate': 'Basic realm="Login Required"'}) def requires_basic_auth(f): @wraps(f) def decorated(*args, **kwargs): auth = request.authorization if not auth or not user_validate_credentials(auth.username, auth.password): return authenticate() return f(*args, **kwargs) return decorated def flash_form_errors(form): for field, errors in form.errors.items(): for error in errors: flash(u"Error in the %s field - %s" % ( getattr(form, field).label.text, error ), "error") # Auth helper functions # return True is username and password pair match what's in the database # else return False def user_validate_credentials(username, password): if username not in users: return False else: stored_password = users[username]['password'] stored_salt = users[username]['salt'] userPasswordHash = hashlib.sha256(str(stored_salt) + password).hexdigest() return stored_password == userPasswordHash # return True is password is changed successfully # else return False def user_change_credentials(username, password, new_password): if username not in users: return False else: # verify current password if user_validate_credentials(username, password): #change password userPasswordHashSalt = os.urandom(16).encode("base64") userPasswordHash = hashlib.sha256(str(userPasswordHashSalt) + new_password).hexdigest() users[username]["salt"] = userPasswordHashSalt users[username]["password"] = userPasswordHash with open("/home/pi/goSecure_Web_GUI/users_db.p", "wb") as fout: pickle.dump(users, fout) return True else: return False # return True if credentials are reset # else return False def 
user_reset_credentials(username, password): return user_change_credentials(username, password, "gosecure") # Routes for web pages # 404 page @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 # Login Page @app.route("/", methods=["GET", "POST"]) def login(): form = loginForm() if request.method == "GET": return render_template("login.html", form=form) elif request.method == "POST": if form.validate(): username = form.username.data password = form.password.data if user_validate_credentials(username, password): user = User() user.id = username flask_login.login_user(user) # check to see if default credentials are being used. If so, redirect to change password page. if user_validate_credentials("admin", "gosecure"): flash("Please change the default password.", "notice") return redirect(url_for("user")) else: internet_status_bool = internet_status() vpn_status_bool = vpn_status() vpn_configuration_status_bool = vpn_configuration_status() # check to see if network is up. If not, redirect to network page if internet_status_bool is False and vpn_configuration_status_bool is True: flash("Internet is not reachable.", "notice") return redirect(url_for("wifi")) # check to see if network and vpn are up. If not, redirect to initial setup page elif internet_status_bool is False and vpn_status_bool is False: return redirect(url_for("initial_setup")) # check to see if vpn is up. If not, redirect to vpn page elif vpn_status_bool is False: flash("VPN is not established.", "notice") return redirect(url_for("vpn_psk")) else: return redirect(request.args.get("next") or url_for("status")) else: flash("Invalid username or password. 
Please try again.", "error") return render_template("login.html", form=form) else: flash_form_errors(form) return render_template("login.html", form=form) @app.route('/logout') def logout(): flask_login.logout_user() return redirect(url_for("login")) # User page @app.route("/status", methods=["GET", "POST"]) @flask_login.login_required def status(): form = statusForm() if request.method == "GET": # check to see if network and vpn are active, red=not active, green=active internet_status_color = "green" if internet_status() else "red" vpn_status_color = "green" if vpn_status() else "red" return render_template("status.html", form=form, internet_status_color=internet_status_color, vpn_status_color=vpn_status_color) # User page @app.route("/user", methods=["GET", "POST"]) @flask_login.login_required def user(): form = userForm() if request.method == "GET": form.username.data = flask_login.current_user.id return render_template("user.html", form=form) elif request.method == "POST": if form.validate(): username = form.username.data password = form.password.data new_password = form.new_password.data if user_change_credentials(username, password, new_password): flash("Your user information has been successfully changed. Please login with the new credentials.", "success") return redirect(url_for("login")) else: flash("Invalid current username or password. 
Please try again.", "error") return render_template("user.html", form=form) else: flash_form_errors(form) return render_template("user.html", form=form) # Initial setup page @app.route("/initial_setup", methods=["GET", "POST"]) @flask_login.login_required def initial_setup(): form = initialSetupForm() if request.method == "GET": return render_template("initial_setup.html", form=form) elif request.method == "POST": if form.validate(): ssid = form.ssid.data.rsplit("-", 1)[0] psk = form.psk.data add_wifi(ssid, psk) if internet_status() is True: vpn_server = form.vpn_server.data user_id = form.user_id.data user_psk = form.user_psk.data set_vpn_params(vpn_server, user_id, user_psk) restart_vpn() flash("Wifi and VPN settings saved!", "success") return redirect(url_for("status")) else: flash("Error! Cannot reach the internet...", "error") return render_template("initial_setup.html", form=form) else: flash("Error! " + str(form.data), "error") return render_template("initial_setup.html", form=form) # Wifi page @app.route("/wifi", methods=["GET", "POST"]) @flask_login.login_required def wifi(): form = wifiForm() if request.method == "GET": return render_template("wifi.html", form=form) elif request.method == "POST": if form.validate(): ssid = form.ssid.data.rsplit("-", 1)[0] psk = form.psk.data add_wifi(ssid, psk) time.sleep(5) if internet_status() is True: restart_vpn() time.sleep(5) flash("Wifi settings saved! VPN Restarted!", "success") return redirect(url_for("status")) else: flash("Error! Cannot reach the internet...", "error") return render_template("wifi.html", form=form) else: flash("Error! 
" + str(form.data), "error") return render_template("wifi.html", form=form) # VPN psk page @app.route("/vpn_psk", methods=["GET", "POST"]) @flask_login.login_required def vpn_psk(): form = vpnPskForm() if request.method == "GET": return render_template("vpn_psk.html", form=form) elif request.method == "POST": if form.validate(): vpn_server = form.vpn_server.data user_id = form.user_id.data user_psk = form.user_psk.data set_vpn_params(vpn_server, user_id, user_psk) restart_vpn() if vpn_status(): flash("VPN settings saved and VPN restarted!", "success") return redirect(url_for("status")) else: flash("VPN settings saved and VPN restarted! Unable to establish VPN connection.", "error") return render_template("vpn_psk.html", form=form) else: flash("Error! " + str(form.data), "error") return render_template("vpn_psk.html", form=form) # Reset to default page @app.route("/reset_to_default", methods=["GET", "POST"]) @flask_login.login_required def reset_to_default(): form = resetToDefaultForm() if request.method == "GET": form.username.data = flask_login.current_user.id return render_template("reset_to_default.html", form=form) elif request.method == "POST": if form.validate(): username = form.username.data password = form.password.data reset_vpn_params() reset_wifi() if user_reset_credentials(username, password): flash("Your client has been successfully reset to default settings.", "success") return redirect(url_for("logout")) else: flash("Error resetting client.", "error") return render_template("reset_to_default.html", form=form) else: flash_form_errors(form) return render_template("reset_to_default.html", form=form) @app.route("/action", methods=["POST"]) @flask_login.login_required def execute_action(): action = request.form["action"] if action == "reboot": pi_reboot() elif action == "shutdown": pi_shutdown() elif action == "start_vpn": start_vpn() flash("VPN Started!", "notice") elif action == "stop_vpn": stop_vpn() flash("VPN Stopped!", "notice") elif action == 
"restart_vpn": restart_vpn() flash("VPN Restarted!", "notice") elif action == "ssh_service": start_ssh_service() flash("SSH Service Started! It will be turned off on reboot.") elif action == "update_client": update_client() flash("Client will reboot... please reload this page in 1 minute.") else: form = initialSetupForm() flash("Error! Invalid Action!", "error") return redirect(url_for("status")) # REST API @app.route("/v1.0/vpn/credentials", methods=["POST", "DELETE"]) @requires_basic_auth def api_vpn_credentials(): if request.method == "POST": form = initialSetupForm() form.vpn_server.data = request.json["vpn_server"] form.user_id.data = request.json["user_id"] form.user_psk.data = request.json["user_psk"] if request.headers['Content-Type'] == 'application/json': if form.vpn_server.validate(form) and form.user_id.validate(form) and form.user_psk.validate(form): set_vpn_params(form.vpn_server.data, form.user_id.data, form.user_psk.data) return "Successfully set vpn_server, user_id, and psk for VPN" else: return "Invalid user_id or psk format" else: return "415 Unsupported Media Type - Use application/json" elif request.method == "DELETE": reset_vpn_params() return "Successfully reset vpn_server, user_id, and psk for VPN" else: return "Only POST and DELETE methods are supported. Refer to the API Documentation" @app.route("/v1.0/vpn/actions", methods=["POST"]) @requires_basic_auth def api_vpn_actions(): if request.method == "POST": if request.headers['Content-Type'] == 'application/json': action = request.json["action"] if action == "start_vpn": if start_vpn(): return "VPN service started, VPN is ESTABLISHED" else: return "VPN service started, VPN is NOT ESTABLISHED" elif action == "stop_vpn": stop_vpn() return "VPN service stopped, VPN is NOT ESTABLISHED" elif action == "restart_vpn": if restart_vpn(): return "VPN service restarted, VPN is ESTABLISHED" else: return "VPN service restarted, VPN is NOT ESTABLISHED" else: return "Error! Invalid Action!" 
else: return "415 Unsupported Media Type - Use application/json" else: return "Only POST method is supported. Refer to the API Documentation" if __name__ == "__main__": app.secret_key = os.urandom(24) # if SSL key and certificate pair do not exist, create them. if (os.path.exists("ssl.key") and os.path.exists("ssl.crt")) is not True: os.system('openssl genrsa 2048 > ssl.key') os.system('openssl req -new -x509 -nodes -sha256 -days 1095 -subj "/C=US/O=goSecure/CN=goSecureClient" -key ssl.key > ssl.crt') os.system('sudo chown pi:pi ssl.crt ssl.key') os.system('sudo chmod 440 ssl.crt ssl.key') app.run(host="192.168.50.1", port=443, ssl_context=("ssl.crt", "ssl.key"))
34.649351
136
0.614693
import hashlib import os import pickle import time from functools import wraps import flask.ext.login as flask_login from flask import ( Flask, render_template, request, Response, flash, redirect, url_for) from forms import ( loginForm, initialSetupForm, userForm, wifiForm, vpnPskForm, resetToDefaultForm, statusForm) from scripts.pi_mgmt import ( pi_reboot, pi_shutdown, start_ssh_service, update_client) from scripts.rpi_network_conn import add_wifi, internet_status, reset_wifi from scripts.vpn_server_conn import ( set_vpn_params, reset_vpn_params, start_vpn, stop_vpn, restart_vpn, vpn_status, vpn_configuration_status) app = Flask(__name__) login_manager = flask_login.LoginManager() login_manager.init_app(app) with open("/home/pi/goSecure_Web_GUI/users_db.p", "rb") as fin: users = pickle.load(fin) class User(flask_login.UserMixin): pass @login_manager.user_loader def user_loader(username): if username not in users: return user = User() user.id = username return user @login_manager.request_loader def request_loader(request): username = request.form.get('username') if username not in users: return user = User() user.id = username if user_validate_credentials(request.form['username'], request.form['password']): user.is_authenticated = True return user @login_manager.unauthorized_handler def unauthorized_handler(): flash("Unauthorized, please log in.", "error") return redirect(url_for("login")) def authenticate(): return Response( "Unauthorized", 401, {'WWW-Authenticate': 'Basic realm="Login Required"'}) def requires_basic_auth(f): @wraps(f) def decorated(*args, **kwargs): auth = request.authorization if not auth or not user_validate_credentials(auth.username, auth.password): return authenticate() return f(*args, **kwargs) return decorated def flash_form_errors(form): for field, errors in form.errors.items(): for error in errors: flash(u"Error in the %s field - %s" % ( getattr(form, field).label.text, error ), "error") # else return False def 
user_validate_credentials(username, password): if username not in users: return False else: stored_password = users[username]['password'] stored_salt = users[username]['salt'] userPasswordHash = hashlib.sha256(str(stored_salt) + password).hexdigest() return stored_password == userPasswordHash # return True is password is changed successfully # else return False def user_change_credentials(username, password, new_password): if username not in users: return False else: # verify current password if user_validate_credentials(username, password): #change password userPasswordHashSalt = os.urandom(16).encode("base64") userPasswordHash = hashlib.sha256(str(userPasswordHashSalt) + new_password).hexdigest() users[username]["salt"] = userPasswordHashSalt users[username]["password"] = userPasswordHash with open("/home/pi/goSecure_Web_GUI/users_db.p", "wb") as fout: pickle.dump(users, fout) return True else: return False # return True if credentials are reset # else return False def user_reset_credentials(username, password): return user_change_credentials(username, password, "gosecure") # Routes for web pages # 404 page @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 # Login Page @app.route("/", methods=["GET", "POST"]) def login(): form = loginForm() if request.method == "GET": return render_template("login.html", form=form) elif request.method == "POST": if form.validate(): username = form.username.data password = form.password.data if user_validate_credentials(username, password): user = User() user.id = username flask_login.login_user(user) # check to see if default credentials are being used. If so, redirect to change password page. 
if user_validate_credentials("admin", "gosecure"): flash("Please change the default password.", "notice") return redirect(url_for("user")) else: internet_status_bool = internet_status() vpn_status_bool = vpn_status() vpn_configuration_status_bool = vpn_configuration_status() # check to see if network is up. If not, redirect to network page if internet_status_bool is False and vpn_configuration_status_bool is True: flash("Internet is not reachable.", "notice") return redirect(url_for("wifi")) # check to see if network and vpn are up. If not, redirect to initial setup page elif internet_status_bool is False and vpn_status_bool is False: return redirect(url_for("initial_setup")) # check to see if vpn is up. If not, redirect to vpn page elif vpn_status_bool is False: flash("VPN is not established.", "notice") return redirect(url_for("vpn_psk")) else: return redirect(request.args.get("next") or url_for("status")) else: flash("Invalid username or password. Please try again.", "error") return render_template("login.html", form=form) else: flash_form_errors(form) return render_template("login.html", form=form) @app.route('/logout') def logout(): flask_login.logout_user() return redirect(url_for("login")) # User page @app.route("/status", methods=["GET", "POST"]) @flask_login.login_required def status(): form = statusForm() if request.method == "GET": # check to see if network and vpn are active, red=not active, green=active internet_status_color = "green" if internet_status() else "red" vpn_status_color = "green" if vpn_status() else "red" return render_template("status.html", form=form, internet_status_color=internet_status_color, vpn_status_color=vpn_status_color) # User page @app.route("/user", methods=["GET", "POST"]) @flask_login.login_required def user(): form = userForm() if request.method == "GET": form.username.data = flask_login.current_user.id return render_template("user.html", form=form) elif request.method == "POST": if form.validate(): username = 
form.username.data password = form.password.data new_password = form.new_password.data if user_change_credentials(username, password, new_password): flash("Your user information has been successfully changed. Please login with the new credentials.", "success") return redirect(url_for("login")) else: flash("Invalid current username or password. Please try again.", "error") return render_template("user.html", form=form) else: flash_form_errors(form) return render_template("user.html", form=form) # Initial setup page @app.route("/initial_setup", methods=["GET", "POST"]) @flask_login.login_required def initial_setup(): form = initialSetupForm() if request.method == "GET": return render_template("initial_setup.html", form=form) elif request.method == "POST": if form.validate(): ssid = form.ssid.data.rsplit("-", 1)[0] psk = form.psk.data add_wifi(ssid, psk) if internet_status() is True: vpn_server = form.vpn_server.data user_id = form.user_id.data user_psk = form.user_psk.data set_vpn_params(vpn_server, user_id, user_psk) restart_vpn() flash("Wifi and VPN settings saved!", "success") return redirect(url_for("status")) else: flash("Error! Cannot reach the internet...", "error") return render_template("initial_setup.html", form=form) else: flash("Error! " + str(form.data), "error") return render_template("initial_setup.html", form=form) # Wifi page @app.route("/wifi", methods=["GET", "POST"]) @flask_login.login_required def wifi(): form = wifiForm() if request.method == "GET": return render_template("wifi.html", form=form) elif request.method == "POST": if form.validate(): ssid = form.ssid.data.rsplit("-", 1)[0] psk = form.psk.data add_wifi(ssid, psk) time.sleep(5) if internet_status() is True: restart_vpn() time.sleep(5) flash("Wifi settings saved! VPN Restarted!", "success") return redirect(url_for("status")) else: flash("Error! Cannot reach the internet...", "error") return render_template("wifi.html", form=form) else: flash("Error! 
" + str(form.data), "error") return render_template("wifi.html", form=form) # VPN psk page @app.route("/vpn_psk", methods=["GET", "POST"]) @flask_login.login_required def vpn_psk(): form = vpnPskForm() if request.method == "GET": return render_template("vpn_psk.html", form=form) elif request.method == "POST": if form.validate(): vpn_server = form.vpn_server.data user_id = form.user_id.data user_psk = form.user_psk.data set_vpn_params(vpn_server, user_id, user_psk) restart_vpn() if vpn_status(): flash("VPN settings saved and VPN restarted!", "success") return redirect(url_for("status")) else: flash("VPN settings saved and VPN restarted! Unable to establish VPN connection.", "error") return render_template("vpn_psk.html", form=form) else: flash("Error! " + str(form.data), "error") return render_template("vpn_psk.html", form=form) # Reset to default page @app.route("/reset_to_default", methods=["GET", "POST"]) @flask_login.login_required def reset_to_default(): form = resetToDefaultForm() if request.method == "GET": form.username.data = flask_login.current_user.id return render_template("reset_to_default.html", form=form) elif request.method == "POST": if form.validate(): username = form.username.data password = form.password.data reset_vpn_params() reset_wifi() if user_reset_credentials(username, password): flash("Your client has been successfully reset to default settings.", "success") return redirect(url_for("logout")) else: flash("Error resetting client.", "error") return render_template("reset_to_default.html", form=form) else: flash_form_errors(form) return render_template("reset_to_default.html", form=form) @app.route("/action", methods=["POST"]) @flask_login.login_required def execute_action(): action = request.form["action"] if action == "reboot": pi_reboot() elif action == "shutdown": pi_shutdown() elif action == "start_vpn": start_vpn() flash("VPN Started!", "notice") elif action == "stop_vpn": stop_vpn() flash("VPN Stopped!", "notice") elif action == 
"restart_vpn": restart_vpn() flash("VPN Restarted!", "notice") elif action == "ssh_service": start_ssh_service() flash("SSH Service Started! It will be turned off on reboot.") elif action == "update_client": update_client() flash("Client will reboot... please reload this page in 1 minute.") else: form = initialSetupForm() flash("Error! Invalid Action!", "error") return redirect(url_for("status")) # REST API @app.route("/v1.0/vpn/credentials", methods=["POST", "DELETE"]) @requires_basic_auth def api_vpn_credentials(): if request.method == "POST": form = initialSetupForm() form.vpn_server.data = request.json["vpn_server"] form.user_id.data = request.json["user_id"] form.user_psk.data = request.json["user_psk"] if request.headers['Content-Type'] == 'application/json': if form.vpn_server.validate(form) and form.user_id.validate(form) and form.user_psk.validate(form): set_vpn_params(form.vpn_server.data, form.user_id.data, form.user_psk.data) return "Successfully set vpn_server, user_id, and psk for VPN" else: return "Invalid user_id or psk format" else: return "415 Unsupported Media Type - Use application/json" elif request.method == "DELETE": reset_vpn_params() return "Successfully reset vpn_server, user_id, and psk for VPN" else: return "Only POST and DELETE methods are supported. Refer to the API Documentation" @app.route("/v1.0/vpn/actions", methods=["POST"]) @requires_basic_auth def api_vpn_actions(): if request.method == "POST": if request.headers['Content-Type'] == 'application/json': action = request.json["action"] if action == "start_vpn": if start_vpn(): return "VPN service started, VPN is ESTABLISHED" else: return "VPN service started, VPN is NOT ESTABLISHED" elif action == "stop_vpn": stop_vpn() return "VPN service stopped, VPN is NOT ESTABLISHED" elif action == "restart_vpn": if restart_vpn(): return "VPN service restarted, VPN is ESTABLISHED" else: return "VPN service restarted, VPN is NOT ESTABLISHED" else: return "Error! Invalid Action!" 
else: return "415 Unsupported Media Type - Use application/json" else: return "Only POST method is supported. Refer to the API Documentation" if __name__ == "__main__": app.secret_key = os.urandom(24) # if SSL key and certificate pair do not exist, create them. if (os.path.exists("ssl.key") and os.path.exists("ssl.crt")) is not True: os.system('openssl genrsa 2048 > ssl.key') os.system('openssl req -new -x509 -nodes -sha256 -days 1095 -subj "/C=US/O=goSecure/CN=goSecureClient" -key ssl.key > ssl.crt') os.system('sudo chown pi:pi ssl.crt ssl.key') os.system('sudo chmod 440 ssl.crt ssl.key') app.run(host="192.168.50.1", port=443, ssl_context=("ssl.crt", "ssl.key"))
true
true
f7f66c3c0c6462f3632cfe81922054428ced6e5b
2,026
py
Python
forms.py
Gilberthtx/AirDash2
cd2cde4d8f96a517110c130c02e9d859b8739d56
[ "MIT" ]
null
null
null
forms.py
Gilberthtx/AirDash2
cd2cde4d8f96a517110c130c02e9d859b8739d56
[ "MIT" ]
null
null
null
forms.py
Gilberthtx/AirDash2
cd2cde4d8f96a517110c130c02e9d859b8739d56
[ "MIT" ]
null
null
null
from flask_wtf import FlaskForm from wtforms import StringField, PasswordField, SubmitField from wtforms.validators import (DataRequired, Regexp, ValidationError, Email, Length, EqualTo) from models import User # this method will check if the email already exists def email_exists(form, field): if User.select().where(User.email == field.data).exists(): raise ValidationError('User with that email already exists.') '''FORM FOR REGISTERING''' class RegisterForm(FlaskForm): name = StringField( 'Name', validators=[ DataRequired(), Regexp( r'^[a-zA-Z]' ) ] ) email = StringField( 'Email', validators=[ DataRequired(), Email(), email_exists ] ) password = PasswordField( 'Password', validators=[ DataRequired(), Length(min=8), EqualTo('password2', message='Passwords must match') ] ) password2 = PasswordField( 'Confirm password', validators=[ DataRequired() ] ) '''FORM FOR LOGIN''' class LoginForm(FlaskForm): email = StringField( 'Email', validators=[ DataRequired(), Email() ] ) password = PasswordField( 'Password', validators=[ DataRequired() ] ) '''FORM FOR SEARCH''' class FlightSearchForm(FlaskForm): form_date = StringField( 'Date', validators=[ DataRequired() ] ) form_origin = StringField( 'From', validators=[ DataRequired() ] ) form_destination = StringField( 'To', validators=[ DataRequired() ] ) submit = SubmitField('Search') '''FORM TO REMOVE FLIGHT''' class RemoveFlightForm(FlaskForm): flight_number = StringField("Flight Number") remove = SubmitField('Remove Flight')
20.059406
94
0.547878
from flask_wtf import FlaskForm from wtforms import StringField, PasswordField, SubmitField from wtforms.validators import (DataRequired, Regexp, ValidationError, Email, Length, EqualTo) from models import User def email_exists(form, field): if User.select().where(User.email == field.data).exists(): raise ValidationError('User with that email already exists.') class RegisterForm(FlaskForm): name = StringField( 'Name', validators=[ DataRequired(), Regexp( r'^[a-zA-Z]' ) ] ) email = StringField( 'Email', validators=[ DataRequired(), Email(), email_exists ] ) password = PasswordField( 'Password', validators=[ DataRequired(), Length(min=8), EqualTo('password2', message='Passwords must match') ] ) password2 = PasswordField( 'Confirm password', validators=[ DataRequired() ] ) class LoginForm(FlaskForm): email = StringField( 'Email', validators=[ DataRequired(), Email() ] ) password = PasswordField( 'Password', validators=[ DataRequired() ] ) class FlightSearchForm(FlaskForm): form_date = StringField( 'Date', validators=[ DataRequired() ] ) form_origin = StringField( 'From', validators=[ DataRequired() ] ) form_destination = StringField( 'To', validators=[ DataRequired() ] ) submit = SubmitField('Search') class RemoveFlightForm(FlaskForm): flight_number = StringField("Flight Number") remove = SubmitField('Remove Flight')
true
true
f7f66c5883b7adbdc9fda4fd8517449d73735c3d
2,800
py
Python
tests/performance/json_performance.py
mabel-dev/mabel
ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a
[ "Apache-2.0" ]
null
null
null
tests/performance/json_performance.py
mabel-dev/mabel
ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a
[ "Apache-2.0" ]
287
2021-05-14T21:25:26.000Z
2022-03-30T12:02:51.000Z
tests/performance/json_performance.py
gva-jjoyce/mabel
eb99e02d0287b851e65ad9a75b5f4188805d4ec9
[ "Apache-2.0" ]
1
2021-04-29T18:18:20.000Z
2021-04-29T18:18:20.000Z
""" JSON parsing and serialization performance tests so a decision on which library(s) to use can be made - previously the selection was inconsistent. Results (seconds to process 250,000 rows): library | parsing | serialize ------------------------------- json | 1.08 | 1.74 ujson | 0.52 | 0.86 orjson | 0.40 | 0.66 <- lower is better ------------------------------- """ import time def _inner_file_reader( file_name: str, chunk_size: int = 32 * 1024 * 1024, delimiter: str = "\n" ): """ This is the guts of the reader - it opens a file and reads through it chunk by chunk. This allows huge files to be processed as only a chunk at a time is in memory. """ with open(file_name, "r", encoding="utf8") as f: carry_forward = "" chunk = "INITIALIZED" while len(chunk) > 0: chunk = f.read(chunk_size) augmented_chunk = carry_forward + chunk lines = augmented_chunk.split(delimiter) carry_forward = lines.pop() yield from lines if carry_forward: yield carry_forward reader = list(_inner_file_reader("tests/data/tweets/tweets-0000.jsonl")) * 10000 print(len(reader)) def test_parser(parser): for item in reader: parser(item) def test_serializer(serializer): for item in reader: dic = orjson.loads(item) serializer(dic) def time_it(test, *args): start = time.perf_counter_ns() test(*args) return (time.perf_counter_ns() - start) / 1e9 def time_it_2(): start = time.perf_counter_ns() for r in map(orjson.loads, reader): pass return (time.perf_counter_ns() - start) / 1e9 def time_it_3(): start = time.perf_counter_ns() [orjson.loads(r) for r in reader] return (time.perf_counter_ns() - start) / 1e9 def time_it_4(): start = time.perf_counter_ns() for r in reader: orjson.loads(r) return (time.perf_counter_ns() - start) / 1e9 import json import ujson import orjson import os import sys sys.path.insert(1, os.path.join(sys.path[0], "../..")) import mabel.data.formats.json print("json parse :", time_it(test_parser, json.loads)) print("ujson parse :", time_it(test_parser, ujson.loads)) print("orjson parse:", 
time_it(test_parser, orjson.loads)) # <- fastest print("mabel parse:", time_it(test_parser, mabel.data.formats.json.parse)) print("map", time_it_2()) print("comp", time_it_3()) print("for", time_it_4()) print("json serialize :", time_it(test_serializer, json.dumps)) print("ujson serializer :", time_it(test_serializer, ujson.dumps)) print("orjson serializer:", time_it(test_serializer, orjson.dumps)) # <- fastest print("mabel serializer:", time_it(test_serializer, mabel.data.formats.json.serialize))
27.45098
87
0.646071
import time def _inner_file_reader( file_name: str, chunk_size: int = 32 * 1024 * 1024, delimiter: str = "\n" ): with open(file_name, "r", encoding="utf8") as f: carry_forward = "" chunk = "INITIALIZED" while len(chunk) > 0: chunk = f.read(chunk_size) augmented_chunk = carry_forward + chunk lines = augmented_chunk.split(delimiter) carry_forward = lines.pop() yield from lines if carry_forward: yield carry_forward reader = list(_inner_file_reader("tests/data/tweets/tweets-0000.jsonl")) * 10000 print(len(reader)) def test_parser(parser): for item in reader: parser(item) def test_serializer(serializer): for item in reader: dic = orjson.loads(item) serializer(dic) def time_it(test, *args): start = time.perf_counter_ns() test(*args) return (time.perf_counter_ns() - start) / 1e9 def time_it_2(): start = time.perf_counter_ns() for r in map(orjson.loads, reader): pass return (time.perf_counter_ns() - start) / 1e9 def time_it_3(): start = time.perf_counter_ns() [orjson.loads(r) for r in reader] return (time.perf_counter_ns() - start) / 1e9 def time_it_4(): start = time.perf_counter_ns() for r in reader: orjson.loads(r) return (time.perf_counter_ns() - start) / 1e9 import json import ujson import orjson import os import sys sys.path.insert(1, os.path.join(sys.path[0], "../..")) import mabel.data.formats.json print("json parse :", time_it(test_parser, json.loads)) print("ujson parse :", time_it(test_parser, ujson.loads)) print("orjson parse:", time_it(test_parser, orjson.loads)) print("mabel parse:", time_it(test_parser, mabel.data.formats.json.parse)) print("map", time_it_2()) print("comp", time_it_3()) print("for", time_it_4()) print("json serialize :", time_it(test_serializer, json.dumps)) print("ujson serializer :", time_it(test_serializer, ujson.dumps)) print("orjson serializer:", time_it(test_serializer, orjson.dumps)) print("mabel serializer:", time_it(test_serializer, mabel.data.formats.json.serialize))
true
true
f7f66d224da95104fb6991e0e17cda4a0b3351c8
664
py
Python
manage.py
HJSAMO/Django-DRF-UserAPI
18347b028d9380b1abded6e717d4e7528936c6d1
[ "Apache-2.0" ]
null
null
null
manage.py
HJSAMO/Django-DRF-UserAPI
18347b028d9380b1abded6e717d4e7528936c6d1
[ "Apache-2.0" ]
null
null
null
manage.py
HJSAMO/Django-DRF-UserAPI
18347b028d9380b1abded6e717d4e7528936c6d1
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_api.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
28.869565
73
0.679217
import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_api.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
true
true
f7f66d7261321c3d23165ba7df07ffcd98f52fe1
8,202
py
Python
ablation-study/1.E_align_z.py
disanda/MSV
066ed236a4c5df8b4b5e366020fe2954b7a6915a
[ "Apache-2.0", "MIT" ]
31
2021-08-21T10:05:26.000Z
2021-12-22T12:36:46.000Z
ablation-study/1.E_align_z.py
disanda/MSV
066ed236a4c5df8b4b5e366020fe2954b7a6915a
[ "Apache-2.0", "MIT" ]
null
null
null
ablation-study/1.E_align_z.py
disanda/MSV
066ed236a4c5df8b4b5e366020fe2954b7a6915a
[ "Apache-2.0", "MIT" ]
6
2021-08-21T10:05:38.000Z
2021-12-10T14:11:27.000Z
import sys sys.path.append("..") import os import math import torch import torchvision import model.E.Ablation_Study.E_Blur_Z as BE from model.utils.custom_adam import LREQAdam import metric.pytorch_ssim as pytorch_ssim import lpips import numpy as np import tensorboardX import argparse from model.stylegan1.net import Generator, Mapping #StyleGANv1 from training_utils import * def train(tensor_writer = None, args = None): type = args.mtype model_path = args.checkpoint_dir_GAN if type == 1: # StyleGAN1 #model_path = './checkpoint/stylegan_v1/ffhq1024/' Gs = Generator(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3) Gs.load_state_dict(torch.load(model_path+'Gs_dict.pth')) Gm = Mapping(num_layers=int(math.log(args.img_size,2)-1)*2, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512) #num_layers: 14->256 / 16->512 / 18->1024 Gm.load_state_dict(torch.load(model_path+'Gm_dict.pth')) Gm.buffer1 = torch.load(model_path+'./center_tensor.pt') const_ = Gs.const const1 = const_.repeat(args.batch_size,1,1,1).detach().clone().cuda() layer_num = int(math.log(args.img_size,2)-1)*2 # 14->256 / 16 -> 512 / 18->1024 layer_idx = torch.arange(layer_num)[np.newaxis, :, np.newaxis] # shape:[1,18,1], layer_idx = [0,1,2,3,4,5,6。。。,17] ones = torch.ones(layer_idx.shape, dtype=torch.float32) # shape:[1,18,1], ones = [1,1,1,1,1,1,1,1] coefs = torch.where(layer_idx < layer_num//2, 0.7 * ones, ones) # 18个变量前8个裁剪比例truncation_psi [0.7,0.7,...,1,1,1] coefs = coefs.cuda() Gs.cuda() Gm.cuda() E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3) else: print('error') return if args.checkpoint_dir_E != None: E.load_state_dict(torch.load(args.checkpoint_dir_E)) E.cuda() writer = tensor_writer E_optimizer = LREQAdam([{'params': E.parameters()},], lr=args.lr, betas=(args.beta_1, 0.99), weight_decay=0) loss_lpips = lpips.LPIPS(net='vgg').to('cuda') 
batch_size = args.batch_size it_d = 0 for iteration in range(0,args.iterations): set_seed(iteration%30000) z_c1 = torch.randn(batch_size, args.z_dim).cuda() #[n, 512] if type == 1: w1 = Gm(z_c1,coefs_m=coefs) #[batch_size,18,512] imgs1 = Gs.forward(w1,int(math.log(args.img_size,2)-2)) # 7->512 / 6->256 z_c2, _ = E(imgs1) z_c2 = z_c2.squeeze(-1).squeeze(-1) w2 = Gm(z_c2,coefs_m=coefs) imgs2 = Gs.forward(w2,int(math.log(args.img_size,2)-2)) else: print('model type error') return E_optimizer.zero_grad() #loss Images loss_imgs, loss_imgs_info = space_loss(imgs1,imgs2,lpips_model=loss_lpips) loss_msiv = loss_imgs E_optimizer.zero_grad() loss_msiv.backward(retain_graph=True) E_optimizer.step() #Latent-Vectors ## w #loss_w, loss_w_info = space_loss(w1,w2,image_space = False) ## c loss_c, loss_c_info = space_loss(z_c1,z_c2,image_space = False) loss_mslv = loss_c*0.01 E_optimizer.zero_grad() loss_mslv.backward() E_optimizer.step() print('ep_%d_iter_%d'%(iteration//30000,iteration%30000)) print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]') print('---------ImageSpace--------') print('loss_imgs_info: %s'%loss_imgs_info) print('---------LatentSpace--------') print('loss_c_info: %s'%loss_c_info) it_d += 1 writer.add_scalar('loss_imgs_mse', loss_imgs_info[0][0], global_step=it_d) writer.add_scalar('loss_imgs_mse_mean', loss_imgs_info[0][1], global_step=it_d) writer.add_scalar('loss_imgs_mse_std', loss_imgs_info[0][2], global_step=it_d) writer.add_scalar('loss_imgs_kl', loss_imgs_info[1], global_step=it_d) writer.add_scalar('loss_imgs_cosine', loss_imgs_info[2], global_step=it_d) writer.add_scalar('loss_imgs_ssim', loss_imgs_info[3], global_step=it_d) writer.add_scalar('loss_imgs_lpips', loss_imgs_info[4], global_step=it_d) writer.add_scalar('loss_c_mse', loss_c_info[0][0], global_step=it_d) writer.add_scalar('loss_c_mse_mean', loss_c_info[0][1], global_step=it_d) writer.add_scalar('loss_c_mse_std', loss_c_info[0][2], 
global_step=it_d) writer.add_scalar('loss_c_kl', loss_c_info[1], global_step=it_d) writer.add_scalar('loss_c_cosine', loss_c_info[2], global_step=it_d) writer.add_scalar('loss_c_ssim', loss_c_info[3], global_step=it_d) writer.add_scalar('loss_c_lpips', loss_c_info[4], global_step=it_d) writer.add_scalars('Latent Space C', {'loss_c_mse':loss_c_info[0][0],'loss_c_mse_mean':loss_c_info[0][1],'loss_c_mse_std':loss_c_info[0][2],'loss_c_kl':loss_c_info[1],'loss_c_cosine':loss_c_info[2]}, global_step=it_d) if iteration % 100 == 0: n_row = batch_size test_img = torch.cat((imgs1[:n_row],imgs2[:n_row]))*0.5+0.5 torchvision.utils.save_image(test_img, resultPath1_1+'/ep%d_iter%d.jpg'%(iteration//30000,iteration%30000),nrow=n_row) # nrow=3 with open(resultPath+'/Loss.txt', 'a+') as f: print('i_'+str(iteration),file=f) print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]',file=f) print('---------ImageSpace--------',file=f) print('loss_imgs_info: %s'%loss_imgs_info,file=f) print('---------LatentSpace--------',file=f) print('loss_c_info: %s'%loss_c_info,file=f) if iteration % 5000 == 0: torch.save(E.state_dict(), resultPath1_2+'/E_model_ep%d_iter%d.pth'%(iteration//30000,iteration%30000)) #torch.save(Gm.buffer1,resultPath1_2+'/center_tensor_iter%d.pt'%iteration) if __name__ == "__main__": parser = argparse.ArgumentParser(description='the training args') parser.add_argument('--iterations', type=int, default=60001) # epoch = iterations//30000 parser.add_argument('--lr', type=float, default=0.0015) parser.add_argument('--beta_1', type=float, default=0.0) parser.add_argument('--batch_size', type=int, default=2) parser.add_argument('--experiment_dir', default=None) #None parser.add_argument('--checkpoint_dir_GAN', default='../checkpoint/stylegan_v1/ffhq1024/') #None ./checkpoint/stylegan_v1/ffhq1024/ or ./checkpoint/stylegan_v2/stylegan2_ffhq1024.pth or ./checkpoint/biggan/256/G-256.pt parser.add_argument('--config_dir', 
default='./checkpoint/biggan/256/biggan-deep-256-config.json') # BigGAN needs it parser.add_argument('--checkpoint_dir_E', default=None) parser.add_argument('--img_size',type=int, default=1024) parser.add_argument('--img_channels', type=int, default=3)# RGB:3 ,L:1 parser.add_argument('--z_dim', type=int, default=512) # PGGAN , StyleGANs are 512. BIGGAN is 128 parser.add_argument('--mtype', type=int, default=1) # StyleGANv1=1, StyleGANv2=2, PGGAN=3, BigGAN=4 parser.add_argument('--start_features', type=int, default=16) # 16->1024 32->512 64->256 args = parser.parse_args() if not os.path.exists('./result'): os.mkdir('./result') resultPath = args.experiment_dir if resultPath == None: resultPath = "./result/StyleGANv1-AlationStudy-Z" if not os.path.exists(resultPath): os.mkdir(resultPath) resultPath1_1 = resultPath+"/imgs" if not os.path.exists(resultPath1_1): os.mkdir(resultPath1_1) resultPath1_2 = resultPath+"/models" if not os.path.exists(resultPath1_2): os.mkdir(resultPath1_2) writer_path = os.path.join(resultPath, './summaries') if not os.path.exists(writer_path): os.mkdir(writer_path) writer = tensorboardX.SummaryWriter(writer_path) use_gpu = True device = torch.device("cuda" if use_gpu else "cpu") train(tensor_writer=writer, args = args)
46.868571
225
0.667764
import sys sys.path.append("..") import os import math import torch import torchvision import model.E.Ablation_Study.E_Blur_Z as BE from model.utils.custom_adam import LREQAdam import metric.pytorch_ssim as pytorch_ssim import lpips import numpy as np import tensorboardX import argparse from model.stylegan1.net import Generator, Mapping from training_utils import * def train(tensor_writer = None, args = None): type = args.mtype model_path = args.checkpoint_dir_GAN if type == 1: Gs = Generator(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3) Gs.load_state_dict(torch.load(model_path+'Gs_dict.pth')) Gm = Mapping(num_layers=int(math.log(args.img_size,2)-1)*2, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512) Gm.load_state_dict(torch.load(model_path+'Gm_dict.pth')) Gm.buffer1 = torch.load(model_path+'./center_tensor.pt') const_ = Gs.const const1 = const_.repeat(args.batch_size,1,1,1).detach().clone().cuda() layer_num = int(math.log(args.img_size,2)-1)*2 layer_idx = torch.arange(layer_num)[np.newaxis, :, np.newaxis] ones = torch.ones(layer_idx.shape, dtype=torch.float32) coefs = torch.where(layer_idx < layer_num//2, 0.7 * ones, ones) coefs = coefs.cuda() Gs.cuda() Gm.cuda() E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3) else: print('error') return if args.checkpoint_dir_E != None: E.load_state_dict(torch.load(args.checkpoint_dir_E)) E.cuda() writer = tensor_writer E_optimizer = LREQAdam([{'params': E.parameters()},], lr=args.lr, betas=(args.beta_1, 0.99), weight_decay=0) loss_lpips = lpips.LPIPS(net='vgg').to('cuda') batch_size = args.batch_size it_d = 0 for iteration in range(0,args.iterations): set_seed(iteration%30000) z_c1 = torch.randn(batch_size, args.z_dim).cuda() if type == 1: w1 = Gm(z_c1,coefs_m=coefs) imgs1 = Gs.forward(w1,int(math.log(args.img_size,2)-2)) z_c2, _ = E(imgs1) z_c2 = 
z_c2.squeeze(-1).squeeze(-1) w2 = Gm(z_c2,coefs_m=coefs) imgs2 = Gs.forward(w2,int(math.log(args.img_size,2)-2)) else: print('model type error') return E_optimizer.zero_grad() loss_imgs, loss_imgs_info = space_loss(imgs1,imgs2,lpips_model=loss_lpips) loss_msiv = loss_imgs E_optimizer.zero_grad() loss_msiv.backward(retain_graph=True) E_optimizer.step() loss_c, loss_c_info = space_loss(z_c1,z_c2,image_space = False) loss_mslv = loss_c*0.01 E_optimizer.zero_grad() loss_mslv.backward() E_optimizer.step() print('ep_%d_iter_%d'%(iteration//30000,iteration%30000)) print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]') print('---------ImageSpace--------') print('loss_imgs_info: %s'%loss_imgs_info) print('---------LatentSpace--------') print('loss_c_info: %s'%loss_c_info) it_d += 1 writer.add_scalar('loss_imgs_mse', loss_imgs_info[0][0], global_step=it_d) writer.add_scalar('loss_imgs_mse_mean', loss_imgs_info[0][1], global_step=it_d) writer.add_scalar('loss_imgs_mse_std', loss_imgs_info[0][2], global_step=it_d) writer.add_scalar('loss_imgs_kl', loss_imgs_info[1], global_step=it_d) writer.add_scalar('loss_imgs_cosine', loss_imgs_info[2], global_step=it_d) writer.add_scalar('loss_imgs_ssim', loss_imgs_info[3], global_step=it_d) writer.add_scalar('loss_imgs_lpips', loss_imgs_info[4], global_step=it_d) writer.add_scalar('loss_c_mse', loss_c_info[0][0], global_step=it_d) writer.add_scalar('loss_c_mse_mean', loss_c_info[0][1], global_step=it_d) writer.add_scalar('loss_c_mse_std', loss_c_info[0][2], global_step=it_d) writer.add_scalar('loss_c_kl', loss_c_info[1], global_step=it_d) writer.add_scalar('loss_c_cosine', loss_c_info[2], global_step=it_d) writer.add_scalar('loss_c_ssim', loss_c_info[3], global_step=it_d) writer.add_scalar('loss_c_lpips', loss_c_info[4], global_step=it_d) writer.add_scalars('Latent Space C', 
{'loss_c_mse':loss_c_info[0][0],'loss_c_mse_mean':loss_c_info[0][1],'loss_c_mse_std':loss_c_info[0][2],'loss_c_kl':loss_c_info[1],'loss_c_cosine':loss_c_info[2]}, global_step=it_d) if iteration % 100 == 0: n_row = batch_size test_img = torch.cat((imgs1[:n_row],imgs2[:n_row]))*0.5+0.5 torchvision.utils.save_image(test_img, resultPath1_1+'/ep%d_iter%d.jpg'%(iteration//30000,iteration%30000),nrow=n_row) with open(resultPath+'/Loss.txt', 'a+') as f: print('i_'+str(iteration),file=f) print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]',file=f) print('---------ImageSpace--------',file=f) print('loss_imgs_info: %s'%loss_imgs_info,file=f) print('---------LatentSpace--------',file=f) print('loss_c_info: %s'%loss_c_info,file=f) if iteration % 5000 == 0: torch.save(E.state_dict(), resultPath1_2+'/E_model_ep%d_iter%d.pth'%(iteration//30000,iteration%30000)) if __name__ == "__main__": parser = argparse.ArgumentParser(description='the training args') parser.add_argument('--iterations', type=int, default=60001) parser.add_argument('--lr', type=float, default=0.0015) parser.add_argument('--beta_1', type=float, default=0.0) parser.add_argument('--batch_size', type=int, default=2) parser.add_argument('--experiment_dir', default=None) parser.add_argument('--checkpoint_dir_GAN', default='../checkpoint/stylegan_v1/ffhq1024/') parser.add_argument('--config_dir', default='./checkpoint/biggan/256/biggan-deep-256-config.json') parser.add_argument('--checkpoint_dir_E', default=None) parser.add_argument('--img_size',type=int, default=1024) parser.add_argument('--img_channels', type=int, default=3) parser.add_argument('--z_dim', type=int, default=512) parser.add_argument('--mtype', type=int, default=1) parser.add_argument('--start_features', type=int, default=16) args = parser.parse_args() if not os.path.exists('./result'): os.mkdir('./result') resultPath = args.experiment_dir if resultPath == None: resultPath = 
"./result/StyleGANv1-AlationStudy-Z" if not os.path.exists(resultPath): os.mkdir(resultPath) resultPath1_1 = resultPath+"/imgs" if not os.path.exists(resultPath1_1): os.mkdir(resultPath1_1) resultPath1_2 = resultPath+"/models" if not os.path.exists(resultPath1_2): os.mkdir(resultPath1_2) writer_path = os.path.join(resultPath, './summaries') if not os.path.exists(writer_path): os.mkdir(writer_path) writer = tensorboardX.SummaryWriter(writer_path) use_gpu = True device = torch.device("cuda" if use_gpu else "cpu") train(tensor_writer=writer, args = args)
true
true
f7f66e35448c318b0805c9e4488775b9b34665bd
10,677
py
Python
stashpay/rpc.py
voycey/python-stashpay
4f2961c7bf334bd0ff3740fa4e1c027c3ddf0795
[ "MIT" ]
null
null
null
stashpay/rpc.py
voycey/python-stashpay
4f2961c7bf334bd0ff3740fa4e1c027c3ddf0795
[ "MIT" ]
null
null
null
stashpay/rpc.py
voycey/python-stashpay
4f2961c7bf334bd0ff3740fa4e1c027c3ddf0795
[ "MIT" ]
null
null
null
# # Part of `python-stashpay` # # Copyright 2018 dustinface # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import re import subprocess import json import logging import re import copy import base64 try: import http.client as http except ImportError: import httplib as http try: import urllib.parse as urlparse except ImportError: import urlparse logger = logging.getLogger("stashpay.rpc") class RPCException(Exception): def __init__(self, code = None, message = None): super(RPCException, self).__init__() self.error = RPCError(code,message) def __str__(self): return str(self.error) class RPCError(object): def __init__(self, code = None, message = None): self.code = code if code else "?" self.message = message if message else "?" 
def __str__(self): return '{} => {}'.format(self.code, self.message) class RPCResponse(object): def __init__(self, data = None, error = None): self.data = data self.error = error def __contains__(self, key): return False if not self.data else key in self.data def __getitem__(self, key): return self.data[key] class RPCConfig(object): def __init__(self, user, password, url = "http://127.0.0.1", port = 9679, timeout = 20): self.port = port self.url = urlparse.urlparse(url) try: self.user = user.encode('utf8') except: self.user = user try: self.password = password.encode('utf8') except: self.password = password self.authHeader = b'Basic ' + base64.b64encode(self.user + b':' + self.password) self.timeout = timeout class StashpayRPC(object): def __init__(self, config): self.config = copy.deepcopy(config) self.connection = None def request(self, method, args = None): self.connection = http.HTTPConnection(self.config.url.hostname, self.config.port, timeout=self.config.timeout) post = json.dumps({'version': '1.1', 'method': method, 'params': args}) try: self.connection.request('POST', self.config.url.path, post, {'Host': self.config.url.hostname, 'Authorization': self.config.authHeader, 'Content-type': 'application/json'}) self.connection.sock.settimeout(self.config.timeout) response = self.connection.getresponse() except Exception as e: raise RPCException(10,'Request error - {}'.format(e)) else: if response is None: raise RPCException(11,'No response from server') if response.getheader('Content-Type') != 'application/json': raise RPCException(12, 'Non JSON response: {}, {}'.format(response.status, response.reason)) try: data = response.read().decode('utf8') response = json.loads(data) except: response = None if not response: raise RPCException(13, 'JSON response parse error') error = response['error'] if 'error' in response else None result = response['result'] if 'result' in response else None if error: raise 
RPCException(response['error']['code'],response['error']['message']) if not result: raise RPCException(14,' RPC result missing') return result def raw(self, method, args): response = RPCResponse() try: response.data = self.request(method, args) except RPCException as e: response.error = e.error logging.debug(method, exc_info=e) return response def validateAddress(self, address): cleanAddress = re.sub('[^A-Za-z0-9]+', '', address) response = RPCResponse() try: response.data = self.request('validateaddress', [address]) except RPCException as e: response.error = e.error logging.debug('validateaddress', exc_info=e) return response def getInfo(self): response = RPCResponse() try: response.data = self.request('getinfo') except RPCException as e: response.error = e.error logging.debug('getInfo', exc_info=e) return response def getBlockByHash(self, blockHash): response = RPCResponse() try: response.data = self.request('getblock', [blockHash]) except RPCException as e: response.error = e.error logging.debug('getBlockByHash', exc_info=e) return response def getBlockByNumber(self, number): response = RPCResponse() try: response.data = self.request('getblockhash', [number]) except RPCException as e: response.error = e.error logging.debug('getBlockByNumber', exc_info=e) else: if response.data: return self.getBlockByHash(response.data) return response def getRawTransaction(self, txhash): response = RPCResponse() try: response.data = self.request('getrawtransaction', [txhash, 1]) except RPCException as e: response.error = e.error logging.debug('getRawTransaction', exc_info=e) return response def getSyncStatus(self): response = RPCResponse() try: response.data = self.request('mnsync', ['status']) except RPCException as e: response.error = e.error logging.debug('mnsync', exc_info=e) else: # { # "AssetID": 999, # "AssetName": "SMARTNODE_SYNC_FINISHED", # "Attempt": 0, # "IsBlockchainSynced": true, # "IsMasternodeListSynced": true, # "IsWinnersListSynced": true, # "IsSynced": true, # 
"IsFailed": false # } if not response.data: logging.debug('getSyncStatus no status') elif not 'IsBlockchainSynced' in response.data: err = 'getSyncStatus no IsBlockchainSynced' response.data = None response.error = RPCError(16,err) logging.debug(err) elif not 'IsMasternodeListSynced' in response.data: err = 'getSyncStatus no IsMasternodeeListSynced' response.data = None response.error = RPCError(16,err) logging.debug(err) elif not 'IsWinnersListSynced' in response.data: err = 'getSyncStatus no IsWinnersListSynced' response.data = None response.error = RPCError(16,err) logging.debug(err) return response def getMasterNodeList(self, mode): response = RPCResponse() try: response.data = self.request('masternode', ['list', mode ]) except RPCException as e: response.error = e.error logging.debug('mnsync', exc_info=e) return response def unlockWallet(self, password, timeout = 200): response = RPCResponse() try: response.data = self.request('walletpassphrase', [password, timeout ]) except RPCException as e: # Missing RPC result is expected when unlocking if e.error.code != 14: response.error = e.error logging.debug('walletpassphrase', exc_info=e) else: response.error = None response.data = True return response def lockWallet(self): response = RPCResponse() try: response.data = self.request('walletlock') except RPCException as e: # Missing RPC result is expected when locking if e.error.code != 14: response.error = e.error logging.debug('walletlock', exc_info=e) else: response.error = None response.data = True return response def getAccounts(self): response = RPCResponse() try: response.data = self.request('listaccounts') except RPCException as e: response.error = e.error logging.debug('listaccounts', exc_info=e) return response def getAddressGroupings(self): response = RPCResponse() try: response.data = self.request('listaddressgroupings') except RPCException as e: response.error = e.error logging.debug('listaddressgroupings', exc_info=e) return response def 
signMessage(self, address, message): response = RPCResponse() try: response.data = self.request('signmessage', [address, message]) except RPCException as e: response.error = e.error logging.debug('signmessage', exc_info=e) return response def verifyMessage(self, address, message, signature): response = RPCResponse() try: response.data = self.request('verifymessage', [address, signature, message]) except RPCException as e: response.error = e.error logging.debug('verifymessage', exc_info=e) return response
29.332418
108
0.585089
import re import subprocess import json import logging import re import copy import base64 try: import http.client as http except ImportError: import httplib as http try: import urllib.parse as urlparse except ImportError: import urlparse logger = logging.getLogger("stashpay.rpc") class RPCException(Exception): def __init__(self, code = None, message = None): super(RPCException, self).__init__() self.error = RPCError(code,message) def __str__(self): return str(self.error) class RPCError(object): def __init__(self, code = None, message = None): self.code = code if code else "?" self.message = message if message else "?" def __str__(self): return '{} => {}'.format(self.code, self.message) class RPCResponse(object): def __init__(self, data = None, error = None): self.data = data self.error = error def __contains__(self, key): return False if not self.data else key in self.data def __getitem__(self, key): return self.data[key] class RPCConfig(object): def __init__(self, user, password, url = "http://127.0.0.1", port = 9679, timeout = 20): self.port = port self.url = urlparse.urlparse(url) try: self.user = user.encode('utf8') except: self.user = user try: self.password = password.encode('utf8') except: self.password = password self.authHeader = b'Basic ' + base64.b64encode(self.user + b':' + self.password) self.timeout = timeout class StashpayRPC(object): def __init__(self, config): self.config = copy.deepcopy(config) self.connection = None def request(self, method, args = None): self.connection = http.HTTPConnection(self.config.url.hostname, self.config.port, timeout=self.config.timeout) post = json.dumps({'version': '1.1', 'method': method, 'params': args}) try: self.connection.request('POST', self.config.url.path, post, {'Host': self.config.url.hostname, 'Authorization': self.config.authHeader, 'Content-type': 'application/json'}) self.connection.sock.settimeout(self.config.timeout) response = self.connection.getresponse() except Exception as e: raise 
RPCException(10,'Request error - {}'.format(e)) else: if response is None: raise RPCException(11,'No response from server') if response.getheader('Content-Type') != 'application/json': raise RPCException(12, 'Non JSON response: {}, {}'.format(response.status, response.reason)) try: data = response.read().decode('utf8') response = json.loads(data) except: response = None if not response: raise RPCException(13, 'JSON response parse error') error = response['error'] if 'error' in response else None result = response['result'] if 'result' in response else None if error: raise RPCException(response['error']['code'],response['error']['message']) if not result: raise RPCException(14,' RPC result missing') return result def raw(self, method, args): response = RPCResponse() try: response.data = self.request(method, args) except RPCException as e: response.error = e.error logging.debug(method, exc_info=e) return response def validateAddress(self, address): cleanAddress = re.sub('[^A-Za-z0-9]+', '', address) response = RPCResponse() try: response.data = self.request('validateaddress', [address]) except RPCException as e: response.error = e.error logging.debug('validateaddress', exc_info=e) return response def getInfo(self): response = RPCResponse() try: response.data = self.request('getinfo') except RPCException as e: response.error = e.error logging.debug('getInfo', exc_info=e) return response def getBlockByHash(self, blockHash): response = RPCResponse() try: response.data = self.request('getblock', [blockHash]) except RPCException as e: response.error = e.error logging.debug('getBlockByHash', exc_info=e) return response def getBlockByNumber(self, number): response = RPCResponse() try: response.data = self.request('getblockhash', [number]) except RPCException as e: response.error = e.error logging.debug('getBlockByNumber', exc_info=e) else: if response.data: return self.getBlockByHash(response.data) return response def getRawTransaction(self, txhash): response = 
RPCResponse() try: response.data = self.request('getrawtransaction', [txhash, 1]) except RPCException as e: response.error = e.error logging.debug('getRawTransaction', exc_info=e) return response def getSyncStatus(self): response = RPCResponse() try: response.data = self.request('mnsync', ['status']) except RPCException as e: response.error = e.error logging.debug('mnsync', exc_info=e) else: if not response.data: logging.debug('getSyncStatus no status') elif not 'IsBlockchainSynced' in response.data: err = 'getSyncStatus no IsBlockchainSynced' response.data = None response.error = RPCError(16,err) logging.debug(err) elif not 'IsMasternodeListSynced' in response.data: err = 'getSyncStatus no IsMasternodeeListSynced' response.data = None response.error = RPCError(16,err) logging.debug(err) elif not 'IsWinnersListSynced' in response.data: err = 'getSyncStatus no IsWinnersListSynced' response.data = None response.error = RPCError(16,err) logging.debug(err) return response def getMasterNodeList(self, mode): response = RPCResponse() try: response.data = self.request('masternode', ['list', mode ]) except RPCException as e: response.error = e.error logging.debug('mnsync', exc_info=e) return response def unlockWallet(self, password, timeout = 200): response = RPCResponse() try: response.data = self.request('walletpassphrase', [password, timeout ]) except RPCException as e: if e.error.code != 14: response.error = e.error logging.debug('walletpassphrase', exc_info=e) else: response.error = None response.data = True return response def lockWallet(self): response = RPCResponse() try: response.data = self.request('walletlock') except RPCException as e: if e.error.code != 14: response.error = e.error logging.debug('walletlock', exc_info=e) else: response.error = None response.data = True return response def getAccounts(self): response = RPCResponse() try: response.data = self.request('listaccounts') except RPCException as e: response.error = e.error logging.debug('listaccounts', 
exc_info=e) return response def getAddressGroupings(self): response = RPCResponse() try: response.data = self.request('listaddressgroupings') except RPCException as e: response.error = e.error logging.debug('listaddressgroupings', exc_info=e) return response def signMessage(self, address, message): response = RPCResponse() try: response.data = self.request('signmessage', [address, message]) except RPCException as e: response.error = e.error logging.debug('signmessage', exc_info=e) return response def verifyMessage(self, address, message, signature): response = RPCResponse() try: response.data = self.request('verifymessage', [address, signature, message]) except RPCException as e: response.error = e.error logging.debug('verifymessage', exc_info=e) return response
true
true
f7f66ea0c343370b1597351b2a532e4055504ff0
447
py
Python
ddd_driven/test_sulfuras.py
Neppord/bdd-ddd-gilded-rose
15b0b94a55bc9024b9e7b4b4746914f2fbd46380
[ "MIT" ]
null
null
null
ddd_driven/test_sulfuras.py
Neppord/bdd-ddd-gilded-rose
15b0b94a55bc9024b9e7b4b4746914f2fbd46380
[ "MIT" ]
null
null
null
ddd_driven/test_sulfuras.py
Neppord/bdd-ddd-gilded-rose
15b0b94a55bc9024b9e7b4b4746914f2fbd46380
[ "MIT" ]
null
null
null
from gilded_rose import GildedRose, Sulfuras def test_update_sell_in(): sulfuras_item = Sulfuras("Sulfuras, Hand of Ragnaros", 2, 10) items = [sulfuras_item] GildedRose(items).update_quality() assert sulfuras_item.sell_in == 2 def test_update_quality(): sulfuras_item = Sulfuras("Sulfuras, Hand of Ragnaros", 2, 80) items = [sulfuras_item] GildedRose(items).update_quality() assert sulfuras_item.quality == 80
26.294118
65
0.724832
from gilded_rose import GildedRose, Sulfuras def test_update_sell_in(): sulfuras_item = Sulfuras("Sulfuras, Hand of Ragnaros", 2, 10) items = [sulfuras_item] GildedRose(items).update_quality() assert sulfuras_item.sell_in == 2 def test_update_quality(): sulfuras_item = Sulfuras("Sulfuras, Hand of Ragnaros", 2, 80) items = [sulfuras_item] GildedRose(items).update_quality() assert sulfuras_item.quality == 80
true
true
f7f66f03ee67e70a27cc3e08e6836e235368a4c3
874
py
Python
tests/test_q_resource_helpers.py
xtuzy/enaml
a1b5c0df71c665b6ef7f61d21260db92d77d9a46
[ "BSD-3-Clause-Clear" ]
1,080
2015-01-04T14:29:34.000Z
2022-03-29T05:44:51.000Z
tests/test_q_resource_helpers.py
xtuzy/enaml
a1b5c0df71c665b6ef7f61d21260db92d77d9a46
[ "BSD-3-Clause-Clear" ]
308
2015-01-05T22:44:13.000Z
2022-03-30T21:19:18.000Z
tests/test_q_resource_helpers.py
xtuzy/enaml
a1b5c0df71c665b6ef7f61d21260db92d77d9a46
[ "BSD-3-Clause-Clear" ]
123
2015-01-25T16:33:48.000Z
2022-02-25T19:57:10.000Z
#------------------------------------------------------------------------------ # Copyright (c) 2013, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. #------------------------------------------------------------------------------ import pytest from utils import is_qt_available pytestmark = pytest.mark.skipif(not is_qt_available(), reason='Requires a Qt binding') from enaml.fontext import Font def test_QFont_from_Font(): # Regression test for PySide: QFont_from_Font was raising a TypeError # in its call to qfont.setStyle(font.style). This test passes if it # does not raise an exception. from enaml.qt.q_resource_helpers import QFont_from_Font f = Font(family="bold") qf = QFont_from_Font(f)
36.416667
79
0.590389
import pytest from utils import is_qt_available pytestmark = pytest.mark.skipif(not is_qt_available(), reason='Requires a Qt binding') from enaml.fontext import Font def test_QFont_from_Font(): from enaml.qt.q_resource_helpers import QFont_from_Font f = Font(family="bold") qf = QFont_from_Font(f)
true
true
f7f66f368a10dcc70fd364e6aa5ec9910e624bcb
4,661
py
Python
src/dcos_migrate/plugins/secret/plugin.py
fatz/dcos_migrate
0310c93b76dd0cd7149dbac335de870a30956a68
[ "Apache-2.0" ]
null
null
null
src/dcos_migrate/plugins/secret/plugin.py
fatz/dcos_migrate
0310c93b76dd0cd7149dbac335de870a30956a68
[ "Apache-2.0" ]
null
null
null
src/dcos_migrate/plugins/secret/plugin.py
fatz/dcos_migrate
0310c93b76dd0cd7149dbac335de870a30956a68
[ "Apache-2.0" ]
null
null
null
from dcos_migrate.plugins.plugin import MigratePlugin from dcos_migrate.plugins.cluster import ClusterPlugin from dcos_migrate.system import DCOSClient, BackupList, Backup, Manifest, ManifestList from kubernetes.client.models import V1Secret, V1ObjectMeta import urllib import base64 import logging from base64 import b64encode class DCOSSecretsService: def __init__(self, client: DCOSClient): self.client = client self.url = "{}/{}".format(self.client.dcos_url, 'secrets/v1') self.store = 'default' def list(self, path: str = ''): u = '{url}/secret/{store}/{path}?list=true'.format( url=self.url, store=urllib.parse.quote(self.store), path=urllib.parse.quote(path) ) r = self.client.get(u) r.raise_for_status() return r.json()['array'] def get(self, path: str, key: str): # There are two types of secrets: text and binary. Using `Accept: */*` # the returned `Content-Type` will be `application/octet-stream` for # binary secrets and `application/json` for text secrets. # # Returns the secret as: # { # "path": "...", # "key": "...", # "type": "{text|binary}", # "value": "base64(value)" # } full_path = (path + '/' + key).strip('/') url = self.url + '/secret/{store}/{path}'.format( store=urllib.parse.quote(self.store), path=urllib.parse.quote(full_path) ) r = self.client.get(url) r.raise_for_status() content_type = r.headers['Content-Type'] if content_type == 'application/octet-stream': response = { 'type': 'binary', 'value': base64.b64encode(r.content).decode('ascii') } else: assert content_type == 'application/json', content_type response = r.json() response['type'] = 'text' # Always encode the secret as base64, even when it is safe UTF-8 text. # This obscures the values to prevent unintentional exposure. response['value'] = base64.b64encode( response['value'].encode('utf-8')).decode('ascii') # Always add the `path` and `key` values to the JSON response. Ensure the key always has a # value by taking the last component of the path if necessary. 
if not key: parts = path.rsplit('/', 1) key = parts.pop() parts.append('') path = parts[0] response['path'] = path response['key'] = key return response class SecretPlugin(MigratePlugin): """docstring for SecretPlugin.""" plugin_name = "secret" depends_migrate = [ClusterPlugin.plugin_name] def __init__(self): super(SecretPlugin, self).__init__() def backup(self, client: DCOSClient, **kwargs) -> BackupList: backupList = BackupList() sec = DCOSSecretsService(client) path = "" keys = sec.list(path) if keys: for key in keys: secData = sec.get(path, key) backupList.append( Backup(self.plugin_name, Backup.renderBackupName(path+key), data=secData)) return backupList def migrate(self, backupList: BackupList, manifestList: ManifestList, **kwargs) -> ManifestList: ml = ManifestList() metadata = V1ObjectMeta() clusterManifests = manifestList.manifests(pluginName='cluster') if clusterManifests: # we expect a single manifest if clusterManifests[0][0]: # set default annotations from cluster metadata.annotations = clusterManifests[0][0].metadata.annotations for ba in backupList.backups(pluginName='secret'): logging.debug("Found backup {}".format(ba)) b = ba.data fullPath = "/".join(filter(None, [b["path"], b["key"]])) name = Manifest.renderManifestName(b["key"]) metadata.annotations["migration.dcos.d2iq.com/secrets/secretpath"] = fullPath metadata.name = name sec = V1Secret(metadata=metadata) sec.api_version = 'v1' sec.kind = 'Secret' sec.data = {} sec.data[name] = b64encode( b['value'].encode('ascii')).decode('ascii') manifest = Manifest(pluginName=self.plugin_name, manifestName=Manifest.renderManifestName(fullPath)) manifest.append(sec) ml.append(manifest) return ml
35.580153
100
0.57906
from dcos_migrate.plugins.plugin import MigratePlugin from dcos_migrate.plugins.cluster import ClusterPlugin from dcos_migrate.system import DCOSClient, BackupList, Backup, Manifest, ManifestList from kubernetes.client.models import V1Secret, V1ObjectMeta import urllib import base64 import logging from base64 import b64encode class DCOSSecretsService: def __init__(self, client: DCOSClient): self.client = client self.url = "{}/{}".format(self.client.dcos_url, 'secrets/v1') self.store = 'default' def list(self, path: str = ''): u = '{url}/secret/{store}/{path}?list=true'.format( url=self.url, store=urllib.parse.quote(self.store), path=urllib.parse.quote(path) ) r = self.client.get(u) r.raise_for_status() return r.json()['array'] def get(self, path: str, key: str): full_path = (path + '/' + key).strip('/') url = self.url + '/secret/{store}/{path}'.format( store=urllib.parse.quote(self.store), path=urllib.parse.quote(full_path) ) r = self.client.get(url) r.raise_for_status() content_type = r.headers['Content-Type'] if content_type == 'application/octet-stream': response = { 'type': 'binary', 'value': base64.b64encode(r.content).decode('ascii') } else: assert content_type == 'application/json', content_type response = r.json() response['type'] = 'text' response['value'] = base64.b64encode( response['value'].encode('utf-8')).decode('ascii') if not key: parts = path.rsplit('/', 1) key = parts.pop() parts.append('') path = parts[0] response['path'] = path response['key'] = key return response class SecretPlugin(MigratePlugin): plugin_name = "secret" depends_migrate = [ClusterPlugin.plugin_name] def __init__(self): super(SecretPlugin, self).__init__() def backup(self, client: DCOSClient, **kwargs) -> BackupList: backupList = BackupList() sec = DCOSSecretsService(client) path = "" keys = sec.list(path) if keys: for key in keys: secData = sec.get(path, key) backupList.append( Backup(self.plugin_name, Backup.renderBackupName(path+key), data=secData)) return backupList def 
migrate(self, backupList: BackupList, manifestList: ManifestList, **kwargs) -> ManifestList: ml = ManifestList() metadata = V1ObjectMeta() clusterManifests = manifestList.manifests(pluginName='cluster') if clusterManifests: if clusterManifests[0][0]: metadata.annotations = clusterManifests[0][0].metadata.annotations for ba in backupList.backups(pluginName='secret'): logging.debug("Found backup {}".format(ba)) b = ba.data fullPath = "/".join(filter(None, [b["path"], b["key"]])) name = Manifest.renderManifestName(b["key"]) metadata.annotations["migration.dcos.d2iq.com/secrets/secretpath"] = fullPath metadata.name = name sec = V1Secret(metadata=metadata) sec.api_version = 'v1' sec.kind = 'Secret' sec.data = {} sec.data[name] = b64encode( b['value'].encode('ascii')).decode('ascii') manifest = Manifest(pluginName=self.plugin_name, manifestName=Manifest.renderManifestName(fullPath)) manifest.append(sec) ml.append(manifest) return ml
true
true
f7f670cffd657a94a9c93767c76abe3554a18a6a
95,313
py
Python
TWLight/users/tests.py
aacaldwell/TWLight
68e6d0d81ddd52596025f15d2c9a75dcdf504734
[ "MIT" ]
67
2017-12-14T22:27:48.000Z
2022-03-13T18:21:31.000Z
TWLight/users/tests.py
aacaldwell/TWLight
68e6d0d81ddd52596025f15d2c9a75dcdf504734
[ "MIT" ]
433
2017-03-24T22:51:23.000Z
2022-03-31T19:36:22.000Z
TWLight/users/tests.py
aacaldwell/TWLight
68e6d0d81ddd52596025f15d2c9a75dcdf504734
[ "MIT" ]
105
2017-06-23T03:53:41.000Z
2022-03-30T17:24:29.000Z
# -*- coding: utf-8 -*- import copy from datetime import datetime, date, timedelta import json import re from unittest.mock import patch, Mock from urllib.parse import urlparse from django.conf import settings from django.contrib.auth.models import User, AnonymousUser from django.core.exceptions import ( PermissionDenied, SuspiciousOperation, ValidationError, ) from django.urls import resolve, reverse from django.core.management import call_command from django.test import TestCase, Client, RequestFactory from django.utils.translation import get_language from django.utils.html import escape from django.utils.timezone import now from TWLight.applications.factories import ApplicationFactory from TWLight.applications.models import Application from TWLight.resources.factories import PartnerFactory from TWLight.resources.filters import INSTANT, MULTI_STEP from TWLight.resources.models import Partner from TWLight.resources.tests import EditorCraftRoom from . import views from .oauth import OAuthBackend from .helpers.validation import validate_partners from .helpers.authorizations import get_all_bundle_authorizations from .helpers.wiki_list import WIKIS, LANGUAGE_CODES from .factories import EditorFactory, UserFactory from .groups import get_coordinators, get_restricted from .models import UserProfile, Editor, Authorization from .views import MyLibraryView from TWLight.users.helpers.editor_data import ( editor_valid, editor_account_old_enough, editor_enough_edits, editor_not_blocked, editor_reg_date, editor_bundle_eligible, ) FAKE_IDENTITY_DATA = {"query": {"userinfo": {"options": {"disablemail": 0}}}} FAKE_IDENTITY = { "editcount": 5000, "registered": "20151106154629", # Well before first commit. 
"blocked": False, "iss": urlparse(settings.TWLIGHT_OAUTH_PROVIDER_URL).scheme + urlparse(settings.TWLIGHT_OAUTH_PROVIDER_URL).netloc, "sub": 567823, "rights": ["deletion", "spaceflight", "autoconfirmed"], "groups": ["charismatic megafauna"], "email": "alice@example.com", "username": "alice", } FAKE_MERGED_ACCOUNTS = [ { "wiki": "enwiki", "url": "https://en.wikipedia.org", "timestamp": "2015-11-06T15:46:29Z", "method": "login", "editcount": 100, "registration": "2015-11-06T15:46:29Z", "groups": ["extendedconfirmed"], } ] FAKE_MERGED_ACCOUNTS_BLOCKED = [ { "wiki": "enwiki", "url": "https://en.wikipedia.org", "timestamp": "2015-11-06T15:46:29Z", "method": "login", "editcount": 100, "registration": "2015-11-06T15:46:29Z", "groups": ["extendedconfirmed"], "blocked": {"expiry": "infinity", "reason": "bad editor!"}, } ] FAKE_GLOBAL_USERINFO = { "home": "enwiki", "id": 567823, "registration": "2015-11-06T15:46:29Z", # Well before first commit. "name": "alice", "editcount": 5000, "merged": copy.copy(FAKE_MERGED_ACCOUNTS), } # CSRF middleware is helpful for site security, but not helpful for testing # the rendered output of a page. 
def remove_csrfmiddlewaretoken(rendered_html): csrfmiddlewaretoken_pattern = ( r"<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\".+\">" ) return re.sub(csrfmiddlewaretoken_pattern, "", rendered_html) class ViewsTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.client = Client() # User 1: regular Editor cls.username1 = "alice" cls.user_editor = UserFactory(username=cls.username1) cls.editor1 = EditorFactory(user=cls.user_editor) cls.editor1.wp_bundle_eligible = True cls.editor1.save() cls.url1 = reverse("users:editor_detail", kwargs={"pk": cls.editor1.pk}) # User 2: regular Editor cls.username2 = "bob" cls.user_editor2 = UserFactory(username=cls.username2) cls.editor2 = EditorFactory(user=cls.user_editor2) cls.url2 = reverse("users:editor_detail", kwargs={"pk": cls.editor2.pk}) # User 3: Site administrator cls.username3 = "carol" cls.user_superuser = UserFactory(username=cls.username3) cls.user_superuser.is_superuser = True cls.user_superuser.save() cls.editor3 = EditorFactory(user=cls.user_superuser) # User 4: Coordinator cls.username4 = "eve" cls.user_coordinator = UserFactory(username=cls.username4) cls.editor4 = EditorFactory(user=cls.user_coordinator) get_coordinators().user_set.add(cls.user_coordinator) # We should mock out any call to messages call in the view, since # RequestFactory (unlike Client) doesn't run middleware. If you # actually want to test that messages are displayed, use Client(), # and stop/restart the patcher. cls.message_patcher = patch("TWLight.applications.views.messages.add_message") cls.message_patcher.start() @classmethod def tearDownClass(cls): super().tearDownClass() cls.user_editor.delete() cls.editor1.delete() cls.user_editor2.delete() cls.editor2.delete() cls.user_superuser.delete() cls.editor3.delete() cls.user_coordinator.delete() cls.editor4.delete() cls.message_patcher.stop() def test_editor_detail_url_resolves(self): """ The EditorDetailView resolves. 
""" _ = resolve(self.url1) def test_anon_user_cannot_see_editor_details(self): """ If an AnonymousUser hits an editor page, they are redirected to login. """ response = self.client.get(self.url1) self.assertEqual(response.status_code, 302) self.assertEqual(urlparse(response.url).path, settings.LOGIN_URL) def test_editor_can_see_own_page(self): """Check that editors can see their own pages.""" factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_editor response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_user_view_no_coordinators(self): """Check that users with no coordinators can see their own pages.""" get_coordinators().user_set.remove(self.user_coordinator) factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_editor response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_editor_cannot_see_other_editor_page(self): """Editors cannot see other editors' pages.""" factory = RequestFactory() request = factory.get(self.url2) request.user = self.user_editor # Make sure the editor is not a coordinator, because coordinators *can* # see others' pages! 
coordinators = get_coordinators() try: assert self.user_editor not in coordinators.user_set.all() except AssertionError: coordinators.user_set.remove(self.user_editor) with self.assertRaises(PermissionDenied): _ = views.EditorDetailView.as_view()(request, pk=self.editor2.pk) def test_coordinator_access(self): """Coordinators can see someone else's page.""" factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_coordinator # Define a partner partner = PartnerFactory() # Editor applies to the partner app = ApplicationFactory( status=Application.PENDING, editor=self.editor1, partner=partner ) app.save() # Editor details should not be visible to just any coordinator try: response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.fail("Editor details should not be visible to just any coordinator.") except PermissionDenied: pass # Designate the coordinator partner.coordinator = request.user partner.save() # Editor details should be visible to the designated coordinator response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_site_admin_can_see_other_editor_page(self): """Site admins can see someone else's page.""" factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_superuser response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_editor_page_has_editor_data(self): """Expected editor personal data is in their page.""" factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_editor response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) content = response.render().content.decode("utf-8") # This uses default data from EditorFactory, except for the username, # which is randomly generated (hence has no default). 
self.assertIn(self.editor1.wp_username, content) self.assertIn("42", content) self.assertIn("Cat floofing, telemetry, fermentation", content) def test_my_applications_page_has_application_history(self): """Expected editor application oauth_data is in their page.""" app1 = ApplicationFactory( status=Application.PENDING, editor=self.user_editor.editor ) app2 = ApplicationFactory( status=Application.QUESTION, editor=self.user_editor.editor ) app3 = ApplicationFactory( status=Application.APPROVED, editor=self.user_editor.editor ) app4 = ApplicationFactory( status=Application.NOT_APPROVED, editor=self.user_editor.editor ) # Bundle applications shouldn't be listed on this page app5 = ApplicationFactory( status=Application.APPROVED, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) app6 = ApplicationFactory( status=Application.PENDING, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) app7 = ApplicationFactory( status=Application.INVALID, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) factory = RequestFactory() request = factory.get( reverse("users:my_applications", kwargs={"pk": self.editor1.pk}) ) request.user = self.user_editor response = views.ListApplicationsUserView.as_view()(request, pk=self.editor1.pk) self.assertEqual( set(response.context_data["object_list"]), {app1, app2, app3, app4} ) content = response.render().content.decode("utf-8") self.assertIn(escape(app1.partner.company_name), content) self.assertIn(escape(app2.partner.company_name), content) self.assertIn(escape(app3.partner.company_name), content) self.assertIn(escape(app4.partner.company_name), content) # No Bundle applications self.assertNotIn(escape(app5.partner.company_name), content) self.assertNotIn(escape(app6.partner.company_name), content) self.assertNotIn(escape(app7.partner.company_name), content) # We can't use assertTemplateUsed with RequestFactory 
(only with # Client), and testing that the rendered content is equal to an # expected string is too fragile. def test_withdraw_application(self): app = ApplicationFactory( status=Application.PENDING, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) factory = RequestFactory() request = factory.get( reverse("users:withdraw", kwargs={"pk": self.editor1.pk, "id": app.pk}) ) request.user = self.user_editor response = views.WithdrawApplication.as_view()( request, pk=self.editor1.pk, id=app.pk ) app.refresh_from_db() # withdrawing application should set date closed self.assertNotEqual(app.date_closed, None) self.assertEqual(app.status, Application.INVALID) def test_sent_application(self): app = ApplicationFactory( status=Application.SENT, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, sent_by=self.user_coordinator, ) factory = RequestFactory() request = factory.get( reverse("users:my_applications", kwargs={"pk": self.editor1.pk}) ) request.user = self.user_editor response = views.ListApplicationsUserView.as_view()( request, pk=self.editor1.pk, ) app.refresh_from_db() self.assertNotIn("Withdraw", response.render().content.decode("utf-8")) def test_return_authorization(self): # Simulate a valid user trying to return their access editor = EditorCraftRoom(self, Terms=True, Coordinator=False) partner = PartnerFactory(authorization_method=Partner.PROXY) app = ApplicationFactory( status=Application.SENT, editor=editor, partner=partner, sent_by=self.user_coordinator, ) authorization = Authorization.objects.get(user=editor.user, partners=partner) self.assertEqual(authorization.get_latest_app(), app) return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) return_form = response.context["form"] self.client.post(return_url, return_form.initial) yesterday = datetime.now().date() - timedelta(days=1) 
authorization.refresh_from_db() self.assertEqual(authorization.date_expires, yesterday) # Simulate an invalid user trying to return access of some other user someday = yesterday + timedelta(days=30) authorization.date_expires = someday authorization.save() EditorCraftRoom(self, Terms=True, Coordinator=False) return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) self.assertEqual(response.status_code, 403) response = self.client.post(return_url, return_form.initial) self.assertEqual(response.status_code, 403) authorization.refresh_from_db() self.assertEqual(authorization.date_expires, someday) def test_latest_application(self): # Create an editor with a session. editor = EditorCraftRoom(self, Terms=True, Coordinator=False) partner = PartnerFactory(authorization_method=Partner.PROXY) app = ApplicationFactory( status=Application.SENT, editor=editor, partner=partner, sent_by=self.user_coordinator, ) authorization = Authorization.objects.get(user=editor.user, partners=partner) self.assertEqual(authorization.get_latest_app(), app) # Simulate a valid user trying to return their access return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) return_form = response.context["form"] self.client.post(return_url, return_form.initial) yesterday = datetime.now().date() - timedelta(days=1) authorization.refresh_from_db() self.assertEqual(authorization.date_expires, yesterday) # Create a new application to the same partner (in reality this # is most likely to be a renewal) app_renewal = ApplicationFactory( status=Application.SENT, editor=editor, partner=partner, sent_by=self.user_coordinator, ) app_renewal.save() # return access authorization.refresh_from_db() return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) return_form = 
response.context["form"] self.client.post(return_url, return_form.initial) yesterday = datetime.now().date() - timedelta(days=1) authorization.refresh_from_db() self.assertEqual(authorization.date_expires, yesterday) # Renew again, but deny this time. app_renewal2 = ApplicationFactory(editor=editor, partner=partner) app_renewal2.status = Application.NOT_APPROVED app_renewal2.save() authorization.refresh_from_db() self.assertEqual(authorization.get_latest_app(), app_renewal) self.assertEqual(authorization.get_latest_sent_app(), app_renewal) def test_user_home_view_anon(self): """ If an AnonymousUser hits UserHomeView, they are redirected to login. """ factory = RequestFactory() request = factory.get(reverse("users:home")) request.user = AnonymousUser() response = views.UserHomeView.as_view()(request) self.assertEqual(response.status_code, 302) self.assertEqual(urlparse(response.url).path, settings.LOGIN_URL) def test_user_home_view_is_editor(self): """ If a User who is an editor hits UserHomeView, they see EditorDetailView. TODO: Change this test's assertions (they might break when the csrf token is rendered differently) """ user = UserFactory() editor = EditorFactory(user=user) factory = RequestFactory() home_request = factory.get(reverse("users:home")) home_request.user = user home_response = views.UserHomeView.as_view()(home_request) detail_request = factory.get( reverse("users:editor_detail", kwargs={"pk": editor.pk}) ) detail_request.user = user detail_response = views.EditorDetailView.as_view()(detail_request, pk=editor.pk) # We can't actually check that EditorDetailView was used by UserHomeView # directly, because its as_view function has already been processed # and all we have access to is a return value. So let's check that the # output of the two pages is the same - the user would have seen the # same thing on either page. 
self.assertEqual(home_response.status_code, 200) expected_detail_view = remove_csrfmiddlewaretoken( detail_response.rendered_content ) home_view = remove_csrfmiddlewaretoken(home_response.rendered_content) self.assertEqual(expected_detail_view, home_view) @patch("TWLight.users.views.UserDetailView.as_view") def test_user_home_view_non_editor(self, mock_view): """ A User who isn't an editor hitting UserHomeView sees UserDetailView. """ user = UserFactory(username="not_an_editor") self.assertFalse(hasattr(user, "editor")) factory = RequestFactory() request = factory.get(reverse("users:home")) request.user = user _ = views.UserHomeView.as_view()(request) # For this we can't even check that the rendered content is the same, # because we don't have a URL allowing us to render UserDetailView # correctly; we'll mock out its as_view function and make sure it got # called. mock_view.assert_called_once_with() def test_coordinator_restricted(self): # If a coordinator restricts their data processing # they should stop being a coordinator. restrict_url = reverse("users:restrict_data") coordinators = get_coordinators() restricted = get_restricted() # Double check that the coordinator still has the relevant group assert self.user_coordinator in coordinators.user_set.all() # Need a password so we can login self.user_coordinator.set_password("editor") self.user_coordinator.save() self.client = Client() session = self.client.session self.client.login(username=self.username4, password="editor") restrict = self.client.get(restrict_url, follow=True) restrict_form = restrict.context["form"] data = restrict_form.initial data["restricted"] = True data["submit"] = True agree = self.client.post(restrict_url, data) assert self.user_coordinator not in coordinators.user_set.all() assert self.user_coordinator in restricted.user_set.all() def test_user_delete(self): """ Verify that deleted users have no user object. 
""" delete_url = reverse("users:delete_data", kwargs={"pk": self.user_editor.pk}) # Need a password so we can login self.user_editor.set_password("editor") self.user_editor.save() self.client = Client() session = self.client.session self.client.login(username=self.username1, password="editor") submit = self.client.post(delete_url) assert not User.objects.filter(username=self.username1).exists() # Check that the associated Editor also got deleted. assert not Editor.objects.filter(user=self.user_editor).exists() def test_user_delete_authorizations(self): """ Verify that deleted user authorizations are expired and contain no user links """ delete_url = reverse("users:delete_data", kwargs={"pk": self.user_editor.pk}) # Need a password so we can login self.user_editor.set_password("editor") self.user_editor.save() self.client = Client() session = self.client.session self.client.login(username=self.username1, password="editor") partner = PartnerFactory() user_auth = Authorization( user=self.user_editor, authorizer=self.user_coordinator, date_authorized=date.today(), date_expires=date.today() + timedelta(days=30), ) user_auth.save() user_auth.partners.add(partner) submit = self.client.post(delete_url) user_auth.refresh_from_db() self.assertEqual(user_auth.date_expires, date.today() - timedelta(days=1)) def test_user_delete_bundle_authorizations(self): """ Verify that deleted user authorizations are expired and contain no user links """ delete_url = reverse("users:delete_data", kwargs={"pk": self.user_editor.pk}) # Need a password so we can login self.editor1.user.set_password("editor") self.editor1.user.save() bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) self.client = Client() session = self.client.session self.client.login(username=self.username1, password="editor") # Bundle authorization should be created self.editor1.update_bundle_authorization() 
self.editor1.refresh_from_db() bundle_auth = self.editor1.get_bundle_authorization self.assertTrue(bundle_auth.is_bundle) # Saving the bundle authorization id so we can query it after to make # sure it's been deleted bundle_auth_id = bundle_auth.pk submit = self.client.post(delete_url) editor_count = Editor.objects.filter(pk=self.editor1.pk).count() self.assertEqual(editor_count, 0) bundle_auth_count = Authorization.objects.filter(pk=bundle_auth_id).count() self.assertEqual(bundle_auth_count, 0) def test_user_data_download(self): """ Verify that if users try to download their personal data they are actually sent a file. """ # Need a password so we can login self.user_editor2.set_password("editor") self.user_editor2.save() self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post(self.url2, {"download": "Download"}) self.assertEqual( response.get("Content-Disposition"), "attachment; filename=user_data.json" ) def test_terms_of_use_on_editor_detail_page_show(self): """Editor who agreed term of use, can see checkbox to disagree""" user_agreed_TOU = UserFactory() user_agreed_TOU.userprofile.terms_of_use = True editor_agreed_TOU = EditorFactory(user=user_agreed_TOU) factory = RequestFactory() detail_request = factory.get( reverse("users:editor_detail", kwargs={"pk": editor_agreed_TOU.pk}) ) detail_request.user = user_agreed_TOU response = views.EditorDetailView.as_view()( detail_request, pk=editor_agreed_TOU.pk ) content = response.render().content.decode("utf-8") self.assertIn("By unchecking this box and clicking “Update", content) def test_terms_of_use_on_editor_detail_page_not_show(self): """Editor who hasn't agreed term of use, won't see checkbox to disagree""" user_not_agreed_TOU = UserFactory() user_not_agreed_TOU.userprofile.terms_of_use = False editor_not_agreed_TOU = EditorFactory(user=user_not_agreed_TOU) factory = RequestFactory() detail_request = factory.get( 
reverse("users:editor_detail", kwargs={"pk": editor_not_agreed_TOU.pk}) ) detail_request.user = user_not_agreed_TOU response = views.EditorDetailView.as_view()( detail_request, pk=editor_not_agreed_TOU.pk ) content = response.render().content.decode("utf-8") self.assertNotIn("By unchecking this box and clicking “Update", content) def test_user_email_form(self): """ Users have a form available on their user pages which enables them to control which emails they receive. Verify that they can post this form without error. """ # Need a password so we can login self.user_editor2.set_password("editor") self.user_editor2.save() self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post(self.url2, {"update_email_settings": ["Update"]}) # Should be successfully redirected back to the user page. self.assertEqual(response.status_code, 302) def test_user_email_preferences_disable_update(self): """ Verify that users can disable renewal notices and coordinator reminder emails in the email form. """ # Need a password so we can login self.user_editor2.set_password("editor") self.user_editor2.save() # Only coordinators get to change their reminder preferences get_coordinators().user_set.add(self.user_editor2) self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post(self.url2, {"update_email_settings": ["Update"]}) # Should be successfully redirected back to the user page. self.assertEqual(response.status_code, 302) self.user_editor2.userprofile.refresh_from_db() # We didn't send send_renewal_notices or send_pending_application_reminders # or send_discussion_application_reminders or send_approved_application_reminders # in POST to simulate an unchecked box. 
self.assertEqual(self.user_editor2.userprofile.send_renewal_notices, False) self.assertEqual(self.user_editor2.userprofile.pending_app_reminders, False) self.assertEqual(self.user_editor2.userprofile.discussion_app_reminders, False) self.assertEqual(self.user_editor2.userprofile.approved_app_reminders, False) def test_user_email_preferences_enable_update(self): """ Verify that users can email renewal notices and coordinator reminder emails in the email form. """ # Need a password so we can login self.user_editor2.set_password("editor") self.user_editor2.userprofile.send_renewal_notices = False self.user_editor2.userprofile.pending_app_reminders = False self.user_editor2.userprofile.discussion_app_reminders = False self.user_editor2.userprofile.approved_app_reminders = False self.user_editor2.save() # Only coordinators get to change their reminder preferences get_coordinators().user_set.add(self.user_editor2) self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post( self.url2, { "update_email_settings": ["Update"], "send_renewal_notices": ["on"], "send_pending_application_reminders": ["on"], "send_discussion_application_reminders": ["on"], "send_approved_application_reminders": ["on"], }, ) # Should be successfully redirected back to the user page. 
self.assertEqual(response.status_code, 302) self.user_editor2.userprofile.refresh_from_db() self.assertEqual(self.user_editor2.userprofile.send_renewal_notices, True) self.assertEqual(self.user_editor2.userprofile.pending_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.discussion_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.approved_app_reminders, True) def test_user_email_preferences_update_non_coordinator(self): # Need a password so we can login self.user_editor2.set_password("editor") self.user_editor2.userprofile.send_renewal_notices = False self.user_editor2.save() self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post( self.url2, {"update_email_settings": ["Update"], "send_renewal_notices": ["on"]}, ) # Should be successfully redirected back to the user page. self.assertEqual(response.status_code, 302) self.user_editor2.userprofile.refresh_from_db() self.assertEqual(self.user_editor2.userprofile.send_renewal_notices, True) # Only coordinators get to change their reminder preferences self.assertEqual(self.user_editor2.userprofile.pending_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.discussion_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.approved_app_reminders, True) class UserProfileModelTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.proxy_partner_1 = PartnerFactory(authorization_method=Partner.PROXY) cls.user_coordinator = UserFactory(username="Jon Snow") cls.editor = EditorFactory() cls.editor.wp_bundle_eligible = True cls.editor.save() get_coordinators().user_set.add(cls.user_coordinator) def test_user_profile_created(self): """ UserProfile should be created on user creation. 
""" user = UserFactory() # If the signal has not created a UserProfile, this line will throw # a DoesNotExist and the test will fail, which is what we want. UserProfile.objects.get(user=user) user.delete() def test_user_profile_sets_tou_to_false(self): # Don't use UserFactory, since it forces the related profile to have # agreed to the terms for simplicity in most tests! Use the user # creation function that we actually use in production. user = User.objects.create_user( username="profiler", email="profiler@example.com" ) profile = UserProfile.objects.get(user=user) self.assertEqual(profile.terms_of_use, False) user.delete() def test_user_profile_sets_use_wp_email_to_true(self): """ Verify that UserProfile.use_wp_email defaults to True. (Editor.update_from_wikipedia assumes this to be the case.) """ user = User.objects.create_user( username="profiler", email="profiler@example.com" ) profile = UserProfile.objects.get(user=user) self.assertEqual(profile.use_wp_email, True) user.delete() def test_add_favorite_collection_valid(self): """ Tests that a valid collection (one a user has access to) is successfully added to the favorites """ profile = UserProfile.objects.get(user=self.editor.user) # Create an authorization object so that the partner can be added to a # user's favorites collection app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) profile.favorites.add(self.bundle_partner_1) profile.favorites.add(self.bundle_partner_2) profile.favorites.add(self.proxy_partner_1) self.assertIn(self.proxy_partner_1, profile.favorites.all()) self.assertIn(self.bundle_partner_1, 
profile.favorites.all()) self.assertIn(self.bundle_partner_2, profile.favorites.all()) def test_add_favorite_expired_collection_valid(self): """ Tests that a valid collection (one a user has access to, even if it has expired) is successfully added to the favorites """ profile = UserProfile.objects.get(user=self.editor.user) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) someday = date.today() - timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) authorization.date_expires = someday authorization.save() profile.favorites.add(self.proxy_partner_1) self.assertIn(self.proxy_partner_1, profile.favorites.all()) def test_add_favorite_collection_invalid(self): """ Tests that an invalid collection (one a user does not has access to) is not added to the favorites and that a ValidationError is raised """ profile = UserProfile.objects.get(user=self.editor.user) with self.assertRaises(ValidationError): profile.favorites.add(self.proxy_partner_1) class EditorModelTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() for editor in Editor.objects.all(): # The test case succeeds when runs alone but fails when run # as part of the whole suite, because it grabs the wrong editor # object from the db. Kill them all with fire. # (Why does it do this? Because our queries look for editors by # username or wikipedia sub, not by foreign key - we have to use the # information that we have from the wikipedia API, which knows # nothing about our database. But the test runner doesn't actually # *delete* database objects between runs, for performance reasons; # it simply truncates them by nulling out their foreign keys. # This means that if you are searching for db objects by properties # other than foreign key, you *still find them*.) 
editor.delete() # Wiki 'zh-classical' is 'zh-classical.wikipedia.org'. It's also the # longest wiki name in wiki_list. cls.editor = EditorFactory( wp_username="editor_model_test", wp_rights=json.dumps(["cat floofing", "the big red button"]), wp_groups=json.dumps(["sysops", "bureaucrats"]), wp_registered=None, ) cls.editor.user.userprofile.terms_of_use = True cls.editor.user.userprofile.save() cls.editor.user.save() cls.editor.save() @classmethod def tearDownClass(cls): super().tearDownClass() cls.editor.delete() def test_encoder_works_with_special_character_username(self): test = Editor().encode_wp_username("editor model&test") self.assertEqual(test, "editor%20model%26test") def test_wp_user_page_url(self): expected_url = settings.TWLIGHT_OAUTH_PROVIDER_URL + "/User:editor_model_test" self.assertEqual(expected_url, self.editor.wp_user_page_url) def test_wp_link_central_auth(self): expected_url = "https://meta.wikimedia.org/w/index.php?title=Special%3ACentralAuth&target=editor_model_test" self.assertEqual(expected_url, self.editor.wp_link_central_auth) def test_get_wp_rights_display(self): expected_text = ["cat floofing", "the big red button"] self.assertEqual(expected_text, self.editor.get_wp_rights_display) def test_get_wp_groups_display(self): expected_text = ["sysops", "bureaucrats"] self.assertEqual(expected_text, self.editor.get_wp_groups_display) def test_is_user_valid(self): """ Users must: * Have >= 500 edits * Be active for >= 6 months * Have Special:Email User enabled * Not be blocked on any projects """ identity = copy.copy(FAKE_IDENTITY) global_userinfo = copy.copy(FAKE_GLOBAL_USERINFO) # Valid data global_userinfo["editcount"] = 500 self.editor.update_editcount(global_userinfo["editcount"]) enough_edits = editor_enough_edits(self.editor.wp_editcount) registered = editor_reg_date(identity, global_userinfo) account_old_enough = editor_account_old_enough(registered) not_blocked = editor_not_blocked(global_userinfo["merged"]) ignore_wp_blocks = False valid 
= editor_valid( enough_edits, account_old_enough, not_blocked, ignore_wp_blocks ) self.assertTrue(valid) # Too few edits global_userinfo["editcount"] = 499 self.editor.update_editcount(global_userinfo["editcount"]) enough_edits = editor_enough_edits(self.editor.wp_editcount) valid = editor_valid( enough_edits, account_old_enough, not_blocked, ignore_wp_blocks ) self.assertFalse(valid) # Oauth says the account is too new, but global_userinfo says it's fine global_userinfo["editcount"] = 500 self.editor.update_editcount(global_userinfo["editcount"]) enough_edits = editor_enough_edits(self.editor.wp_editcount) identity["registered"] = datetime.today().strftime("%Y%m%d%H%M%S") registered = editor_reg_date(identity, global_userinfo) account_old_enough = editor_account_old_enough(registered) valid = editor_valid( enough_edits, account_old_enough, not_blocked, ignore_wp_blocks ) self.assertTrue(valid) # Oauth says the account is fine, but global_userinfo says it's too new global_userinfo["editcount"] = 500 global_userinfo["registration"] = datetime.today() self.editor.update_editcount(global_userinfo["editcount"]) enough_edits = editor_enough_edits(self.editor.wp_editcount) identity["registered"] = (datetime.today() - timedelta(days=365)).strftime( "%Y%m%d%H%M%S" ) registered = editor_reg_date(identity, global_userinfo) account_old_enough = editor_account_old_enough(registered) valid = editor_valid( enough_edits, account_old_enough, not_blocked, ignore_wp_blocks ) self.assertTrue(valid) # Account created too recently global_userinfo["editcount"] = 500 global_userinfo["registration"] = datetime.today() self.editor.update_editcount(global_userinfo["editcount"]) enough_edits = editor_enough_edits(self.editor.wp_editcount) identity["registered"] = datetime.today().strftime("%Y%m%d%H%M%S") registered = editor_reg_date(identity, global_userinfo) account_old_enough = editor_account_old_enough(registered) valid = editor_valid( enough_edits, account_old_enough, not_blocked, 
            ignore_wp_blocks,
        )
        self.assertFalse(valid)

        # Edge case: this shouldn't work.
        # NOTE(review): 181 days is treated as *under* the 6-month minimum
        # account age; the cutoff is exercised one day apart below.
        almost_6_months_ago = datetime.today() - timedelta(days=181)
        global_userinfo["registration"] = almost_6_months_ago
        identity["registered"] = almost_6_months_ago.strftime("%Y%m%d%H%M%S")
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertFalse(valid)

        # Edge case: this should work.
        # 182 days counts as "at least 6 months old".
        almost_6_months_ago = datetime.today() - timedelta(days=182)
        global_userinfo["registration"] = almost_6_months_ago
        identity["registered"] = almost_6_months_ago.strftime("%Y%m%d%H%M%S")
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertTrue(valid)

        # Bad editor! No biscuit.
        # A merged-accounts payload containing blocks makes the editor invalid.
        global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED)
        not_blocked = editor_not_blocked(global_userinfo["merged"])
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertFalse(valid)

        # Aw, you're not that bad. Have a cookie.
        # The same blocked payload passes once ignore_wp_blocks is set.
        global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED)
        not_blocked = editor_not_blocked(global_userinfo["merged"])
        ignore_wp_blocks = True
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertTrue(valid)

    def test_is_user_bundle_eligible(self):
        """
        Users must:
        * Be valid
        * Have made 10 edits in the last 30 days (with some wiggle room, as you will see)
        """
        # Valid data
        lang = get_language()
        identity = copy.copy(FAKE_IDENTITY)
        identity["sub"] = self.editor.wp_sub
        identity["editcount"] = 500
        global_userinfo = copy.copy(FAKE_GLOBAL_USERINFO)
        global_userinfo["id"] = self.editor.wp_sub
        global_userinfo["editcount"] = 500

        # 1st time bundle check should always pass for a valid user.
        self.editor.update_from_wikipedia(
            identity, lang, global_userinfo=global_userinfo
        )
        self.editor.refresh_from_db()
        self.assertTrue(self.editor.wp_bundle_eligible)

        # A valid user should pass up to 30 days after their first login, even if they haven't made anymore edits.
        # NOTE(review): each iteration advances wp_editcount_updated by just
        # under a day with an unchanged editcount — confirm loop body ordering
        # against the original file (structure reconstructed from mangled text).
        for day in range(29):
            self.editor.update_from_wikipedia(
                identity,
                lang,
                global_userinfo,
                self.editor.wp_editcount_updated + timedelta(days=1),
            )
            self.editor.update_editcount(
                global_userinfo["editcount"],
                self.editor.wp_editcount_updated + timedelta(hours=23, minutes=59),
            )
            self.editor.refresh_from_db()
            self.assertTrue(self.editor.wp_bundle_eligible)

        # A valid user should fail 30 days after their last edit.
        self.editor.update_from_wikipedia(
            identity,
            lang,
            global_userinfo,
            self.editor.wp_editcount_updated + timedelta(minutes=1),
        )
        self.editor.refresh_from_db()
        self.assertFalse(self.editor.wp_bundle_eligible)

        # A valid user should pass if they have made enough recent edits.
        # 510 - 500 = 10 new edits, exactly the required recent-edit count.
        global_userinfo["editcount"] = 510
        self.editor.update_from_wikipedia(
            identity,
            lang,
            global_userinfo,
            self.editor.wp_editcount_updated + timedelta(minutes=1),
        )
        self.editor.refresh_from_db()
        self.assertTrue(self.editor.wp_bundle_eligible)

        # Bad editor! No biscuit, even if you have enough edits.
        # A block revokes eligibility, but editcount bookkeeping still updates.
        global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED)
        self.editor.update_from_wikipedia(
            identity,
            lang,
            global_userinfo,
            self.editor.wp_editcount_updated + timedelta(minutes=1),
        )
        self.editor.refresh_from_db()
        self.assertEqual(self.editor.wp_editcount, 510)
        self.assertEqual(
            self.editor.wp_editcount_prev(
                current_datetime=self.editor.wp_editcount_updated
            ),
            500,
        )
        self.assertFalse(self.editor.wp_bundle_eligible)

    def test_update_bundle_authorization_creation(self):
        """
        update_bundle_authorization() should create a new bundle authorization
        if one didn't exist when the user is bundle eligible.
""" editor = EditorFactory() bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) # Check we don't already have a Bundle authorization with self.assertRaises(Authorization.DoesNotExist): bundle_authorization = Authorization.objects.get( user=editor.user, partners__authorization_method=Partner.BUNDLE ) editor.wp_bundle_eligible = True editor.save() editor.update_bundle_authorization() bundle_authorization = Authorization.objects.filter( user=editor.user, partners__authorization_method=Partner.BUNDLE ).distinct() # We should now have created a single authorization to # Bundle partners. self.assertEqual(bundle_authorization.count(), 1) def test_update_bundle_authorization_expiry(self): """ update_bundle_authorization() should expire existing bundle authorizations if the user is no longer eligible """ editor = EditorFactory() bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) editor.wp_bundle_eligible = True editor.save() editor.update_bundle_authorization() bundle_authorization = Authorization.objects.filter( user=editor.user, partners__authorization_method=Partner.BUNDLE ).distinct() editor.wp_bundle_eligible = False editor.save() editor.update_bundle_authorization() bundle_authorization = Authorization.objects.filter( user=editor.user, partners__authorization_method=Partner.BUNDLE ).distinct() # Authorization should still exist self.assertEqual(bundle_authorization.count(), 1) # But it should have now expired self.assertEqual( bundle_authorization.first().date_expires, date.today() - timedelta(days=1) ) def test_update_bundle_authorization_user_eligible_again(self): """ update_bundle_authorization() should undo expiry of existing bundle authorizations if the user is now eligible again """ editor = EditorFactory() bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) 
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        # Eligible -> authorization created.
        editor.wp_bundle_eligible = True
        editor.save()

        editor.update_bundle_authorization()

        # Ineligible -> authorization expired.
        editor.wp_bundle_eligible = False
        editor.save()

        editor.update_bundle_authorization()

        # Marking them as eligible a 2nd time should update their
        # expired authorization to remove the expiry date.
        editor.wp_bundle_eligible = True
        editor.save()

        editor.update_bundle_authorization()

        bundle_authorization = Authorization.objects.filter(
            user=editor.user, partners__authorization_method=Partner.BUNDLE
        ).distinct()

        # Authorization should still exist
        self.assertEqual(bundle_authorization.count(), 1)
        # It should have no expiry date, i.e. it's now active again.
        self.assertEqual(bundle_authorization.get().date_expires, None)

    def test_wp_bundle_authorized_no_bundle_auth(self):
        """
        If a user has no authorization to Bundle resources,
        wp_bundle_authorized should return False
        """
        editor = EditorFactory()

        self.assertFalse(editor.wp_bundle_authorized)

    def test_wp_bundle_authorized_true(self):
        """
        If a user has an active authorization to Bundle resources,
        wp_bundle_authorized should return True
        """
        editor = EditorFactory()
        # Partners created for side effect only; the locals are unused.
        bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE)
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        editor.wp_bundle_eligible = True
        editor.save()

        # Create Bundle auth for this user
        editor.update_bundle_authorization()

        self.assertTrue(editor.wp_bundle_authorized)

    def test_wp_bundle_authorized_false(self):
        """
        If a user has an expired authorization to Bundle resources,
        wp_bundle_authorized should return False
        """
        editor = EditorFactory()
        bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE)
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        editor.wp_bundle_eligible = True
        editor.save()

        # Create Bundle auth for this user
        editor.update_bundle_authorization()

        editor.wp_bundle_eligible = False
        editor.save()

        # Expire the user's auth
        editor.update_bundle_authorization()

        self.assertFalse(editor.wp_bundle_authorized)

    def test_update_from_wikipedia(self):
        """
        update_from_wikipedia() should copy identity/global_userinfo fields
        onto the Editor (username, rights, groups, editcount, email,
        registration date) and raise SuspiciousOperation if the unique
        Wikipedia ID (sub) changes between calls.
        """
        identity = {}
        identity["username"] = "evil_dr_porkchop"
        # Users' unique WP IDs should not change across API calls, but are
        # needed by update_from_wikipedia.
        identity["sub"] = self.editor.wp_sub
        identity["rights"] = ["deletion", "spaceflight"]
        identity["groups"] = ["charismatic megafauna"]
        # We should now be ignoring the oauth editcount
        identity["editcount"] = 42
        identity["email"] = "porkchop@example.com"
        identity["iss"] = "zh-classical.wikipedia.org"
        identity["registered"] = "20130205230142"
        # validity
        identity["blocked"] = False

        global_userinfo = {}
        global_userinfo["home"] = "zh_classicalwiki"
        global_userinfo["id"] = identity["sub"]
        global_userinfo["registration"] = "2013-02-05T23:01:42Z"
        global_userinfo["name"] = identity["username"]
        # We should now be using the global_userinfo editcount
        global_userinfo["editcount"] = 960
        global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED)

        # Don't change self.editor, or other tests will fail! Make a new one
        # to test instead.
        new_editor = EditorFactory(wp_registered=None)
        new_identity = dict(identity)
        new_global_userinfo = dict(global_userinfo)
        new_identity["sub"] = new_editor.wp_sub
        new_global_userinfo["id"] = new_identity["sub"]

        lang = get_language()
        new_editor.update_from_wikipedia(
            new_identity, lang, new_global_userinfo
        )  # This call also saves the editor

        # The editor's fields must mirror the fake API payloads:
        # editcount comes from global_userinfo (960), not oauth (42).
        self.assertEqual(new_editor.wp_username, "evil_dr_porkchop")
        self.assertEqual(new_editor.wp_rights, json.dumps(["deletion", "spaceflight"]))
        self.assertEqual(new_editor.wp_groups, json.dumps(["charismatic megafauna"]))
        self.assertEqual(new_editor.wp_editcount, 960)
        self.assertEqual(new_editor.user.email, "porkchop@example.com")
        self.assertEqual(new_editor.wp_registered, datetime(2013, 2, 5).date())

        # Now check what happens if their wikipedia ID number has changed - this
        # should throw an error as we can no longer verify they're the same
        # editor.
        with self.assertRaises(SuspiciousOperation):
            new_identity["sub"] = new_editor.wp_sub + 1
            new_global_userinfo["id"] = new_identity["sub"]

            new_editor.update_from_wikipedia(
                new_identity, lang, new_global_userinfo
            )  # This call also saves the editor


class OAuthTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        # Prevent failures due to side effects from database artifacts.
        for editor in Editor.objects.all():
            editor.delete()

    @patch("urllib.request.urlopen")
    def test_create_user_and_editor(self, mock_urlopen):
        """
        OAuthBackend._create_user_and_editor() should:
        * create a user
            * with a suitable username and email
            * without a password
        * And a matching editor
        """
        oauth_backend = OAuthBackend()
        oauth_data = FAKE_IDENTITY_DATA
        identity = FAKE_IDENTITY

        # Serve the fake identity payload for every urlopen().read() call.
        # NOTE(review): side_effect provides 7 responses — presumably the
        # number of reads the backend performs; confirm against OAuthBackend.
        mock_response = Mock()
        mock_response.read.side_effect = [json.dumps(oauth_data)] * 7
        mock_urlopen.return_value = mock_response

        user, editor = oauth_backend._create_user_and_editor(identity)

        # Username is the stringified WP sub; no usable password is set.
        self.assertEqual(user.email, "alice@example.com")
        self.assertEqual(user.username, "567823")
        self.assertFalse(user.has_usable_password())

        self.assertEqual(editor.user, user)
        self.assertEqual(editor.wp_sub, 567823)

    # We won't test the fields set by update_from_wikipedia, as they are
    # tested elsewhere.

    # We mock out this function for two reasons:
    # 1) To prevent its call to an external API, which we would have otherwise
    #    had to mock anyway;
    # 2) So we can assert that it was called.
    @patch("TWLight.users.models.Editor.update_from_wikipedia")
    def test_get_and_update_user_from_identity_existing_user(self, mock_update):
        """
        OAuthBackend._get_and_update_user_from_identity() should:
        * If there is an Editor whose wp_sub = identity['sub']:
            * Return the user FKed onto that
            * Return created = False
            * Call Editor.update_from_wikipedia
        """
        # Make sure the test user has the username and language anticipated by our backend.
        username = FAKE_IDENTITY["sub"]
        lang = get_language()
        existing_user = UserFactory(username=username)
        params = {"user": existing_user, "wp_sub": FAKE_IDENTITY["sub"]}
        _ = EditorFactory(**params)

        oauth_backend = OAuthBackend()
        user, created = oauth_backend._get_and_update_user_from_identity(FAKE_IDENTITY)

        # The pre-existing user is returned, not a fresh one.
        self.assertFalse(created)
        self.assertTrue(hasattr(user, "editor"))
        self.assertEqual(user, existing_user)

        mock_update.assert_called_once_with(FAKE_IDENTITY, lang)

    @patch("TWLight.users.models.Editor.update_from_wikipedia")
    def test_get_and_update_user_from_identity_new_user(self, mock_update):
        """
        OAuthBackend._get_and_update_user_from_identity() should:
        * Otherwise:
            * Return a new user
            * Return created = True
            * Call Editor.update_from_wikipedia
        """
        oauth_backend = OAuthBackend()
        identity = copy.copy(FAKE_IDENTITY)
        lang = get_language()
        # Pick a sub guaranteed not to collide with an existing Editor.
        new_sub = 57381037
        identity["sub"] = new_sub
        self.assertFalse(Editor.objects.filter(wp_sub=new_sub).count())

        user, created = oauth_backend._get_and_update_user_from_identity(identity)

        self.assertTrue(created)
        self.assertTrue(hasattr(user, "editor"))
        self.assertEqual(user.editor.wp_sub, new_sub)

        mock_update.assert_called_once_with(identity, lang)


class TermsTestCase(TestCase):
    def test_terms_page_displays(self):
        """
        Terms page should display for authenticated users.

        We had a bug where attempting to view the page caused a 500 error.
        """
        _ = User.objects.create_user(username="termstestcase", password="bar")
        url = reverse("terms")

        c = Client()
        c.login(username="termstestcase", password="bar")
        response = c.get(url)

        self.assertEqual(response.status_code, 200)


class HelpersTestCase(TestCase):
    """
    We list some things in .helpers.wiki_list, but we should test to make
    sure they are kept in sync.
Formats: WIKIS: ('ab', 'ab.wikipedia.org') LANGUAGE_CODES: 'ab': 'Abkhazian' """ def test_wikis_match_language_codes(self): WIKIS_LANGUAGES = set([wiki[0] for wiki in WIKIS]) LANGUAGES = set(LANGUAGE_CODES.keys()) self.assertEqual(WIKIS_LANGUAGES, LANGUAGES) class AuthorizationsHelpersTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.bundle_partner_3 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.proxy_partner_1 = PartnerFactory(authorization_method=Partner.PROXY) cls.proxy_partner_2 = PartnerFactory(authorization_method=Partner.PROXY) def test_validate_partners_for_bundle_auth(self): """ Passing a queryset of partners which are all set to the BUNDLE authorization method should raise no errors """ partner_queryset = Partner.objects.filter(authorization_method=Partner.BUNDLE) try: validation = validate_partners(partner_queryset) except ValidationError: self.fail("validate_partners() raised ValidationError unexpectedly.") def test_validate_partners_for_mixed_auth_types(self): """ Passing a queryset with both BUNDLE and PROXY authorization types to validate_partners() should raise a ValidationError """ partner_queryset = Partner.objects.filter( authorization_method__in=[Partner.BUNDLE, Partner.PROXY] ) with self.assertRaises(ValidationError): validate_partners(partner_queryset) def test_validate_partners_for_wrong_auth_type(self): """ Passing a queryset with multiple PROXY partners to validate_partners() should raise a ValidationError """ partner_queryset = Partner.objects.filter(authorization_method=Partner.PROXY) with self.assertRaises(ValidationError): validate_partners(partner_queryset) def test_get_all_bundle_authorizations(self): """ The get_all_bundle_authorizations() helper function should return a Queryset of all authorizations for the Library Bundle, both active and 
not. """ editor = EditorFactory() editor.wp_bundle_eligible = True editor.save() # This should create an authorization linked to # bundle partners. editor.update_bundle_authorization() all_auths = get_all_bundle_authorizations() # One editor has Bundle auths, so this should be a # Queryset with 1 entry. self.assertEqual(all_auths.count(), 1) class ManagementCommandsTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() """ Creates a bundle-eligible editor. Returns ------- None """ cls.editor = EditorFactory() cls.editor.wp_bundle_eligible = True cls.editor.update_editcount(42, now() - timedelta(days=30)) cls.editor.wp_account_old_enough = True cls.editor.user.userprofile.terms_of_use = True cls.editor.user.userprofile.save() cls.editor.user.save() cls.editor.save() cls.global_userinfo_editor = { "home": "enwiki", "id": cls.editor.wp_sub, "registration": "2015-11-06T15:46:29Z", # Well before first commit. "name": "user328", "editcount": 5000, "merged": copy.copy(FAKE_MERGED_ACCOUNTS), } def test_user_update_eligibility_command_valid(self): """ user_update_eligibility command should check and update Bundle eligible editors correctly. Returns ------- None """ # 1st time bundle check should always pass for a valid editor. self.assertTrue(self.editor.wp_bundle_eligible) # A valid editor should pass editcount checks for 30 days after their first login, even if they haven't made any more edits. 
for day in range(30): call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=1) ), wp_username=self.editor.wp_username, global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertEqual(self.editor.wp_editcount, 5000) self.assertEqual( self.editor.wp_editcount_prev( current_datetime=self.editor.wp_editcount_updated ), 42, ) self.assertEqual( self.editor.wp_editcount_recent( current_datetime=self.editor.wp_editcount_updated ), 4958, ) self.assertTrue(self.editor.wp_bundle_eligible) # A valid Editor should fail 31 days after their last edit. call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=1) ), global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertFalse(self.editor.wp_bundle_eligible) # A valid Editor should then pass if they make at least 10 edits. self.global_userinfo_editor["editcount"] = 5010 call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(minutes=1) ), global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertEqual(self.editor.wp_editcount, 5010) self.assertEqual( self.editor.wp_editcount_prev( current_datetime=self.editor.wp_editcount_updated ), 5000, ) self.assertEqual( self.editor.wp_editcount_recent(self.editor.wp_editcount_updated), 10 ) self.assertTrue(self.editor.wp_bundle_eligible) # Editors whose editcount has been updated within the last 30 days should be left alone. 
call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=29, hours=23, minutes=59, seconds=59) ), global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertEqual(self.editor.wp_editcount, 5010) self.assertEqual( self.editor.wp_editcount_prev( current_datetime=self.editor.wp_editcount_updated ), 5000, ) self.assertEqual( self.editor.wp_editcount_recent( current_datetime=self.editor.wp_editcount_updated ), 10, ) self.assertTrue(self.editor.wp_bundle_eligible) def test_user_update_eligibility_command_terms_not_accepted(self): """ Editors who don't agree to terms are not bundle eligible. Returns ------- None """ # The editor hasn't accepted the terms of use self.editor.user.userprofile.terms_of_use = False self.editor.user.userprofile.save() self.editor.user.save() self.editor.save() self.assertTrue(self.editor.wp_bundle_eligible) call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=1) ), wp_username=self.editor.wp_username, global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertFalse(self.editor.wp_bundle_eligible) class MyLibraryViewsTest(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.bundle_partner_1 = PartnerFactory( authorization_method=Partner.BUNDLE, new_tags={"tags": ["earth-sciences_tag"]}, searchable=Partner.SEARCHABLE, ) cls.bundle_partner_2 = PartnerFactory( authorization_method=Partner.BUNDLE, new_tags={"tags": ["art_tag"]}, searchable=Partner.PARTIALLY_SEARCHABLE, ) cls.bundle_partner_3 = PartnerFactory( authorization_method=Partner.BUNDLE, searchable=Partner.PARTIALLY_SEARCHABLE, ) cls.bundle_partner_3.new_tags = {"tags": ["art_tag"]} cls.bundle_partner_3.save() cls.bundle_partner_4 = PartnerFactory( authorization_method=Partner.BUNDLE, searchable=Partner.SEARCHABLE, ) cls.bundle_partner_4.new_tags = {"tags": ["multidisciplinary_tag"]} 
cls.bundle_partner_4.save() cls.proxy_partner_1 = PartnerFactory( authorization_method=Partner.PROXY, searchable=Partner.SEARCHABLE, ) cls.proxy_partner_1.new_tags = {"tags": ["earth-sciences_tag"]} cls.proxy_partner_1.save() cls.proxy_partner_2 = PartnerFactory( authorization_method=Partner.PROXY, searchable=Partner.SEARCHABLE, ) cls.proxy_partner_2.new_tags = {"tags": ["earth-sciences_tag"]} cls.proxy_partner_2.save() cls.proxy_partner_3 = PartnerFactory(authorization_method=Partner.PROXY) cls.proxy_partner_3.new_tags = {"tags": ["multidisciplinary_tag"]} cls.proxy_partner_3.save() cls.email_partner_1 = PartnerFactory(authorization_method=Partner.EMAIL) cls.email_partner_2 = PartnerFactory(authorization_method=Partner.EMAIL) cls.user_coordinator = UserFactory(username="Jon Snow") cls.editor = EditorFactory() cls.editor.wp_bundle_eligible = True cls.editor.save() get_coordinators().user_set.add(cls.user_coordinator) def test_user_collections(self): """ Tests that only user collections are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = 
response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.bundle_partner_3.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_4.company_name), content) # Even though this partner is not visible, it still appears in the HTML # render self.assertIn(escape(self.proxy_partner_2.company_name), content) self.assertIn(escape(self.proxy_partner_3.company_name), content) def test_user_collections_show_expiry_date_extend(self): """ Tests that the expiry date and the Extend button are shown """ app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) someday = date.today() + timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) authorization.date_expires = someday authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") someday_fmt = datetime.strftime(someday, "%b %d, %Y") self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(someday_fmt, content) self.assertIn("Extend", content) def test_user_collections_show_expiry_date_renew(self): """ Tests that the expiry date and the Renew button are shown """ app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) someday = date.today() - timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) authorization.date_expires = someday authorization.save() factory = 
RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") someday_fmt = datetime.strftime(someday, "%b %d, %Y") self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(someday_fmt, content) self.assertIn("Renew", content) def test_user_collections_show_expiry_date_not_shown(self): """ Tests that the expiry date is not shown """ app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) authorization.date_expires = None authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn("Expiry date: ", content) def test_user_collections_has_open_application(self): """ Tests that the Go to application button is shown when an application is open """ old_app = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.PENDING, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = 
response.render().content.decode("utf-8") self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn("Go to application", content) def test_collection_filters_art_tag(self): """ Tests that only user collections that match the filter are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_art_tag_param = "{url}?tags=art_tag".format(url=url) request = factory.get(url_with_art_tag_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.bundle_partner_3.company_name), content) # Multidisciplinary partners should also appear when filtering self.assertIn(escape(self.bundle_partner_4.company_name), content) self.assertIn(escape(self.proxy_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) def test_collection_filters_earth_sciences_tag(self): """ Tests that only user collections 
that match the filter are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_earth_sciences_tag_param = "{url}?tags=earth-sciences_tag".format( url=url ) request = factory.get(url_with_earth_sciences_tag_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertNotIn(escape(self.bundle_partner_2.company_name), content) self.assertNotIn(escape(self.bundle_partner_3.company_name), content) # Multidisciplinary partners should also appear when filtering self.assertIn(escape(self.bundle_partner_4.company_name), content) self.assertIn(escape(self.proxy_partner_3.company_name), content) self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_2.company_name), content) def test_collection_show_waitlisted_badge(self): """ Tests that the Waitlisted badge is shown because the authorization has expired """ waitlisted_partner = PartnerFactory( authorization_method=Partner.PROXY, status=Partner.WAITLIST ) app_proxy_partner_1 = ApplicationFactory( 
status=Application.SENT, editor=self.editor, partner=waitlisted_partner, sent_by=self.user_coordinator, ) someday = date.today() - timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=waitlisted_partner ) authorization.date_expires = someday authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(waitlisted_partner.company_name), content) self.assertIn("Waitlisted", content) def test_collection_dont_show_waitlisted_badge(self): """ Tests that the Waitlisted badge is not shown because the authorization has not expired """ waitlisted_partner = PartnerFactory( authorization_method=Partner.PROXY, status=Partner.WAITLIST ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=waitlisted_partner, sent_by=self.user_coordinator, ) someday = date.today() + timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=waitlisted_partner ) authorization.date_expires = someday authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(waitlisted_partner.company_name), content) self.assertNotIn("Waitlisted", content) def test_collection_show_not_available_badge(self): """ Tests that the Not Available badge is shown """ not_available_partner = PartnerFactory( authorization_method=Partner.PROXY, status=Partner.NOT_AVAILABLE ) # Make the user staff so they can see unavailable collections self.editor.user.is_staff = True self.editor.user.save() self.editor.save() factory = RequestFactory() 
url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(not_available_partner.company_name), content) self.assertIn("Not Available", content) def test_user_not_eligible_eligibility_modal_shown(self): """ Tests that, when a user is not eligible to access the library, the eligibility modal will be shown """ # Make the user not eligible so they can see the eligibility modal self.editor.wp_bundle_eligible = False self.editor.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") eligibility_message = "Sorry, your Wikipedia account doesn’t currently qualify to access The Wikipedia Library." 
self.assertIn(eligibility_message, content) def test_collection_filters_searchable(self): """ Tests that only user collections that match the filter are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_searchable_param = "{url}?searchable={searchable}".format( url=url, searchable=Partner.SEARCHABLE ) request = factory.get(url_with_searchable_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_4.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_2.company_name), content) self.assertNotIn(escape(self.bundle_partner_2.company_name), content) self.assertNotIn(escape(self.bundle_partner_3.company_name), content) self.assertNotIn(escape(self.proxy_partner_3.company_name), content) def test_collection_filters_partially_searchable(self): """ Tests that only user collections that match the filter are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, 
editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_searchable_param = "{url}?searchable={searchable}".format( url=url, searchable=Partner.PARTIALLY_SEARCHABLE ) request = factory.get(url_with_searchable_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.bundle_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.bundle_partner_4.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) self.assertNotIn(escape(self.proxy_partner_3.company_name), content) def test_collection_filters_not_searchable(self): """ Tests that only user collections that match the filter are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) 
app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_searchable_param = "{url}?searchable={searchable}".format( url=url, searchable=Partner.NOT_SEARCHABLE ) request = factory.get(url_with_searchable_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.proxy_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.bundle_partner_2.company_name), content) self.assertNotIn(escape(self.bundle_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_4.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) def test_instant_access_filter(self): """ Tests that only instant access collections are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) app_proxy_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, 
partner=self.proxy_partner_2, sent_by=self.user_coordinator, ) app_email_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.email_partner_1, sent_by=self.user_coordinator, ) app_email_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_access_param = "{url}?access={access}".format(url=url, access=INSTANT) request = factory.get(url_with_access_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_2.company_name), content) self.assertNotIn(escape(self.email_partner_1.company_name), content) self.assertNotIn(escape(self.email_partner_2.company_name), content) def test_multi_step_access_filter(self): """ Tests that only instant access collections are shown """ app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) app_proxy_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_2, sent_by=self.user_coordinator, ) app_email_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.email_partner_1, sent_by=self.user_coordinator, ) 
app_email_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_access_param = "{url}?access={access}".format( url=url, access=MULTI_STEP ) request = factory.get(url_with_access_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.bundle_partner_2.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) self.assertIn(escape(self.email_partner_1.company_name), content) self.assertIn(escape(self.email_partner_2.company_name), content)
37.792625
132
0.657256
import copy from datetime import datetime, date, timedelta import json import re from unittest.mock import patch, Mock from urllib.parse import urlparse from django.conf import settings from django.contrib.auth.models import User, AnonymousUser from django.core.exceptions import ( PermissionDenied, SuspiciousOperation, ValidationError, ) from django.urls import resolve, reverse from django.core.management import call_command from django.test import TestCase, Client, RequestFactory from django.utils.translation import get_language from django.utils.html import escape from django.utils.timezone import now from TWLight.applications.factories import ApplicationFactory from TWLight.applications.models import Application from TWLight.resources.factories import PartnerFactory from TWLight.resources.filters import INSTANT, MULTI_STEP from TWLight.resources.models import Partner from TWLight.resources.tests import EditorCraftRoom from . import views from .oauth import OAuthBackend from .helpers.validation import validate_partners from .helpers.authorizations import get_all_bundle_authorizations from .helpers.wiki_list import WIKIS, LANGUAGE_CODES from .factories import EditorFactory, UserFactory from .groups import get_coordinators, get_restricted from .models import UserProfile, Editor, Authorization from .views import MyLibraryView from TWLight.users.helpers.editor_data import ( editor_valid, editor_account_old_enough, editor_enough_edits, editor_not_blocked, editor_reg_date, editor_bundle_eligible, ) FAKE_IDENTITY_DATA = {"query": {"userinfo": {"options": {"disablemail": 0}}}} FAKE_IDENTITY = { "editcount": 5000, "registered": "20151106154629", "blocked": False, "iss": urlparse(settings.TWLIGHT_OAUTH_PROVIDER_URL).scheme + urlparse(settings.TWLIGHT_OAUTH_PROVIDER_URL).netloc, "sub": 567823, "rights": ["deletion", "spaceflight", "autoconfirmed"], "groups": ["charismatic megafauna"], "email": "alice@example.com", "username": "alice", } FAKE_MERGED_ACCOUNTS = [ { "wiki": 
"enwiki", "url": "https://en.wikipedia.org", "timestamp": "2015-11-06T15:46:29Z", "method": "login", "editcount": 100, "registration": "2015-11-06T15:46:29Z", "groups": ["extendedconfirmed"], } ] FAKE_MERGED_ACCOUNTS_BLOCKED = [ { "wiki": "enwiki", "url": "https://en.wikipedia.org", "timestamp": "2015-11-06T15:46:29Z", "method": "login", "editcount": 100, "registration": "2015-11-06T15:46:29Z", "groups": ["extendedconfirmed"], "blocked": {"expiry": "infinity", "reason": "bad editor!"}, } ] FAKE_GLOBAL_USERINFO = { "home": "enwiki", "id": 567823, "registration": "2015-11-06T15:46:29Z", "name": "alice", "editcount": 5000, "merged": copy.copy(FAKE_MERGED_ACCOUNTS), } def remove_csrfmiddlewaretoken(rendered_html): csrfmiddlewaretoken_pattern = ( r"<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\".+\">" ) return re.sub(csrfmiddlewaretoken_pattern, "", rendered_html) class ViewsTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.client = Client() cls.username1 = "alice" cls.user_editor = UserFactory(username=cls.username1) cls.editor1 = EditorFactory(user=cls.user_editor) cls.editor1.wp_bundle_eligible = True cls.editor1.save() cls.url1 = reverse("users:editor_detail", kwargs={"pk": cls.editor1.pk}) cls.username2 = "bob" cls.user_editor2 = UserFactory(username=cls.username2) cls.editor2 = EditorFactory(user=cls.user_editor2) cls.url2 = reverse("users:editor_detail", kwargs={"pk": cls.editor2.pk}) cls.username3 = "carol" cls.user_superuser = UserFactory(username=cls.username3) cls.user_superuser.is_superuser = True cls.user_superuser.save() cls.editor3 = EditorFactory(user=cls.user_superuser) cls.username4 = "eve" cls.user_coordinator = UserFactory(username=cls.username4) cls.editor4 = EditorFactory(user=cls.user_coordinator) get_coordinators().user_set.add(cls.user_coordinator) # actually want to test that messages are displayed, use Client(), # and stop/restart the patcher. 
cls.message_patcher = patch("TWLight.applications.views.messages.add_message") cls.message_patcher.start() @classmethod def tearDownClass(cls): super().tearDownClass() cls.user_editor.delete() cls.editor1.delete() cls.user_editor2.delete() cls.editor2.delete() cls.user_superuser.delete() cls.editor3.delete() cls.user_coordinator.delete() cls.editor4.delete() cls.message_patcher.stop() def test_editor_detail_url_resolves(self): _ = resolve(self.url1) def test_anon_user_cannot_see_editor_details(self): response = self.client.get(self.url1) self.assertEqual(response.status_code, 302) self.assertEqual(urlparse(response.url).path, settings.LOGIN_URL) def test_editor_can_see_own_page(self): factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_editor response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_user_view_no_coordinators(self): get_coordinators().user_set.remove(self.user_coordinator) factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_editor response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_editor_cannot_see_other_editor_page(self): factory = RequestFactory() request = factory.get(self.url2) request.user = self.user_editor # Make sure the editor is not a coordinator, because coordinators *can* # see others' pages! 
coordinators = get_coordinators() try: assert self.user_editor not in coordinators.user_set.all() except AssertionError: coordinators.user_set.remove(self.user_editor) with self.assertRaises(PermissionDenied): _ = views.EditorDetailView.as_view()(request, pk=self.editor2.pk) def test_coordinator_access(self): factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_coordinator partner = PartnerFactory() app = ApplicationFactory( status=Application.PENDING, editor=self.editor1, partner=partner ) app.save() try: response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.fail("Editor details should not be visible to just any coordinator.") except PermissionDenied: pass partner.coordinator = request.user partner.save() response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_site_admin_can_see_other_editor_page(self): factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_superuser response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) self.assertEqual(response.status_code, 200) def test_editor_page_has_editor_data(self): factory = RequestFactory() request = factory.get(self.url1) request.user = self.user_editor response = views.EditorDetailView.as_view()(request, pk=self.editor1.pk) content = response.render().content.decode("utf-8") self.assertIn(self.editor1.wp_username, content) self.assertIn("42", content) self.assertIn("Cat floofing, telemetry, fermentation", content) def test_my_applications_page_has_application_history(self): app1 = ApplicationFactory( status=Application.PENDING, editor=self.user_editor.editor ) app2 = ApplicationFactory( status=Application.QUESTION, editor=self.user_editor.editor ) app3 = ApplicationFactory( status=Application.APPROVED, editor=self.user_editor.editor ) app4 = ApplicationFactory( status=Application.NOT_APPROVED, editor=self.user_editor.editor ) app5 = 
ApplicationFactory( status=Application.APPROVED, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) app6 = ApplicationFactory( status=Application.PENDING, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) app7 = ApplicationFactory( status=Application.INVALID, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) factory = RequestFactory() request = factory.get( reverse("users:my_applications", kwargs={"pk": self.editor1.pk}) ) request.user = self.user_editor response = views.ListApplicationsUserView.as_view()(request, pk=self.editor1.pk) self.assertEqual( set(response.context_data["object_list"]), {app1, app2, app3, app4} ) content = response.render().content.decode("utf-8") self.assertIn(escape(app1.partner.company_name), content) self.assertIn(escape(app2.partner.company_name), content) self.assertIn(escape(app3.partner.company_name), content) self.assertIn(escape(app4.partner.company_name), content) # No Bundle applications self.assertNotIn(escape(app5.partner.company_name), content) self.assertNotIn(escape(app6.partner.company_name), content) self.assertNotIn(escape(app7.partner.company_name), content) # We can't use assertTemplateUsed with RequestFactory (only with def test_withdraw_application(self): app = ApplicationFactory( status=Application.PENDING, partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, ) factory = RequestFactory() request = factory.get( reverse("users:withdraw", kwargs={"pk": self.editor1.pk, "id": app.pk}) ) request.user = self.user_editor response = views.WithdrawApplication.as_view()( request, pk=self.editor1.pk, id=app.pk ) app.refresh_from_db() self.assertNotEqual(app.date_closed, None) self.assertEqual(app.status, Application.INVALID) def test_sent_application(self): app = ApplicationFactory( status=Application.SENT, 
partner=PartnerFactory(authorization_method=Partner.BUNDLE), editor=self.user_editor.editor, sent_by=self.user_coordinator, ) factory = RequestFactory() request = factory.get( reverse("users:my_applications", kwargs={"pk": self.editor1.pk}) ) request.user = self.user_editor response = views.ListApplicationsUserView.as_view()( request, pk=self.editor1.pk, ) app.refresh_from_db() self.assertNotIn("Withdraw", response.render().content.decode("utf-8")) def test_return_authorization(self): editor = EditorCraftRoom(self, Terms=True, Coordinator=False) partner = PartnerFactory(authorization_method=Partner.PROXY) app = ApplicationFactory( status=Application.SENT, editor=editor, partner=partner, sent_by=self.user_coordinator, ) authorization = Authorization.objects.get(user=editor.user, partners=partner) self.assertEqual(authorization.get_latest_app(), app) return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) return_form = response.context["form"] self.client.post(return_url, return_form.initial) yesterday = datetime.now().date() - timedelta(days=1) authorization.refresh_from_db() self.assertEqual(authorization.date_expires, yesterday) someday = yesterday + timedelta(days=30) authorization.date_expires = someday authorization.save() EditorCraftRoom(self, Terms=True, Coordinator=False) return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) self.assertEqual(response.status_code, 403) response = self.client.post(return_url, return_form.initial) self.assertEqual(response.status_code, 403) authorization.refresh_from_db() self.assertEqual(authorization.date_expires, someday) def test_latest_application(self): editor = EditorCraftRoom(self, Terms=True, Coordinator=False) partner = PartnerFactory(authorization_method=Partner.PROXY) app = ApplicationFactory( status=Application.SENT, editor=editor, partner=partner, 
sent_by=self.user_coordinator, ) authorization = Authorization.objects.get(user=editor.user, partners=partner) self.assertEqual(authorization.get_latest_app(), app) return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) return_form = response.context["form"] self.client.post(return_url, return_form.initial) yesterday = datetime.now().date() - timedelta(days=1) authorization.refresh_from_db() self.assertEqual(authorization.date_expires, yesterday) app_renewal = ApplicationFactory( status=Application.SENT, editor=editor, partner=partner, sent_by=self.user_coordinator, ) app_renewal.save() authorization.refresh_from_db() return_url = reverse( "users:return_authorization", kwargs={"pk": authorization.pk} ) response = self.client.get(return_url, follow=True) return_form = response.context["form"] self.client.post(return_url, return_form.initial) yesterday = datetime.now().date() - timedelta(days=1) authorization.refresh_from_db() self.assertEqual(authorization.date_expires, yesterday) app_renewal2 = ApplicationFactory(editor=editor, partner=partner) app_renewal2.status = Application.NOT_APPROVED app_renewal2.save() authorization.refresh_from_db() self.assertEqual(authorization.get_latest_app(), app_renewal) self.assertEqual(authorization.get_latest_sent_app(), app_renewal) def test_user_home_view_anon(self): factory = RequestFactory() request = factory.get(reverse("users:home")) request.user = AnonymousUser() response = views.UserHomeView.as_view()(request) self.assertEqual(response.status_code, 302) self.assertEqual(urlparse(response.url).path, settings.LOGIN_URL) def test_user_home_view_is_editor(self): user = UserFactory() editor = EditorFactory(user=user) factory = RequestFactory() home_request = factory.get(reverse("users:home")) home_request.user = user home_response = views.UserHomeView.as_view()(home_request) detail_request = factory.get( reverse("users:editor_detail", 
kwargs={"pk": editor.pk}) ) detail_request.user = user detail_response = views.EditorDetailView.as_view()(detail_request, pk=editor.pk) # directly, because its as_view function has already been processed # and all we have access to is a return value. So let's check that the self.assertEqual(home_response.status_code, 200) expected_detail_view = remove_csrfmiddlewaretoken( detail_response.rendered_content ) home_view = remove_csrfmiddlewaretoken(home_response.rendered_content) self.assertEqual(expected_detail_view, home_view) @patch("TWLight.users.views.UserDetailView.as_view") def test_user_home_view_non_editor(self, mock_view): user = UserFactory(username="not_an_editor") self.assertFalse(hasattr(user, "editor")) factory = RequestFactory() request = factory.get(reverse("users:home")) request.user = user _ = views.UserHomeView.as_view()(request) # because we don't have a URL allowing us to render UserDetailView # called. mock_view.assert_called_once_with() def test_coordinator_restricted(self): # If a coordinator restricts their data processing # they should stop being a coordinator. 
restrict_url = reverse("users:restrict_data") coordinators = get_coordinators() restricted = get_restricted() # Double check that the coordinator still has the relevant group assert self.user_coordinator in coordinators.user_set.all() # Need a password so we can login self.user_coordinator.set_password("editor") self.user_coordinator.save() self.client = Client() session = self.client.session self.client.login(username=self.username4, password="editor") restrict = self.client.get(restrict_url, follow=True) restrict_form = restrict.context["form"] data = restrict_form.initial data["restricted"] = True data["submit"] = True agree = self.client.post(restrict_url, data) assert self.user_coordinator not in coordinators.user_set.all() assert self.user_coordinator in restricted.user_set.all() def test_user_delete(self): delete_url = reverse("users:delete_data", kwargs={"pk": self.user_editor.pk}) # Need a password so we can login self.user_editor.set_password("editor") self.user_editor.save() self.client = Client() session = self.client.session self.client.login(username=self.username1, password="editor") submit = self.client.post(delete_url) assert not User.objects.filter(username=self.username1).exists() # Check that the associated Editor also got deleted. 
assert not Editor.objects.filter(user=self.user_editor).exists() def test_user_delete_authorizations(self): delete_url = reverse("users:delete_data", kwargs={"pk": self.user_editor.pk}) # Need a password so we can login self.user_editor.set_password("editor") self.user_editor.save() self.client = Client() session = self.client.session self.client.login(username=self.username1, password="editor") partner = PartnerFactory() user_auth = Authorization( user=self.user_editor, authorizer=self.user_coordinator, date_authorized=date.today(), date_expires=date.today() + timedelta(days=30), ) user_auth.save() user_auth.partners.add(partner) submit = self.client.post(delete_url) user_auth.refresh_from_db() self.assertEqual(user_auth.date_expires, date.today() - timedelta(days=1)) def test_user_delete_bundle_authorizations(self): delete_url = reverse("users:delete_data", kwargs={"pk": self.user_editor.pk}) # Need a password so we can login self.editor1.user.set_password("editor") self.editor1.user.save() bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) self.client = Client() session = self.client.session self.client.login(username=self.username1, password="editor") # Bundle authorization should be created self.editor1.update_bundle_authorization() self.editor1.refresh_from_db() bundle_auth = self.editor1.get_bundle_authorization self.assertTrue(bundle_auth.is_bundle) # Saving the bundle authorization id so we can query it after to make # sure it's been deleted bundle_auth_id = bundle_auth.pk submit = self.client.post(delete_url) editor_count = Editor.objects.filter(pk=self.editor1.pk).count() self.assertEqual(editor_count, 0) bundle_auth_count = Authorization.objects.filter(pk=bundle_auth_id).count() self.assertEqual(bundle_auth_count, 0) def test_user_data_download(self): self.user_editor2.set_password("editor") self.user_editor2.save() self.client = Client() session = 
self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post(self.url2, {"download": "Download"}) self.assertEqual( response.get("Content-Disposition"), "attachment; filename=user_data.json" ) def test_terms_of_use_on_editor_detail_page_show(self): user_agreed_TOU = UserFactory() user_agreed_TOU.userprofile.terms_of_use = True editor_agreed_TOU = EditorFactory(user=user_agreed_TOU) factory = RequestFactory() detail_request = factory.get( reverse("users:editor_detail", kwargs={"pk": editor_agreed_TOU.pk}) ) detail_request.user = user_agreed_TOU response = views.EditorDetailView.as_view()( detail_request, pk=editor_agreed_TOU.pk ) content = response.render().content.decode("utf-8") self.assertIn("By unchecking this box and clicking “Update", content) def test_terms_of_use_on_editor_detail_page_not_show(self): user_not_agreed_TOU = UserFactory() user_not_agreed_TOU.userprofile.terms_of_use = False editor_not_agreed_TOU = EditorFactory(user=user_not_agreed_TOU) factory = RequestFactory() detail_request = factory.get( reverse("users:editor_detail", kwargs={"pk": editor_not_agreed_TOU.pk}) ) detail_request.user = user_not_agreed_TOU response = views.EditorDetailView.as_view()( detail_request, pk=editor_not_agreed_TOU.pk ) content = response.render().content.decode("utf-8") self.assertNotIn("By unchecking this box and clicking “Update", content) def test_user_email_form(self): self.user_editor2.set_password("editor") self.user_editor2.save() self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post(self.url2, {"update_email_settings": ["Update"]}) self.assertEqual(response.status_code, 302) def test_user_email_preferences_disable_update(self): self.user_editor2.set_password("editor") self.user_editor2.save() get_coordinators().user_set.add(self.user_editor2) self.client = Client() session = self.client.session 
self.client.login(username=self.username2, password="editor") response = self.client.post(self.url2, {"update_email_settings": ["Update"]}) self.assertEqual(response.status_code, 302) self.user_editor2.userprofile.refresh_from_db() # or send_discussion_application_reminders or send_approved_application_reminders # in POST to simulate an unchecked box. self.assertEqual(self.user_editor2.userprofile.send_renewal_notices, False) self.assertEqual(self.user_editor2.userprofile.pending_app_reminders, False) self.assertEqual(self.user_editor2.userprofile.discussion_app_reminders, False) self.assertEqual(self.user_editor2.userprofile.approved_app_reminders, False) def test_user_email_preferences_enable_update(self): # Need a password so we can login self.user_editor2.set_password("editor") self.user_editor2.userprofile.send_renewal_notices = False self.user_editor2.userprofile.pending_app_reminders = False self.user_editor2.userprofile.discussion_app_reminders = False self.user_editor2.userprofile.approved_app_reminders = False self.user_editor2.save() # Only coordinators get to change their reminder preferences get_coordinators().user_set.add(self.user_editor2) self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post( self.url2, { "update_email_settings": ["Update"], "send_renewal_notices": ["on"], "send_pending_application_reminders": ["on"], "send_discussion_application_reminders": ["on"], "send_approved_application_reminders": ["on"], }, ) # Should be successfully redirected back to the user page. 
self.assertEqual(response.status_code, 302) self.user_editor2.userprofile.refresh_from_db() self.assertEqual(self.user_editor2.userprofile.send_renewal_notices, True) self.assertEqual(self.user_editor2.userprofile.pending_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.discussion_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.approved_app_reminders, True) def test_user_email_preferences_update_non_coordinator(self): # Need a password so we can login self.user_editor2.set_password("editor") self.user_editor2.userprofile.send_renewal_notices = False self.user_editor2.save() self.client = Client() session = self.client.session self.client.login(username=self.username2, password="editor") response = self.client.post( self.url2, {"update_email_settings": ["Update"], "send_renewal_notices": ["on"]}, ) # Should be successfully redirected back to the user page. self.assertEqual(response.status_code, 302) self.user_editor2.userprofile.refresh_from_db() self.assertEqual(self.user_editor2.userprofile.send_renewal_notices, True) # Only coordinators get to change their reminder preferences self.assertEqual(self.user_editor2.userprofile.pending_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.discussion_app_reminders, True) self.assertEqual(self.user_editor2.userprofile.approved_app_reminders, True) class UserProfileModelTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.proxy_partner_1 = PartnerFactory(authorization_method=Partner.PROXY) cls.user_coordinator = UserFactory(username="Jon Snow") cls.editor = EditorFactory() cls.editor.wp_bundle_eligible = True cls.editor.save() get_coordinators().user_set.add(cls.user_coordinator) def test_user_profile_created(self): user = UserFactory() # If the signal has not created a UserProfile, this 
line will throw # a DoesNotExist and the test will fail, which is what we want. UserProfile.objects.get(user=user) user.delete() def test_user_profile_sets_tou_to_false(self): # Don't use UserFactory, since it forces the related profile to have user = User.objects.create_user( username="profiler", email="profiler@example.com" ) profile = UserProfile.objects.get(user=user) self.assertEqual(profile.terms_of_use, False) user.delete() def test_user_profile_sets_use_wp_email_to_true(self): user = User.objects.create_user( username="profiler", email="profiler@example.com" ) profile = UserProfile.objects.get(user=user) self.assertEqual(profile.use_wp_email, True) user.delete() def test_add_favorite_collection_valid(self): profile = UserProfile.objects.get(user=self.editor.user) app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) profile.favorites.add(self.bundle_partner_1) profile.favorites.add(self.bundle_partner_2) profile.favorites.add(self.proxy_partner_1) self.assertIn(self.proxy_partner_1, profile.favorites.all()) self.assertIn(self.bundle_partner_1, profile.favorites.all()) self.assertIn(self.bundle_partner_2, profile.favorites.all()) def test_add_favorite_expired_collection_valid(self): profile = UserProfile.objects.get(user=self.editor.user) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) someday = date.today() - timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) authorization.date_expires = someday 
authorization.save() profile.favorites.add(self.proxy_partner_1) self.assertIn(self.proxy_partner_1, profile.favorites.all()) def test_add_favorite_collection_invalid(self): profile = UserProfile.objects.get(user=self.editor.user) with self.assertRaises(ValidationError): profile.favorites.add(self.proxy_partner_1) class EditorModelTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() for editor in Editor.objects.all(): # The test case succeeds when runs alone but fails when run # as part of the whole suite, because it grabs the wrong editor # object from the db. Kill them all with fire. # (Why does it do this? Because our queries look for editors by # username or wikipedia sub, not by foreign key - we have to use the # information that we have from the wikipedia API, which knows # nothing about our database. But the test runner doesn't actually editor.delete() # longest wiki name in wiki_list. cls.editor = EditorFactory( wp_username="editor_model_test", wp_rights=json.dumps(["cat floofing", "the big red button"]), wp_groups=json.dumps(["sysops", "bureaucrats"]), wp_registered=None, ) cls.editor.user.userprofile.terms_of_use = True cls.editor.user.userprofile.save() cls.editor.user.save() cls.editor.save() @classmethod def tearDownClass(cls): super().tearDownClass() cls.editor.delete() def test_encoder_works_with_special_character_username(self): test = Editor().encode_wp_username("editor model&test") self.assertEqual(test, "editor%20model%26test") def test_wp_user_page_url(self): expected_url = settings.TWLIGHT_OAUTH_PROVIDER_URL + "/User:editor_model_test" self.assertEqual(expected_url, self.editor.wp_user_page_url) def test_wp_link_central_auth(self): expected_url = "https://meta.wikimedia.org/w/index.php?title=Special%3ACentralAuth&target=editor_model_test" self.assertEqual(expected_url, self.editor.wp_link_central_auth) def test_get_wp_rights_display(self): expected_text = ["cat floofing", "the big red button"] 
        self.assertEqual(expected_text, self.editor.get_wp_rights_display)

    def test_get_wp_groups_display(self):
        expected_text = ["sysops", "bureaucrats"]
        self.assertEqual(expected_text, self.editor.get_wp_groups_display)

    def test_is_user_valid(self):
        """Exercise the editor_valid() helper across the edit-count,
        account-age, and block criteria, including the 6-month boundary."""
        identity = copy.copy(FAKE_IDENTITY)
        global_userinfo = copy.copy(FAKE_GLOBAL_USERINFO)

        # Valid data: 500 edits is the minimum passing edit count.
        global_userinfo["editcount"] = 500
        self.editor.update_editcount(global_userinfo["editcount"])
        enough_edits = editor_enough_edits(self.editor.wp_editcount)
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        not_blocked = editor_not_blocked(global_userinfo["merged"])
        ignore_wp_blocks = False
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertTrue(valid)

        # Too few edits: 499 is one short of the threshold.
        global_userinfo["editcount"] = 499
        self.editor.update_editcount(global_userinfo["editcount"])
        enough_edits = editor_enough_edits(self.editor.wp_editcount)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertFalse(valid)

        # Oauth says the account is too new, but global_userinfo says it's fine
        # — global_userinfo's registration date should win.
        global_userinfo["editcount"] = 500
        self.editor.update_editcount(global_userinfo["editcount"])
        enough_edits = editor_enough_edits(self.editor.wp_editcount)
        identity["registered"] = datetime.today().strftime("%Y%m%d%H%M%S")
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertTrue(valid)

        # Conversely: global_userinfo says too new, oauth identity says the
        # account is a year old — identity's date should rescue it.
        global_userinfo["editcount"] = 500
        global_userinfo["registration"] = datetime.today()
        self.editor.update_editcount(global_userinfo["editcount"])
        enough_edits = editor_enough_edits(self.editor.wp_editcount)
        identity["registered"] = (datetime.today() - timedelta(days=365)).strftime(
            "%Y%m%d%H%M%S"
        )
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertTrue(valid)

        # Account created too recently: both sources say "today".
        global_userinfo["editcount"] = 500
        global_userinfo["registration"] = datetime.today()
        self.editor.update_editcount(global_userinfo["editcount"])
        enough_edits = editor_enough_edits(self.editor.wp_editcount)
        identity["registered"] = datetime.today().strftime("%Y%m%d%H%M%S")
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertFalse(valid)

        # Edge case: 181 days is just under 6 months — this shouldn't work.
        almost_6_months_ago = datetime.today() - timedelta(days=181)
        global_userinfo["registration"] = almost_6_months_ago
        identity["registered"] = almost_6_months_ago.strftime("%Y%m%d%H%M%S")
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertFalse(valid)

        # 182 days is just over the boundary and should pass.
        almost_6_months_ago = datetime.today() - timedelta(days=182)
        global_userinfo["registration"] = almost_6_months_ago
        identity["registered"] = almost_6_months_ago.strftime("%Y%m%d%H%M%S")
        registered = editor_reg_date(identity, global_userinfo)
        account_old_enough = editor_account_old_enough(registered)
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertTrue(valid)

        # A blocked merged account invalidates the editor...
        global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED)
        not_blocked = editor_not_blocked(global_userinfo["merged"])
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertFalse(valid)

        # ...unless blocks are explicitly ignored (staff override).
        global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED)
        not_blocked = editor_not_blocked(global_userinfo["merged"])
        ignore_wp_blocks = True
        valid = editor_valid(
            enough_edits, account_old_enough, not_blocked, ignore_wp_blocks
        )
        self.assertTrue(valid)

    def test_is_user_bundle_eligible(self):
        """Bundle eligibility: first check always passes for a valid user,
        holds for 30 days without new edits, then requires recent activity."""
        # Valid data
        lang = get_language()
        identity = copy.copy(FAKE_IDENTITY)
        identity["sub"] = self.editor.wp_sub
        identity["editcount"] = 500
        global_userinfo = copy.copy(FAKE_GLOBAL_USERINFO)
        global_userinfo["id"] = self.editor.wp_sub
        global_userinfo["editcount"] = 500
        # 1st time bundle check should always pass for a valid user.
        self.editor.update_from_wikipedia(
            identity, lang, global_userinfo=global_userinfo
        )
        self.editor.refresh_from_db()
        self.assertTrue(self.editor.wp_bundle_eligible)
        # A valid user should pass up to 30 days after their first login,
        # even if they haven't made any more edits.
        # NOTE(review): original indentation lost — the four statements below
        # are reconstructed as the loop body; confirm against upstream.
        for day in range(29):
            self.editor.update_from_wikipedia(
                identity,
                lang,
                global_userinfo,
                self.editor.wp_editcount_updated + timedelta(days=1),
            )
            self.editor.update_editcount(
                global_userinfo["editcount"],
                self.editor.wp_editcount_updated + timedelta(hours=23, minutes=59),
            )
            self.editor.refresh_from_db()
            self.assertTrue(self.editor.wp_bundle_eligible)
        # One more update past the 30-day window with no new edits:
        # eligibility should now be lost.
        self.editor.update_from_wikipedia(
            identity,
            lang,
            global_userinfo,
            self.editor.wp_editcount_updated + timedelta(minutes=1),
        )
        self.editor.refresh_from_db()
        self.assertFalse(self.editor.wp_bundle_eligible)
        # Making 10 more edits restores eligibility.
        global_userinfo["editcount"] = 510
        self.editor.update_from_wikipedia(
            identity,
            lang,
            global_userinfo,
            self.editor.wp_editcount_updated + timedelta(minutes=1),
        )
        self.editor.refresh_from_db()
        self.assertTrue(self.editor.wp_bundle_eligible)
        # A block on a merged account removes eligibility even though the
        # edit counts are unchanged.
        global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED)
        self.editor.update_from_wikipedia(
            identity,
            lang,
            global_userinfo,
            self.editor.wp_editcount_updated + timedelta(minutes=1),
        )
        self.editor.refresh_from_db()
        self.assertEqual(self.editor.wp_editcount, 510)
        self.assertEqual(
            self.editor.wp_editcount_prev(
                current_datetime=self.editor.wp_editcount_updated
            ),
            500,
        )
        self.assertFalse(self.editor.wp_bundle_eligible)

    def test_update_bundle_authorization_creation(self):
        editor = EditorFactory()
        bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE)
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        # No Bundle authorization should exist before the update call.
        with self.assertRaises(Authorization.DoesNotExist):
            bundle_authorization = Authorization.objects.get(
                user=editor.user, partners__authorization_method=Partner.BUNDLE
            )

        editor.wp_bundle_eligible = True
        editor.save()
        editor.update_bundle_authorization()

        bundle_authorization = Authorization.objects.filter(
            user=editor.user, partners__authorization_method=Partner.BUNDLE
        ).distinct()

        # We should now have created a single authorization covering all
        # Bundle partners.
        self.assertEqual(bundle_authorization.count(), 1)

    def test_update_bundle_authorization_expiry(self):
        """Losing Bundle eligibility expires (not deletes) the
        authorization, backdating its expiry to yesterday."""
        editor = EditorFactory()
        bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE)
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        editor.wp_bundle_eligible = True
        editor.save()
        editor.update_bundle_authorization()

        bundle_authorization = Authorization.objects.filter(
            user=editor.user, partners__authorization_method=Partner.BUNDLE
        ).distinct()

        editor.wp_bundle_eligible = False
        editor.save()
        editor.update_bundle_authorization()

        bundle_authorization = Authorization.objects.filter(
            user=editor.user, partners__authorization_method=Partner.BUNDLE
        ).distinct()

        # Authorization should still exist
        self.assertEqual(bundle_authorization.count(), 1)
        # But it should have now expired
        self.assertEqual(
            bundle_authorization.first().date_expires, date.today() - timedelta(days=1)
        )

    def test_update_bundle_authorization_user_eligible_again(self):
        """Regaining eligibility reactivates the expired authorization
        rather than creating a new one."""
        editor = EditorFactory()
        bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE)
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        editor.wp_bundle_eligible = True
        editor.save()
        editor.update_bundle_authorization()

        editor.wp_bundle_eligible = False
        editor.save()
        editor.update_bundle_authorization()

        # Marking them as eligible a 2nd time should update their
        # expired authorization to remove the expiry date.
        editor.wp_bundle_eligible = True
        editor.save()
        editor.update_bundle_authorization()

        bundle_authorization = Authorization.objects.filter(
            user=editor.user, partners__authorization_method=Partner.BUNDLE
        ).distinct()

        # Authorization should still exist
        self.assertEqual(bundle_authorization.count(), 1)
        # It should have no expiry date, i.e. it's now active again.
        self.assertEqual(bundle_authorization.get().date_expires, None)

    def test_wp_bundle_authorized_no_bundle_auth(self):
        # An editor with no Bundle authorization at all is not authorized.
        editor = EditorFactory()
        self.assertFalse(editor.wp_bundle_authorized)

    def test_wp_bundle_authorized_true(self):
        editor = EditorFactory()
        bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE)
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        editor.wp_bundle_eligible = True
        editor.save()
        editor.update_bundle_authorization()

        self.assertTrue(editor.wp_bundle_authorized)

    def test_wp_bundle_authorized_false(self):
        # An expired Bundle authorization does not count as authorized.
        editor = EditorFactory()
        bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE)
        bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE)

        editor.wp_bundle_eligible = True
        editor.save()
        editor.update_bundle_authorization()

        editor.wp_bundle_eligible = False
        editor.save()
        editor.update_bundle_authorization()

        self.assertFalse(editor.wp_bundle_authorized)

    def test_update_from_wikipedia(self):
        """update_from_wikipedia() must copy identity/global_userinfo fields
        onto the editor, and reject a changed wikipedia ID (sub)."""
        identity = {}
        identity["username"] = "evil_dr_porkchop"
        # Users' unique WP IDs should not change across API calls, but are
        # reused here. NOTE(review): original comment truncated at this point.
        identity["sub"] = self.editor.wp_sub
        identity["rights"] = ["deletion", "spaceflight"]
        identity["groups"] = ["charismatic megafauna"]
        identity["editcount"] = 42
        identity["email"] = "porkchop@example.com"
        identity["iss"] = "zh-classical.wikipedia.org"
        identity["registered"] = "20130205230142"
        identity["blocked"] = False
        global_userinfo = {}
        global_userinfo["home"] = "zh_classicalwiki"
        global_userinfo["id"] = identity["sub"]
        global_userinfo["registration"] = "2013-02-05T23:01:42Z"
global_userinfo["name"] = identity["username"] global_userinfo["editcount"] = 960 global_userinfo["merged"] = copy.copy(FAKE_MERGED_ACCOUNTS_BLOCKED) # to test instead. new_editor = EditorFactory(wp_registered=None) new_identity = dict(identity) new_global_userinfo = dict(global_userinfo) new_identity["sub"] = new_editor.wp_sub new_global_userinfo["id"] = new_identity["sub"] lang = get_language() new_editor.update_from_wikipedia( new_identity, lang, new_global_userinfo ) # This call also saves the editor self.assertEqual(new_editor.wp_username, "evil_dr_porkchop") self.assertEqual(new_editor.wp_rights, json.dumps(["deletion", "spaceflight"])) self.assertEqual(new_editor.wp_groups, json.dumps(["charismatic megafauna"])) self.assertEqual(new_editor.wp_editcount, 960) self.assertEqual(new_editor.user.email, "porkchop@example.com") self.assertEqual(new_editor.wp_registered, datetime(2013, 2, 5).date()) # Now check what happens if their wikipedia ID number has changed - this # should throw an error as we can no longer verify they're the same with self.assertRaises(SuspiciousOperation): new_identity["sub"] = new_editor.wp_sub + 1 new_global_userinfo["id"] = new_identity["sub"] new_editor.update_from_wikipedia( new_identity, lang, new_global_userinfo ) class OAuthTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() for editor in Editor.objects.all(): editor.delete() @patch("urllib.request.urlopen") def test_create_user_and_editor(self, mock_urlopen): oauth_backend = OAuthBackend() oauth_data = FAKE_IDENTITY_DATA identity = FAKE_IDENTITY mock_response = Mock() mock_response.read.side_effect = [json.dumps(oauth_data)] * 7 mock_urlopen.return_value = mock_response user, editor = oauth_backend._create_user_and_editor(identity) self.assertEqual(user.email, "alice@example.com") self.assertEqual(user.username, "567823") self.assertFalse(user.has_usable_password()) self.assertEqual(editor.user, user) self.assertEqual(editor.wp_sub, 567823) # tested 
elsewhere. # We mock out this function for two reasons: # 1) To prevent its call to an external API, which we would have otherwise # had to mock anyway; # 2) So we can assert that it was called. @patch("TWLight.users.models.Editor.update_from_wikipedia") def test_get_and_update_user_from_identity_existing_user(self, mock_update): # Make sure the test user has the username and language anticipated by our backend. username = FAKE_IDENTITY["sub"] lang = get_language() existing_user = UserFactory(username=username) params = {"user": existing_user, "wp_sub": FAKE_IDENTITY["sub"]} _ = EditorFactory(**params) oauth_backend = OAuthBackend() user, created = oauth_backend._get_and_update_user_from_identity(FAKE_IDENTITY) self.assertFalse(created) self.assertTrue(hasattr(user, "editor")) self.assertEqual(user, existing_user) mock_update.assert_called_once_with(FAKE_IDENTITY, lang) @patch("TWLight.users.models.Editor.update_from_wikipedia") def test_get_and_update_user_from_identity_new_user(self, mock_update): oauth_backend = OAuthBackend() identity = copy.copy(FAKE_IDENTITY) lang = get_language() new_sub = 57381037 identity["sub"] = new_sub self.assertFalse(Editor.objects.filter(wp_sub=new_sub).count()) user, created = oauth_backend._get_and_update_user_from_identity(identity) self.assertTrue(created) self.assertTrue(hasattr(user, "editor")) self.assertEqual(user.editor.wp_sub, new_sub) mock_update.assert_called_once_with(identity, lang) class TermsTestCase(TestCase): def test_terms_page_displays(self): _ = User.objects.create_user(username="termstestcase", password="bar") url = reverse("terms") c = Client() c.login(username="termstestcase", password="bar") response = c.get(url) self.assertEqual(response.status_code, 200) class HelpersTestCase(TestCase): def test_wikis_match_language_codes(self): WIKIS_LANGUAGES = set([wiki[0] for wiki in WIKIS]) LANGUAGES = set(LANGUAGE_CODES.keys()) self.assertEqual(WIKIS_LANGUAGES, LANGUAGES) class AuthorizationsHelpersTestCase(TestCase): 
@classmethod def setUpTestData(cls): super().setUpTestData() cls.bundle_partner_1 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.bundle_partner_2 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.bundle_partner_3 = PartnerFactory(authorization_method=Partner.BUNDLE) cls.proxy_partner_1 = PartnerFactory(authorization_method=Partner.PROXY) cls.proxy_partner_2 = PartnerFactory(authorization_method=Partner.PROXY) def test_validate_partners_for_bundle_auth(self): partner_queryset = Partner.objects.filter(authorization_method=Partner.BUNDLE) try: validation = validate_partners(partner_queryset) except ValidationError: self.fail("validate_partners() raised ValidationError unexpectedly.") def test_validate_partners_for_mixed_auth_types(self): partner_queryset = Partner.objects.filter( authorization_method__in=[Partner.BUNDLE, Partner.PROXY] ) with self.assertRaises(ValidationError): validate_partners(partner_queryset) def test_validate_partners_for_wrong_auth_type(self): partner_queryset = Partner.objects.filter(authorization_method=Partner.PROXY) with self.assertRaises(ValidationError): validate_partners(partner_queryset) def test_get_all_bundle_authorizations(self): editor = EditorFactory() editor.wp_bundle_eligible = True editor.save() # This should create an authorization linked to # bundle partners. editor.update_bundle_authorization() all_auths = get_all_bundle_authorizations() # One editor has Bundle auths, so this should be a # Queryset with 1 entry. 
self.assertEqual(all_auths.count(), 1) class ManagementCommandsTestCase(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.editor = EditorFactory() cls.editor.wp_bundle_eligible = True cls.editor.update_editcount(42, now() - timedelta(days=30)) cls.editor.wp_account_old_enough = True cls.editor.user.userprofile.terms_of_use = True cls.editor.user.userprofile.save() cls.editor.user.save() cls.editor.save() cls.global_userinfo_editor = { "home": "enwiki", "id": cls.editor.wp_sub, "registration": "2015-11-06T15:46:29Z", # Well before first commit. "name": "user328", "editcount": 5000, "merged": copy.copy(FAKE_MERGED_ACCOUNTS), } def test_user_update_eligibility_command_valid(self): # 1st time bundle check should always pass for a valid editor. self.assertTrue(self.editor.wp_bundle_eligible) # A valid editor should pass editcount checks for 30 days after their first login, even if they haven't made any more edits. for day in range(30): call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=1) ), wp_username=self.editor.wp_username, global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertEqual(self.editor.wp_editcount, 5000) self.assertEqual( self.editor.wp_editcount_prev( current_datetime=self.editor.wp_editcount_updated ), 42, ) self.assertEqual( self.editor.wp_editcount_recent( current_datetime=self.editor.wp_editcount_updated ), 4958, ) self.assertTrue(self.editor.wp_bundle_eligible) call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=1) ), global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertFalse(self.editor.wp_bundle_eligible) self.global_userinfo_editor["editcount"] = 5010 call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(minutes=1) ), 
global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertEqual(self.editor.wp_editcount, 5010) self.assertEqual( self.editor.wp_editcount_prev( current_datetime=self.editor.wp_editcount_updated ), 5000, ) self.assertEqual( self.editor.wp_editcount_recent(self.editor.wp_editcount_updated), 10 ) self.assertTrue(self.editor.wp_bundle_eligible) call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=29, hours=23, minutes=59, seconds=59) ), global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertEqual(self.editor.wp_editcount, 5010) self.assertEqual( self.editor.wp_editcount_prev( current_datetime=self.editor.wp_editcount_updated ), 5000, ) self.assertEqual( self.editor.wp_editcount_recent( current_datetime=self.editor.wp_editcount_updated ), 10, ) self.assertTrue(self.editor.wp_bundle_eligible) def test_user_update_eligibility_command_terms_not_accepted(self): self.editor.user.userprofile.terms_of_use = False self.editor.user.userprofile.save() self.editor.user.save() self.editor.save() self.assertTrue(self.editor.wp_bundle_eligible) call_command( "user_update_eligibility", datetime=datetime.isoformat( self.editor.wp_editcount_updated + timedelta(days=1) ), wp_username=self.editor.wp_username, global_userinfo=self.global_userinfo_editor, ) self.editor.refresh_from_db() self.assertFalse(self.editor.wp_bundle_eligible) class MyLibraryViewsTest(TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() cls.bundle_partner_1 = PartnerFactory( authorization_method=Partner.BUNDLE, new_tags={"tags": ["earth-sciences_tag"]}, searchable=Partner.SEARCHABLE, ) cls.bundle_partner_2 = PartnerFactory( authorization_method=Partner.BUNDLE, new_tags={"tags": ["art_tag"]}, searchable=Partner.PARTIALLY_SEARCHABLE, ) cls.bundle_partner_3 = PartnerFactory( authorization_method=Partner.BUNDLE, searchable=Partner.PARTIALLY_SEARCHABLE, ) 
cls.bundle_partner_3.new_tags = {"tags": ["art_tag"]} cls.bundle_partner_3.save() cls.bundle_partner_4 = PartnerFactory( authorization_method=Partner.BUNDLE, searchable=Partner.SEARCHABLE, ) cls.bundle_partner_4.new_tags = {"tags": ["multidisciplinary_tag"]} cls.bundle_partner_4.save() cls.proxy_partner_1 = PartnerFactory( authorization_method=Partner.PROXY, searchable=Partner.SEARCHABLE, ) cls.proxy_partner_1.new_tags = {"tags": ["earth-sciences_tag"]} cls.proxy_partner_1.save() cls.proxy_partner_2 = PartnerFactory( authorization_method=Partner.PROXY, searchable=Partner.SEARCHABLE, ) cls.proxy_partner_2.new_tags = {"tags": ["earth-sciences_tag"]} cls.proxy_partner_2.save() cls.proxy_partner_3 = PartnerFactory(authorization_method=Partner.PROXY) cls.proxy_partner_3.new_tags = {"tags": ["multidisciplinary_tag"]} cls.proxy_partner_3.save() cls.email_partner_1 = PartnerFactory(authorization_method=Partner.EMAIL) cls.email_partner_2 = PartnerFactory(authorization_method=Partner.EMAIL) cls.user_coordinator = UserFactory(username="Jon Snow") cls.editor = EditorFactory() cls.editor.wp_bundle_eligible = True cls.editor.save() get_coordinators().user_set.add(cls.user_coordinator) def test_user_collections(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = 
RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.bundle_partner_3.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_4.company_name), content) # Even though this partner is not visible, it still appears in the HTML # render self.assertIn(escape(self.proxy_partner_2.company_name), content) self.assertIn(escape(self.proxy_partner_3.company_name), content) def test_user_collections_show_expiry_date_extend(self): app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) someday = date.today() + timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) authorization.date_expires = someday authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") someday_fmt = datetime.strftime(someday, "%b %d, %Y") self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(someday_fmt, content) self.assertIn("Extend", content) def test_user_collections_show_expiry_date_renew(self): app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) someday = date.today() - timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 
) authorization.date_expires = someday authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") someday_fmt = datetime.strftime(someday, "%b %d, %Y") self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(someday_fmt, content) self.assertIn("Renew", content) def test_user_collections_show_expiry_date_not_shown(self): app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) authorization.date_expires = None authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn("Expiry date: ", content) def test_user_collections_has_open_application(self): old_app = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.PENDING, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) authorization = Authorization.objects.get( user=self.editor.user, partners=self.proxy_partner_1 ) factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.proxy_partner_1.company_name), 
content) self.assertIn("Go to application", content) def test_collection_filters_art_tag(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_art_tag_param = "{url}?tags=art_tag".format(url=url) request = factory.get(url_with_art_tag_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.bundle_partner_3.company_name), content) # Multidisciplinary partners should also appear when filtering self.assertIn(escape(self.bundle_partner_4.company_name), content) self.assertIn(escape(self.proxy_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) def test_collection_filters_earth_sciences_tag(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( 
status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_earth_sciences_tag_param = "{url}?tags=earth-sciences_tag".format( url=url ) request = factory.get(url_with_earth_sciences_tag_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertNotIn(escape(self.bundle_partner_2.company_name), content) self.assertNotIn(escape(self.bundle_partner_3.company_name), content) # Multidisciplinary partners should also appear when filtering self.assertIn(escape(self.bundle_partner_4.company_name), content) self.assertIn(escape(self.proxy_partner_3.company_name), content) self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_2.company_name), content) def test_collection_show_waitlisted_badge(self): waitlisted_partner = PartnerFactory( authorization_method=Partner.PROXY, status=Partner.WAITLIST ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=waitlisted_partner, sent_by=self.user_coordinator, ) someday = date.today() - timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=waitlisted_partner ) authorization.date_expires = someday authorization.save() factory = RequestFactory() url 
= reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(waitlisted_partner.company_name), content) self.assertIn("Waitlisted", content) def test_collection_dont_show_waitlisted_badge(self): waitlisted_partner = PartnerFactory( authorization_method=Partner.PROXY, status=Partner.WAITLIST ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=waitlisted_partner, sent_by=self.user_coordinator, ) someday = date.today() + timedelta(days=60) authorization = Authorization.objects.get( user=self.editor.user, partners=waitlisted_partner ) authorization.date_expires = someday authorization.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(waitlisted_partner.company_name), content) self.assertNotIn("Waitlisted", content) def test_collection_show_not_available_badge(self): not_available_partner = PartnerFactory( authorization_method=Partner.PROXY, status=Partner.NOT_AVAILABLE ) # Make the user staff so they can see unavailable collections self.editor.user.is_staff = True self.editor.user.save() self.editor.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(not_available_partner.company_name), content) self.assertIn("Not Available", content) def test_user_not_eligible_eligibility_modal_shown(self): # Make the user not eligible so they can see the eligibility modal 
self.editor.wp_bundle_eligible = False self.editor.save() factory = RequestFactory() url = reverse("users:my_library") request = factory.get(url) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") eligibility_message = "Sorry, your Wikipedia account doesn’t currently qualify to access The Wikipedia Library." self.assertIn(eligibility_message, content) def test_collection_filters_searchable(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_searchable_param = "{url}?searchable={searchable}".format( url=url, searchable=Partner.SEARCHABLE ) request = factory.get(url_with_searchable_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_4.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_2.company_name), content) self.assertNotIn(escape(self.bundle_partner_2.company_name), 
content) self.assertNotIn(escape(self.bundle_partner_3.company_name), content) self.assertNotIn(escape(self.proxy_partner_3.company_name), content) def test_collection_filters_partially_searchable(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_searchable_param = "{url}?searchable={searchable}".format( url=url, searchable=Partner.PARTIALLY_SEARCHABLE ) request = factory.get(url_with_searchable_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.bundle_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.bundle_partner_4.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) self.assertNotIn(escape(self.proxy_partner_3.company_name), content) def test_collection_filters_not_searchable(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, 
partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_bundle_partner_3 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_3, sent_by=self.user_coordinator, ) app_bundle_partner_4 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_4, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_searchable_param = "{url}?searchable={searchable}".format( url=url, searchable=Partner.NOT_SEARCHABLE ) request = factory.get(url_with_searchable_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.proxy_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.bundle_partner_2.company_name), content) self.assertNotIn(escape(self.bundle_partner_3.company_name), content) self.assertNotIn(escape(self.bundle_partner_4.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) def test_instant_access_filter(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, 
partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) app_proxy_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_2, sent_by=self.user_coordinator, ) app_email_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.email_partner_1, sent_by=self.user_coordinator, ) app_email_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_access_param = "{url}?access={access}".format(url=url, access=INSTANT) request = factory.get(url_with_access_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertIn(escape(self.bundle_partner_1.company_name), content) self.assertIn(escape(self.bundle_partner_2.company_name), content) self.assertIn(escape(self.proxy_partner_1.company_name), content) self.assertIn(escape(self.proxy_partner_2.company_name), content) self.assertNotIn(escape(self.email_partner_1.company_name), content) self.assertNotIn(escape(self.email_partner_2.company_name), content) def test_multi_step_access_filter(self): app_bundle_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_1, sent_by=self.user_coordinator, ) app_bundle_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) app_proxy_partner_1 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_1, sent_by=self.user_coordinator, ) app_proxy_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.proxy_partner_2, sent_by=self.user_coordinator, ) app_email_partner_1 = ApplicationFactory( status=Application.SENT, 
editor=self.editor, partner=self.email_partner_1, sent_by=self.user_coordinator, ) app_email_partner_2 = ApplicationFactory( status=Application.SENT, editor=self.editor, partner=self.bundle_partner_2, sent_by=self.user_coordinator, ) factory = RequestFactory() url = reverse("users:my_library") url_with_access_param = "{url}?access={access}".format( url=url, access=MULTI_STEP ) request = factory.get(url_with_access_param) request.user = self.editor.user response = MyLibraryView.as_view()(request) self.assertEqual(response.status_code, 200) content = response.render().content.decode("utf-8") self.assertNotIn(escape(self.bundle_partner_1.company_name), content) self.assertNotIn(escape(self.bundle_partner_2.company_name), content) self.assertNotIn(escape(self.proxy_partner_1.company_name), content) self.assertNotIn(escape(self.proxy_partner_2.company_name), content) self.assertIn(escape(self.email_partner_1.company_name), content) self.assertIn(escape(self.email_partner_2.company_name), content)
true
true
f7f67140a26bd51d41d001df1f78a970573ea917
3,271
py
Python
ast_to_xml.py
flyte/ast-to-xml
41464c689063246c913eea360e7afd066e267646
[ "MIT" ]
null
null
null
ast_to_xml.py
flyte/ast-to-xml
41464c689063246c913eea360e7afd066e267646
[ "MIT" ]
null
null
null
ast_to_xml.py
flyte/ast-to-xml
41464c689063246c913eea360e7afd066e267646
[ "MIT" ]
1
2021-04-04T15:06:20.000Z
2021-04-04T15:06:20.000Z
"""Render a Python module's AST as an lxml XML tree and slice source by XPath.

The XML mirrors the AST one element per node (tag = node class name), with
the node's source-location attributes exported as XML attributes, so snippets
of the original source can be selected with ordinary XPath queries.
"""
import ast
import textwrap
from importlib import import_module
from types import ModuleType

from lxml import etree as ET

# Source-location attributes ast attaches to nodes; mirrored as XML attributes
# and excluded from the per-field child elements below.
ATTRS = ("lineno", "col_offset", "end_lineno", "end_col_offset")


def ast_node_attrs(ast_node):
    """Return the node's source-location attributes as a str->str dict.

    Attributes missing on the node (e.g. on ``Module``) are skipped.
    """
    attrs = {}
    for key in ATTRS:
        try:
            attrs[key] = str(getattr(ast_node, key))
        except AttributeError:
            continue
    return attrs


def visit_node(ast_node, parent_xml_node=None):
    """Recursively translate *ast_node* into an lxml element tree.

    Each AST node becomes an element named after its class; child AST nodes
    (and lists of them) become nested elements keyed by field name; any other
    field value becomes a leaf element carrying ``type`` and the value's str().

    Returns the element created for *ast_node* (the tree root when
    *parent_xml_node* is None).
    """
    xml_node_name = ast_node.__class__.__name__
    if parent_xml_node is None:
        xml_node = ET.Element(xml_node_name)
    else:
        xml_node = ET.SubElement(parent_xml_node, xml_node_name)
    xml_node.attrib.update(ast_node_attrs(ast_node))

    for key, value in ast_node.__dict__.items():
        # Private fields and location attributes are handled above, not here.
        if key.startswith("_") or key in ATTRS:
            continue
        if isinstance(value, ast.AST):
            sub_node = ET.SubElement(xml_node, key)
            visit_node(value, sub_node)
        elif isinstance(value, list) and all(
            isinstance(x, ast.AST) for x in value
        ):
            sub_node = ET.SubElement(xml_node, key)
            for node in value:
                visit_node(node, sub_node)
        else:
            # Scalar field (identifier, constant, ...) -> leaf element.
            node = ET.SubElement(xml_node, key)
            node.attrib["type"] = type(value).__name__
            node.text = str(value)
    return xml_node


def xml(src):
    """Parse *src* (a string of Python code) and return its XML tree."""
    ast_tree = ast.parse(src)
    return visit_node(ast_tree)


def file_xml(src_path):
    """Return the XML tree for the Python file at *src_path*."""
    with open(src_path) as src_file:
        src = src_file.read()
    return xml(src)


def module_xml(module_or_path):
    """Return the XML tree for a module object or an importable dotted path."""
    if isinstance(module_or_path, ModuleType):
        module = module_or_path
    else:
        module = import_module(module_or_path)
    return file_xml(module.__file__)


def source(src, xpath, until_xpath=None, dedent=True):
    """Extract source snippets for every AST element matched by *xpath*.

    Parameters:
        src: Python source code as a string.
        xpath: XPath selecting the elements whose source to extract.
        until_xpath: optional XPath selecting exactly one element; when given,
            every snippet ends just before that element's line instead of at
            the matched node's own ``end_lineno``.
        dedent: strip common leading whitespace from each snippet.

    Returns a list of ``(snippet, attrib)`` tuples.

    Raises ValueError when the ``until_xpath`` element precedes a matched
    node; AssertionError when ``until_xpath`` matches more than one element.
    """
    xml_tree = xml(src)
    src_lines = src.split("\n")
    sources = []
    until_lineno = None
    if until_xpath is not None:
        until = xml_tree.xpath(until_xpath)
        assert len(until) == 1, "until_xpath must return only one result"
        until_lineno = int(until[0].attrib["lineno"]) - 1
    for node in xml_tree.xpath(xpath):
        try:
            start_lineno = int(node.attrib["lineno"]) - 1
            if until_lineno is not None:
                if until_lineno < start_lineno:
                    raise ValueError(
                        f"until_lineno ({until_lineno}) must not be lower than "
                        f"start_lineno ({start_lineno})"
                    )
                end_lineno = until_lineno
            else:
                end_lineno = int(node.attrib["end_lineno"])
            snippet = "\n".join(src_lines[start_lineno:end_lineno])
        except (AttributeError, KeyError):
            # Skip matched elements without location info (e.g. field-wrapper
            # elements). lxml's attrib raises KeyError for a missing key, so
            # catching AttributeError alone never fired.
            continue
        if dedent:
            snippet = textwrap.dedent(snippet)
        sources.append((snippet, node.attrib))
    return sources


def file_source(src_path, *args, **kwargs):
    """Like :func:`source`, but reads the code from the file at *src_path*."""
    with open(src_path) as src_file:
        src = src_file.read()
    return source(src, *args, **kwargs)


def module_source(module_or_path, *args, **kwargs):
    """Like :func:`source`, but for a module object or importable dotted path."""
    if isinstance(module_or_path, ModuleType):
        module = module_or_path
    else:
        module = import_module(module_or_path)
    return file_source(module.__file__, *args, **kwargs)
28.692982
80
0.614797
import ast import textwrap from importlib import import_module from types import ModuleType from lxml import etree as ET ATTRS = ("lineno", "col_offset", "end_lineno", "end_col_offset") def ast_node_attrs(ast_node): attrs = {} for key in ATTRS: try: attrs[key] = str(getattr(ast_node, key)) except AttributeError: continue return attrs def visit_node(ast_node, parent_xml_node=None): xml_node_name = ast_node.__class__.__name__ if parent_xml_node is None: xml_node = ET.Element(xml_node_name) else: xml_node = ET.SubElement(parent_xml_node, xml_node_name) xml_node.attrib.update(ast_node_attrs(ast_node)) for key, value in ast_node.__dict__.items(): if key.startswith("_") or key in ATTRS: continue if isinstance(value, ast.AST): sub_node = ET.SubElement(xml_node, key) visit_node(value, sub_node) elif isinstance(value, list): if all(isinstance(x, ast.AST) for x in value): sub_node = ET.SubElement(xml_node, key) for node in value: visit_node(node, sub_node) else: node = ET.SubElement(xml_node, key) node.attrib["type"] = type(value).__name__ node.text = str(value) return xml_node def xml(src): ast_tree = ast.parse(src) return visit_node(ast_tree) def file_xml(src_path): with open(src_path) as src_file: src = src_file.read() return xml(src) def module_xml(module_or_path): if isinstance(module_or_path, ModuleType): module = module_or_path else: module = import_module(module_or_path) return file_xml(module.__file__) def source(src, xpath, until_xpath=None, dedent=True): xml_tree = xml(src) src_lines = src.split("\n") sources = [] until_lineno = None if until_xpath is not None: until = xml_tree.xpath(until_xpath) assert len(until) == 1, "until_xpath must return only one result" until_lineno = int(until[0].attrib["lineno"]) - 1 for node in xml_tree.xpath(xpath): try: start_lineno = int(node.attrib["lineno"]) - 1 if until_lineno is not None: if until_lineno < start_lineno: raise ValueError( f"until_lineno ({until_lineno}) must not be lower than " f"start_lineno ({start_lineno})" ) 
end_lineno = until_lineno else: end_lineno = int(node.attrib["end_lineno"]) src = "\n".join(src_lines[start_lineno:end_lineno]) except AttributeError: continue if dedent: src = textwrap.dedent(src) sources.append((src, node.attrib)) return sources def file_source(src_path, *args, **kwargs): with open(src_path) as src_file: src = src_file.read() return source(src, *args, **kwargs) def module_source(module_or_path, *args, **kwargs): if isinstance(module_or_path, ModuleType): module = module_or_path else: module = import_module(module_or_path) return file_source(module.__file__, *args, **kwargs)
true
true
f7f671a1438be1f3a8791cf089a2da43c38c1602
1,302
py
Python
immudb/handler/sqlquery.py
cn-demo/immudb-py
c480211ddf94f64cf45cac8c48a4b60a37b5bf4a
[ "Apache-2.0" ]
30
2020-09-11T14:30:19.000Z
2022-03-29T17:37:52.000Z
immudb/handler/sqlquery.py
cn-demo/immudb-py
c480211ddf94f64cf45cac8c48a4b60a37b5bf4a
[ "Apache-2.0" ]
17
2020-09-16T09:27:33.000Z
2022-02-01T17:47:50.000Z
immudb/handler/sqlquery.py
cn-demo/immudb-py
c480211ddf94f64cf45cac8c48a4b60a37b5bf4a
[ "Apache-2.0" ]
6
2020-09-16T18:19:38.000Z
2021-12-23T20:33:48.000Z
# Copyright 2021 CodeNotary, Inc. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from immudb.grpc import schema_pb2 from immudb.grpc import schema_pb2_grpc from immudb.rootService import RootService from immudb.typeconv import py_to_sqlvalue from immudb.typeconv import sqlvalue_to_py def call(service: schema_pb2_grpc.ImmuServiceStub, rs: RootService, query, params): paramsObj = [] for key, value in params.items(): paramsObj.append(schema_pb2.NamedParam( name=key, value=py_to_sqlvalue(value))) request = schema_pb2.SQLQueryRequest( sql=query, params=paramsObj) resp = service.SQLQuery(request) result = [] for row in resp.rows: result.append(tuple([sqlvalue_to_py(i) for i in row.values])) return result
37.2
83
0.742704
from immudb.grpc import schema_pb2 from immudb.grpc import schema_pb2_grpc from immudb.rootService import RootService from immudb.typeconv import py_to_sqlvalue from immudb.typeconv import sqlvalue_to_py def call(service: schema_pb2_grpc.ImmuServiceStub, rs: RootService, query, params): paramsObj = [] for key, value in params.items(): paramsObj.append(schema_pb2.NamedParam( name=key, value=py_to_sqlvalue(value))) request = schema_pb2.SQLQueryRequest( sql=query, params=paramsObj) resp = service.SQLQuery(request) result = [] for row in resp.rows: result.append(tuple([sqlvalue_to_py(i) for i in row.values])) return result
true
true
f7f672ab488d0e8d14ee5ebc5dca8975aef14c74
902
py
Python
1_webelement/send_keys_example.py
pavlovprojects/python_qa_webelements
987a6bc18918b5ad12bec1d699c31322f6889e29
[ "MIT" ]
null
null
null
1_webelement/send_keys_example.py
pavlovprojects/python_qa_webelements
987a6bc18918b5ad12bec1d699c31322f6889e29
[ "MIT" ]
null
null
null
1_webelement/send_keys_example.py
pavlovprojects/python_qa_webelements
987a6bc18918b5ad12bec1d699c31322f6889e29
[ "MIT" ]
null
null
null
import time from conftest import DRIVERS from selenium import webdriver from selenium.webdriver.common.keys import Keys chrome = webdriver.Chrome(executable_path=DRIVERS + "/chromedriver") chrome.implicitly_wait(5) chrome.get("https://konflic.github.io/front_example/") input_field = chrome.find_element_by_id("inp") input_field.send_keys("Hello, my dear friend!") time.sleep(1) input_field.clear() SPEED = 0.5 time.sleep(SPEED) input_field.send_keys("-=[ ]=-") input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.SPACE) time.sleep(SPEED) input_field.send_keys("COBRA") for _ in range(5): time.sleep(SPEED) input_field.send_keys(Keys.BACKSPACE) input_field.send_keys("SELENIUM")
21.47619
68
0.788248
import time from conftest import DRIVERS from selenium import webdriver from selenium.webdriver.common.keys import Keys chrome = webdriver.Chrome(executable_path=DRIVERS + "/chromedriver") chrome.implicitly_wait(5) chrome.get("https://konflic.github.io/front_example/") input_field = chrome.find_element_by_id("inp") input_field.send_keys("Hello, my dear friend!") time.sleep(1) input_field.clear() SPEED = 0.5 time.sleep(SPEED) input_field.send_keys("-=[ ]=-") input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.ARROW_LEFT) time.sleep(SPEED) input_field.send_keys(Keys.SPACE) time.sleep(SPEED) input_field.send_keys("COBRA") for _ in range(5): time.sleep(SPEED) input_field.send_keys(Keys.BACKSPACE) input_field.send_keys("SELENIUM")
true
true
f7f672bdc36f42e8c8b343ab747ef64b6c7ab9e4
17,128
py
Python
claripy/frontends/composite_frontend.py
embg/claripy
1a5e0ca61d3f480e541226f103900e983f025e4a
[ "BSD-2-Clause" ]
null
null
null
claripy/frontends/composite_frontend.py
embg/claripy
1a5e0ca61d3f480e541226f103900e983f025e4a
[ "BSD-2-Clause" ]
null
null
null
claripy/frontends/composite_frontend.py
embg/claripy
1a5e0ca61d3f480e541226f103900e983f025e4a
[ "BSD-2-Clause" ]
null
null
null
import logging l = logging.getLogger("claripy.frontends.composite_frontend") import weakref import itertools symbolic_count = itertools.count() from .constrained_frontend import ConstrainedFrontend from claripy.ast.strings import String class CompositeFrontend(ConstrainedFrontend): def __init__(self, template_frontend, template_frontend_string, track=False, **kwargs): super(CompositeFrontend, self).__init__(**kwargs) self._solvers = { } self._owned_solvers = weakref.WeakKeyDictionary() self._template_frontend = template_frontend self._template_frontend_string = template_frontend_string self._unsat = False self._track = track def _blank_copy(self, c): super(CompositeFrontend, self)._blank_copy(c) c._owned_solvers = weakref.WeakKeyDictionary() c._solvers = { } c._template_frontend = self._template_frontend if hasattr(self, '_template_frontend_string'): c._template_frontend_string = self._template_frontend_string c._unsat = False c._track = self._track def _copy(self, c): super(CompositeFrontend, self)._copy(c) c._unsat = self._unsat c._track = self._track c._solvers = dict(self._solvers) self._owned_solvers = weakref.WeakKeyDictionary() # for the COW return c # # Serialization stuff # def __getstate__(self): return self._solvers, self._template_frontend, self._unsat, self._track, super().__getstate__() def __setstate__(self, s): self._solvers, self._template_frontend, self._unsat, self._track, base_state = s self._owned_solvers = weakref.WeakKeyDictionary({s:True for s in self._solver_list}) super().__setstate__(base_state) def downsize(self): for e in self._solver_list: e.downsize() # # Frontend management # @property def _solver_list(self): seen_solvers = set() solver_list = [ ] for s in self._solvers.values(): if id(s) in seen_solvers: continue seen_solvers.add(id(s)) solver_list.append(s) return solver_list @property def variables(self): if len(self._solver_list) == 0: return set() else: return set.union(*[s.variables for s in self._solver_list]) # this is really 
hacky, but we want to avoid having our variables messed with @variables.setter def variables(self, v): pass # # Solver list management # def _solvers_for_variables(self, names): seen_solvers = set() existing_solvers = [ ] for n in names: if n not in self._solvers: continue s = self._solvers[n] if id(s) in seen_solvers: continue seen_solvers.add(id(s)) existing_solvers.append(s) return existing_solvers @staticmethod def _names_for(names=None, lst=None, lst2=None, e=None, v=None): if names is None: names = set() if e is not None and isinstance(e, Base): names.update(e.variables) if v is not None and isinstance(v, Base): names.update(v.variables) if lst is not None: for ee in lst: if isinstance(ee, Base): names.update(ee.variables) if lst2 is not None: for ee in lst2: if isinstance(ee, Base): names.update(ee.variables) return names def _merged_solver_for(self, *args, **kwargs): return self._solver_for_names(self._names_for(*args, **kwargs)) def _solver_for_names(self, names): l.debug("composite_solver._merged_solver_for() running with %d names", len(names)) solvers = self._solvers_for_variables(names) if len(solvers) == 0: if any(var for var in names if var.startswith(String.STRING_TYPE_IDENTIFIER)): l.debug("... creating new solver for strings") return self._template_frontend_string.blank_copy() else: l.debug("... creating new solver") return self._template_frontend.blank_copy() elif len(solvers) == 1: l.debug("... got one solver") return solvers[0] else: l.debug(".... combining %d solvers", len(solvers)) return solvers[0].combine(solvers[1:]) def _shared_solvers(self, others): """ Returns a sequence of the solvers that self and others share. 
""" solvers_by_id = { id(s): s for s in self._solver_list } common_solvers = set(solvers_by_id.keys()) other_sets = [ { id(s) for s in cs._solver_list } for cs in others ] for o in other_sets: common_solvers &= o return [ solvers_by_id[s] for s in common_solvers ] def _variable_sets(self): return { s.variables for s in self._solver_list } def _shared_varsets(self, others): common_varsets = self._variable_sets() for o in others: common_varsets &= o.all_varsets() return common_varsets def _split_child(self, s): ss = s.split() if len(ss) == 1: return [ s ] l.debug("... split solver %r into %d parts", s, len(ss)) l.debug("... variable counts: %s", [ len(cs.variables) for cs in ss ]) for ns in ss: self._owned_solvers[ns] = True self._store_child(ns) return ss def _reabsorb_solver(self, s): try: if len(s.variables) == 0 or self._solvers[min(iter(s.variables))] is s: return except KeyError: # this happens when a variable is introduced due to constraint expansion return if isinstance(s, ModelCacheMixin): new_solvers = s.split() old_solvers = self._solvers_for_variables(s.variables) if len(new_solvers) == len(old_solvers): done = set() for ss in s.split(): if ss in done: continue done.add(ss) v = min(iter(ss.variables)) self._solvers[v].update(ss) else: for ns in new_solvers: self._owned_solvers[ns] = True self._store_child(ns) def _store_child(self, ns, extra_names=frozenset()): for v in ns.variables | extra_names: #os = self._solvers[v] self._solvers[v] = ns #if isinstance(s, ModelCacheMixin): # if len(os._models) < len(ns._models): # print("GOT %d NEW MODELS (before: %d)" % ( # len(ns._models) - len(os._models), len(os._models) # )) # elif len(os._models) > len(ns._models): # print("WARNING: LOST %d NEW MODELS (before: %d)" % ( # len(os._models) - len(ns._models), len(os._models) # )) # else: # print("Remained at %d models." 
% len(os._models)) # # Constraints # def _claim(self, s): if s not in self._owned_solvers: sc = s.branch() self._owned_solvers[sc] = True return sc else: return s def _add_dependent_constraints(self, names, constraints, invalidate_cache=True, **kwargs): if not invalidate_cache and len(self._solvers_for_variables(names)) > 1: l.debug("Ignoring cross-solver helper constraints.") return [ ] l.debug("Adding %d constraints to %d names", len(constraints), len(names)) s = self._claim(self._merged_solver_for(names=names)) added = s.add(constraints, invalidate_cache=invalidate_cache, **kwargs) self._store_child(s) return added def add(self, constraints, **kwargs): #pylint:disable=arguments-differ split = self._split_constraints(constraints) child_added = [ ] l.debug("%s, solvers before: %d", self, len(self._solvers)) unsure = [ ] for names,set_constraints in split: if names == { 'CONCRETE' }: try: if any(backends.concrete.convert(c) is False for c in set_constraints): self._unsat = True except BackendError: unsure.extend(set_constraints) else: child_added += self._add_dependent_constraints(names, set_constraints, **kwargs) l.debug("... 
solvers after add: %d", len(self._solver_list)) if len(unsure) > 0: for s in self._solver_list: s = self._claim(s) s.add(unsure) self._store_child(s) return super(CompositeFrontend, self).add(child_added) # # Solving # def _ensure_sat(self, extra_constraints): if self._unsat or (len(extra_constraints) == 0 and not self.satisfiable()): raise UnsatError("CompositeSolver is already unsat") def check_satisfiability(self, extra_constraints=(), exact=None): if self._unsat: return 'UNSAT' l.debug("%r checking satisfiability...", self) if len(extra_constraints) != 0: extra_solver = self._merged_solver_for(lst=extra_constraints) extra_solver_satness = extra_solver.check_satisfiability(extra_constraints=extra_constraints, exact=exact) if extra_solver_satness in {'UNSAT', 'UNKNOWN'}: return extra_solver_satness satnesses = [ s.check_satisfiability(exact=exact) for s in self._solver_list if s.variables.isdisjoint(extra_solver.variables) ] self._reabsorb_solver(extra_solver) for satness in satnesses: if satness in {'UNSAT', 'UNKNOWN'}: return satness return 'SAT' else: for s in self._solver_list: satness = s.check_satisfiability() if satness in {'UNSAT', 'UNKNOWN'}: return satness return 'SAT' def satisfiable(self, extra_constraints=(), exact=None): if self._unsat: return False l.debug("%r checking satisfiability...", self) if len(extra_constraints) != 0: extra_solver = self._merged_solver_for(lst=extra_constraints) if not extra_solver.satisfiable(extra_constraints=extra_constraints, exact=exact): return False r = all( s.satisfiable(exact=exact) for s in self._solver_list if s.variables.isdisjoint(extra_solver.variables) ) self._reabsorb_solver(extra_solver) return r else: return all(s.satisfiable(exact=exact) for s in self._solver_list) def eval(self, e, n, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.eval(e, n, extra_constraints=extra_constraints, exact=exact) 
self._reabsorb_solver(ms) return r def batch_eval(self, exprs, n, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(lst2=exprs, lst=extra_constraints) r = ms.batch_eval(exprs, n, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def max(self, e, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.max(e, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def min(self, e, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.min(e, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def solution(self, e, v, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, v=v, lst=extra_constraints) r = ms.solution(e, v, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def is_true(self, e, extra_constraints=(), exact=None): #self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.is_true(e, extra_constraints=extra_constraints, exact=exact) #self._reabsorb_solver(ms) return r def is_false(self, e, extra_constraints=(), exact=None): #self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.is_false(e, extra_constraints=extra_constraints, exact=exact) #self._reabsorb_solver(ms) return r def unsat_core(self, extra_constraints=()): if self.satisfiable(extra_constraints=extra_constraints): return tuple() cores = [ ] for solver in self._solver_list: cores.extend(list(solver.unsat_core())) return cores def simplify(self): if self._unsat: return self.constraints new_constraints = [ ] 
l.debug("Simplifying %r with %d solvers", self, len(self._solver_list)) for s in self._solver_list: if isinstance(s, SimplifySkipperMixin) and s._simplified: new_constraints += s.constraints continue l.debug("... simplifying child solver %r", s) s.simplify() results = self._split_child(s) for ns in results: if isinstance(ns, SimplifySkipperMixin): ns._simplified = True new_constraints += s.constraints l.debug("... after-split, %r has %d solvers", self, len(self._solver_list)) self.constraints = new_constraints return new_constraints # # Merging and splitting # def finalize(self): for s in self._solver_list: s.finalize() @property def timeout(self): return self._template_frontend.timeout @timeout.setter def timeout(self, t): self._template_frontend.timeout = t for s in self._solver_list: s.timeout = t @staticmethod def _merge_with_ancestor(common_ancestor, merge_conditions): merged = common_ancestor.branch() merged.add([Or(*merge_conditions)]) #import ipdb; ipdb.set_trace() return True, merged def merge(self, others, merge_conditions, common_ancestor=None): if common_ancestor is not None: return self._merge_with_ancestor(common_ancestor, merge_conditions) l.debug("Merging %s with %d other solvers.", self, len(others)) merged = self.blank_copy() common_solvers = self._shared_solvers(others) common_ids = { id(s) for s in common_solvers } l.debug("... %s common solvers", len(common_solvers)) for s in common_solvers: self._owned_solvers.pop(s, None) for o in others: o._owned_solvers.pop(s, None) for v in s.variables: merged._solvers[v] = s noncommon_solvers = [ [ s for s in cs._solver_list if id(s) not in common_ids ] for cs in [self]+others ] l.debug("... merging noncommon solvers") combined_noncommons = [ ] for ns in noncommon_solvers: l.debug("... 
%d", len(ns)) if len(ns) == 0: pass elif len(ns) == 1: combined_noncommons.append(ns[0]) else: combined_noncommons.append(ns[0].combine(ns[1:])) if len(combined_noncommons): _, merged_noncommon = combined_noncommons[0].merge( combined_noncommons[1:], merge_conditions ) merged._owned_solvers[merged_noncommon] = True merged._store_child(merged_noncommon) merged.constraints = list( itertools.chain.from_iterable(a.constraints for a in merged._solver_list) ) return True, merged def combine(self, others): combined = self.blank_copy() combined.add(self.constraints) for o in others: combined.add(o.constraints) return combined def split(self): return [ s.branch() for s in self._solver_list ] from ..ast import Base from ..ast.bool import Or from .. import backends from ..errors import BackendError, UnsatError from ..frontend_mixins.model_cache_mixin import ModelCacheMixin from ..frontend_mixins.simplify_skipper_mixin import SimplifySkipperMixin
34.742394
118
0.603748
import logging l = logging.getLogger("claripy.frontends.composite_frontend") import weakref import itertools symbolic_count = itertools.count() from .constrained_frontend import ConstrainedFrontend from claripy.ast.strings import String class CompositeFrontend(ConstrainedFrontend): def __init__(self, template_frontend, template_frontend_string, track=False, **kwargs): super(CompositeFrontend, self).__init__(**kwargs) self._solvers = { } self._owned_solvers = weakref.WeakKeyDictionary() self._template_frontend = template_frontend self._template_frontend_string = template_frontend_string self._unsat = False self._track = track def _blank_copy(self, c): super(CompositeFrontend, self)._blank_copy(c) c._owned_solvers = weakref.WeakKeyDictionary() c._solvers = { } c._template_frontend = self._template_frontend if hasattr(self, '_template_frontend_string'): c._template_frontend_string = self._template_frontend_string c._unsat = False c._track = self._track def _copy(self, c): super(CompositeFrontend, self)._copy(c) c._unsat = self._unsat c._track = self._track c._solvers = dict(self._solvers) self._owned_solvers = weakref.WeakKeyDictionary() return c def __getstate__(self): return self._solvers, self._template_frontend, self._unsat, self._track, super().__getstate__() def __setstate__(self, s): self._solvers, self._template_frontend, self._unsat, self._track, base_state = s self._owned_solvers = weakref.WeakKeyDictionary({s:True for s in self._solver_list}) super().__setstate__(base_state) def downsize(self): for e in self._solver_list: e.downsize() @property def _solver_list(self): seen_solvers = set() solver_list = [ ] for s in self._solvers.values(): if id(s) in seen_solvers: continue seen_solvers.add(id(s)) solver_list.append(s) return solver_list @property def variables(self): if len(self._solver_list) == 0: return set() else: return set.union(*[s.variables for s in self._solver_list]) @variables.setter def variables(self, v): pass def _solvers_for_variables(self, 
names): seen_solvers = set() existing_solvers = [ ] for n in names: if n not in self._solvers: continue s = self._solvers[n] if id(s) in seen_solvers: continue seen_solvers.add(id(s)) existing_solvers.append(s) return existing_solvers @staticmethod def _names_for(names=None, lst=None, lst2=None, e=None, v=None): if names is None: names = set() if e is not None and isinstance(e, Base): names.update(e.variables) if v is not None and isinstance(v, Base): names.update(v.variables) if lst is not None: for ee in lst: if isinstance(ee, Base): names.update(ee.variables) if lst2 is not None: for ee in lst2: if isinstance(ee, Base): names.update(ee.variables) return names def _merged_solver_for(self, *args, **kwargs): return self._solver_for_names(self._names_for(*args, **kwargs)) def _solver_for_names(self, names): l.debug("composite_solver._merged_solver_for() running with %d names", len(names)) solvers = self._solvers_for_variables(names) if len(solvers) == 0: if any(var for var in names if var.startswith(String.STRING_TYPE_IDENTIFIER)): l.debug("... creating new solver for strings") return self._template_frontend_string.blank_copy() else: l.debug("... creating new solver") return self._template_frontend.blank_copy() elif len(solvers) == 1: l.debug("... got one solver") return solvers[0] else: l.debug(".... 
combining %d solvers", len(solvers)) return solvers[0].combine(solvers[1:]) def _shared_solvers(self, others): solvers_by_id = { id(s): s for s in self._solver_list } common_solvers = set(solvers_by_id.keys()) other_sets = [ { id(s) for s in cs._solver_list } for cs in others ] for o in other_sets: common_solvers &= o return [ solvers_by_id[s] for s in common_solvers ] def _variable_sets(self): return { s.variables for s in self._solver_list } def _shared_varsets(self, others): common_varsets = self._variable_sets() for o in others: common_varsets &= o.all_varsets() return common_varsets def _split_child(self, s): ss = s.split() if len(ss) == 1: return [ s ] l.debug("... split solver %r into %d parts", s, len(ss)) l.debug("... variable counts: %s", [ len(cs.variables) for cs in ss ]) for ns in ss: self._owned_solvers[ns] = True self._store_child(ns) return ss def _reabsorb_solver(self, s): try: if len(s.variables) == 0 or self._solvers[min(iter(s.variables))] is s: return except KeyError: return if isinstance(s, ModelCacheMixin): new_solvers = s.split() old_solvers = self._solvers_for_variables(s.variables) if len(new_solvers) == len(old_solvers): done = set() for ss in s.split(): if ss in done: continue done.add(ss) v = min(iter(ss.variables)) self._solvers[v].update(ss) else: for ns in new_solvers: self._owned_solvers[ns] = True self._store_child(ns) def _store_child(self, ns, extra_names=frozenset()): for v in ns.variables | extra_names: self._solvers[v] = ns def _claim(self, s): if s not in self._owned_solvers: sc = s.branch() self._owned_solvers[sc] = True return sc else: return s def _add_dependent_constraints(self, names, constraints, invalidate_cache=True, **kwargs): if not invalidate_cache and len(self._solvers_for_variables(names)) > 1: l.debug("Ignoring cross-solver helper constraints.") return [ ] l.debug("Adding %d constraints to %d names", len(constraints), len(names)) s = self._claim(self._merged_solver_for(names=names)) added = s.add(constraints, 
invalidate_cache=invalidate_cache, **kwargs) self._store_child(s) return added def add(self, constraints, **kwargs): split = self._split_constraints(constraints) child_added = [ ] l.debug("%s, solvers before: %d", self, len(self._solvers)) unsure = [ ] for names,set_constraints in split: if names == { 'CONCRETE' }: try: if any(backends.concrete.convert(c) is False for c in set_constraints): self._unsat = True except BackendError: unsure.extend(set_constraints) else: child_added += self._add_dependent_constraints(names, set_constraints, **kwargs) l.debug("... solvers after add: %d", len(self._solver_list)) if len(unsure) > 0: for s in self._solver_list: s = self._claim(s) s.add(unsure) self._store_child(s) return super(CompositeFrontend, self).add(child_added) def _ensure_sat(self, extra_constraints): if self._unsat or (len(extra_constraints) == 0 and not self.satisfiable()): raise UnsatError("CompositeSolver is already unsat") def check_satisfiability(self, extra_constraints=(), exact=None): if self._unsat: return 'UNSAT' l.debug("%r checking satisfiability...", self) if len(extra_constraints) != 0: extra_solver = self._merged_solver_for(lst=extra_constraints) extra_solver_satness = extra_solver.check_satisfiability(extra_constraints=extra_constraints, exact=exact) if extra_solver_satness in {'UNSAT', 'UNKNOWN'}: return extra_solver_satness satnesses = [ s.check_satisfiability(exact=exact) for s in self._solver_list if s.variables.isdisjoint(extra_solver.variables) ] self._reabsorb_solver(extra_solver) for satness in satnesses: if satness in {'UNSAT', 'UNKNOWN'}: return satness return 'SAT' else: for s in self._solver_list: satness = s.check_satisfiability() if satness in {'UNSAT', 'UNKNOWN'}: return satness return 'SAT' def satisfiable(self, extra_constraints=(), exact=None): if self._unsat: return False l.debug("%r checking satisfiability...", self) if len(extra_constraints) != 0: extra_solver = self._merged_solver_for(lst=extra_constraints) if not 
extra_solver.satisfiable(extra_constraints=extra_constraints, exact=exact): return False r = all( s.satisfiable(exact=exact) for s in self._solver_list if s.variables.isdisjoint(extra_solver.variables) ) self._reabsorb_solver(extra_solver) return r else: return all(s.satisfiable(exact=exact) for s in self._solver_list) def eval(self, e, n, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.eval(e, n, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def batch_eval(self, exprs, n, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(lst2=exprs, lst=extra_constraints) r = ms.batch_eval(exprs, n, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def max(self, e, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.max(e, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def min(self, e, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.min(e, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def solution(self, e, v, extra_constraints=(), exact=None): self._ensure_sat(extra_constraints=extra_constraints) ms = self._merged_solver_for(e=e, v=v, lst=extra_constraints) r = ms.solution(e, v, extra_constraints=extra_constraints, exact=exact) self._reabsorb_solver(ms) return r def is_true(self, e, extra_constraints=(), exact=None): ms = self._merged_solver_for(e=e, lst=extra_constraints) r = ms.is_true(e, extra_constraints=extra_constraints, exact=exact) return r def is_false(self, e, extra_constraints=(), exact=None): ms = self._merged_solver_for(e=e, lst=extra_constraints) 
r = ms.is_false(e, extra_constraints=extra_constraints, exact=exact) return r def unsat_core(self, extra_constraints=()): if self.satisfiable(extra_constraints=extra_constraints): return tuple() cores = [ ] for solver in self._solver_list: cores.extend(list(solver.unsat_core())) return cores def simplify(self): if self._unsat: return self.constraints new_constraints = [ ] l.debug("Simplifying %r with %d solvers", self, len(self._solver_list)) for s in self._solver_list: if isinstance(s, SimplifySkipperMixin) and s._simplified: new_constraints += s.constraints continue l.debug("... simplifying child solver %r", s) s.simplify() results = self._split_child(s) for ns in results: if isinstance(ns, SimplifySkipperMixin): ns._simplified = True new_constraints += s.constraints l.debug("... after-split, %r has %d solvers", self, len(self._solver_list)) self.constraints = new_constraints return new_constraints def finalize(self): for s in self._solver_list: s.finalize() @property def timeout(self): return self._template_frontend.timeout @timeout.setter def timeout(self, t): self._template_frontend.timeout = t for s in self._solver_list: s.timeout = t @staticmethod def _merge_with_ancestor(common_ancestor, merge_conditions): merged = common_ancestor.branch() merged.add([Or(*merge_conditions)]) return True, merged def merge(self, others, merge_conditions, common_ancestor=None): if common_ancestor is not None: return self._merge_with_ancestor(common_ancestor, merge_conditions) l.debug("Merging %s with %d other solvers.", self, len(others)) merged = self.blank_copy() common_solvers = self._shared_solvers(others) common_ids = { id(s) for s in common_solvers } l.debug("... %s common solvers", len(common_solvers)) for s in common_solvers: self._owned_solvers.pop(s, None) for o in others: o._owned_solvers.pop(s, None) for v in s.variables: merged._solvers[v] = s noncommon_solvers = [ [ s for s in cs._solver_list if id(s) not in common_ids ] for cs in [self]+others ] l.debug("... 
merging noncommon solvers") combined_noncommons = [ ] for ns in noncommon_solvers: l.debug("... %d", len(ns)) if len(ns) == 0: pass elif len(ns) == 1: combined_noncommons.append(ns[0]) else: combined_noncommons.append(ns[0].combine(ns[1:])) if len(combined_noncommons): _, merged_noncommon = combined_noncommons[0].merge( combined_noncommons[1:], merge_conditions ) merged._owned_solvers[merged_noncommon] = True merged._store_child(merged_noncommon) merged.constraints = list( itertools.chain.from_iterable(a.constraints for a in merged._solver_list) ) return True, merged def combine(self, others): combined = self.blank_copy() combined.add(self.constraints) for o in others: combined.add(o.constraints) return combined def split(self): return [ s.branch() for s in self._solver_list ] from ..ast import Base from ..ast.bool import Or from .. import backends from ..errors import BackendError, UnsatError from ..frontend_mixins.model_cache_mixin import ModelCacheMixin from ..frontend_mixins.simplify_skipper_mixin import SimplifySkipperMixin
true
true
f7f674176dcb1db9d4e00b3aac62d41e414b613c
3,099
py
Python
src/courses/tests/record_api/tests_purchasing_record.py
vlad-pro/education-backend
f766decc68e820296b6ea347021cf16e76322f32
[ "MIT" ]
null
null
null
src/courses/tests/record_api/tests_purchasing_record.py
vlad-pro/education-backend
f766decc68e820296b6ea347021cf16e76322f32
[ "MIT" ]
1
2022-02-10T12:08:02.000Z
2022-02-10T12:08:02.000Z
src/courses/tests/record_api/tests_purchasing_record.py
vlad-pro/education-backend
f766decc68e820296b6ea347021cf16e76322f32
[ "MIT" ]
null
null
null
from decimal import Decimal import pytest from orders.models import Order from tinkoff.client import TinkoffBank pytestmark = [pytest.mark.django_db] @pytest.fixture(autouse=True) def record(mixer): return mixer.blend('courses.Record', slug='home-video') @pytest.fixture(autouse=True) def payment_url(mocker): return mocker.patch.object(TinkoffBank, 'get_initial_payment_url', return_value='https://bank.test/pay/') @pytest.fixture def bank(mocker): class FakeBank: @classmethod def get_initial_payment_url(self): return 'https://bank.test/pay/' return mocker.patch.object(TinkoffBank, '__init__', return_value=None) def get_order(): return Order.objects.last() def test_order(api, record): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.item == record assert placed.price == Decimal('1900.00') def test_user(api, record): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.user.first_name == 'Забой' assert placed.user.last_name == 'Шахтёров' assert placed.user.email == 'zaboy@gmail.com' @pytest.mark.parametrize('wants_to_subscribe', [True, False]) def test_user_auto_subscription(api, wants_to_subscribe): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, 'subscribe': wants_to_subscribe, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.user.subscribed is wants_to_subscribe def test_by_default_user_is_not_subscribed(api): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.user.subscribed is False def test_redirect(api, 
record): response = api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302, as_response=True) assert response.status_code == 302 assert response['Location'] == 'https://bank.test/pay/' def test_custom_success_url(api, record, bank): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, 'success_url': 'https://ok.true/yes', }, format='multipart', expected_status_code=302) assert bank.call_args[1]['success_url'] == 'https://ok.true/yes' def test_invalid(client): response = client.post('/api/v2/records/home-video/purchase/', {}) assert response.status_code == 400
27.184211
109
0.664085
from decimal import Decimal import pytest from orders.models import Order from tinkoff.client import TinkoffBank pytestmark = [pytest.mark.django_db] @pytest.fixture(autouse=True) def record(mixer): return mixer.blend('courses.Record', slug='home-video') @pytest.fixture(autouse=True) def payment_url(mocker): return mocker.patch.object(TinkoffBank, 'get_initial_payment_url', return_value='https://bank.test/pay/') @pytest.fixture def bank(mocker): class FakeBank: @classmethod def get_initial_payment_url(self): return 'https://bank.test/pay/' return mocker.patch.object(TinkoffBank, '__init__', return_value=None) def get_order(): return Order.objects.last() def test_order(api, record): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.item == record assert placed.price == Decimal('1900.00') def test_user(api, record): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.user.first_name == 'Забой' assert placed.user.last_name == 'Шахтёров' assert placed.user.email == 'zaboy@gmail.com' @pytest.mark.parametrize('wants_to_subscribe', [True, False]) def test_user_auto_subscription(api, wants_to_subscribe): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, 'subscribe': wants_to_subscribe, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.user.subscribed is wants_to_subscribe def test_by_default_user_is_not_subscribed(api): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302) placed = get_order() assert placed.user.subscribed is False def test_redirect(api, 
record): response = api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, }, format='multipart', expected_status_code=302, as_response=True) assert response.status_code == 302 assert response['Location'] == 'https://bank.test/pay/' def test_custom_success_url(api, record, bank): api.post('/api/v2/records/home-video/purchase/', { 'name': 'Забой Шахтёров', 'email': 'zaboy@gmail.com', 'price': 1900, 'success_url': 'https://ok.true/yes', }, format='multipart', expected_status_code=302) assert bank.call_args[1]['success_url'] == 'https://ok.true/yes' def test_invalid(client): response = client.post('/api/v2/records/home-video/purchase/', {}) assert response.status_code == 400
true
true
f7f6742ab53bf622bcfe22680b6450833f1dccd6
24,740
py
Python
superset/connectors/sqla/views.py
ayuanty/superset
132a8ef2cb55fa6692ea31d5c278f102d6c2886b
[ "Apache-2.0" ]
19
2018-09-02T10:52:23.000Z
2022-03-24T09:43:48.000Z
superset/connectors/sqla/views.py
ayuanty/superset
132a8ef2cb55fa6692ea31d5c278f102d6c2886b
[ "Apache-2.0" ]
63
2021-06-12T18:25:14.000Z
2022-03-21T07:57:02.000Z
superset/connectors/sqla/views.py
ayuanty/superset
132a8ef2cb55fa6692ea31d5c278f102d6c2886b
[ "Apache-2.0" ]
15
2019-04-29T05:38:31.000Z
2022-02-12T10:47:54.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Views used by the SqlAlchemy connector""" import logging import re from dataclasses import dataclass, field from typing import Any, cast, Dict, List, Union from flask import current_app, flash, Markup, redirect from flask_appbuilder import CompactCRUDMixin, expose from flask_appbuilder.actions import action from flask_appbuilder.fieldwidgets import Select2Widget from flask_appbuilder.hooks import before_request from flask_appbuilder.models.sqla.interface import SQLAInterface from flask_appbuilder.security.decorators import has_access from flask_babel import gettext as __, lazy_gettext as _ from werkzeug.exceptions import NotFound from wtforms.ext.sqlalchemy.fields import QuerySelectField from wtforms.validators import Regexp from superset import app, db, is_feature_enabled from superset.connectors.base.views import DatasourceModelView from superset.connectors.sqla import models from superset.constants import MODEL_VIEW_RW_METHOD_PERMISSION_MAP, RouteMethod from superset.typing import FlaskResponse from superset.utils import core as utils from superset.views.base import ( check_ownership, create_table_permissions, DatasourceFilter, DeleteMixin, ListWidgetWithCheckboxes, SupersetListWidget, 
SupersetModelView, validate_sqlatable, YamlExportMixin, ) logger = logging.getLogger(__name__) class TableColumnInlineView( # pylint: disable=too-many-ancestors CompactCRUDMixin, SupersetModelView ): datamodel = SQLAInterface(models.TableColumn) # TODO TODO, review need for this on related_views class_permission_name = "Dataset" method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET list_title = _("Columns") show_title = _("Show Column") add_title = _("Add Column") edit_title = _("Edit Column") can_delete = False list_widget = ListWidgetWithCheckboxes edit_columns = [ "column_name", "verbose_name", "description", "type", "groupby", "filterable", "table", "expression", "is_dttm", "python_date_format", ] add_columns = edit_columns list_columns = [ "column_name", "verbose_name", "type", "groupby", "filterable", "is_dttm", ] page_size = 500 description_columns = { "is_dttm": _( "Whether to make this column available as a " "[Time Granularity] option, column has to be DATETIME or " "DATETIME-like" ), "filterable": _( "Whether this column is exposed in the `Filters` section " "of the explore view." ), "type": _( "The data type that was inferred by the database. " "It may be necessary to input a type manually for " "expression-defined columns in some cases. In most case " "users should not need to alter this." ), "expression": utils.markdown( "a valid, *non-aggregating* SQL expression as supported by the " "underlying backend. Example: `substr(name, 1, 1)`", True, ), "python_date_format": utils.markdown( Markup( "The pattern of timestamp format. For strings use " '<a href="https://docs.python.org/2/library/' 'datetime.html#strftime-strptime-behavior">' "python datetime string pattern</a> expression which needs to " 'adhere to the <a href="https://en.wikipedia.org/wiki/ISO_8601">' "ISO 8601</a> standard to ensure that the lexicographical ordering " "coincides with the chronological ordering. 
If the timestamp " "format does not adhere to the ISO 8601 standard you will need to " "define an expression and type for transforming the string into a " "date or timestamp. Note currently time zones are not supported. " "If time is stored in epoch format, put `epoch_s` or `epoch_ms`." "If no pattern is specified we fall back to using the optional " "defaults on a per database/column name level via the extra parameter." "" ), True, ), } label_columns = { "column_name": _("Column"), "verbose_name": _("Verbose Name"), "description": _("Description"), "groupby": _("Groupable"), "filterable": _("Filterable"), "table": _("Table"), "expression": _("Expression"), "is_dttm": _("Is temporal"), "python_date_format": _("Datetime Format"), "type": _("Type"), } validators_columns = { "python_date_format": [ # Restrict viable values to epoch_s, epoch_ms, or a strftime format # which adhere's to the ISO 8601 format (without time zone). Regexp( re.compile( r""" ^( epoch_s|epoch_ms| (?P<date>%Y(-%m(-%d)?)?)([\sT](?P<time>%H(:%M(:%S(\.%f)?)?)?))? 
)$ """, re.VERBOSE, ), message=_("Invalid date/timestamp format"), ) ] } add_form_extra_fields = { "table": QuerySelectField( "Table", query_factory=lambda: db.session.query(models.SqlaTable), allow_blank=True, widget=Select2Widget(extra_classes="readonly"), ) } edit_form_extra_fields = add_form_extra_fields def pre_add(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_update(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_delete(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) class SqlMetricInlineView( # pylint: disable=too-many-ancestors CompactCRUDMixin, SupersetModelView ): datamodel = SQLAInterface(models.SqlMetric) class_permission_name = "Dataset" method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET list_title = _("Metrics") show_title = _("Show Metric") add_title = _("Add Metric") edit_title = _("Edit Metric") list_columns = ["metric_name", "verbose_name", "metric_type"] edit_columns = [ "metric_name", "description", "verbose_name", "metric_type", "expression", "table", "d3format", "extra", "warning_text", ] description_columns = { "expression": utils.markdown( "a valid, *aggregating* SQL expression as supported by the " "underlying backend. Example: `count(DISTINCT userid)`", True, ), "d3format": utils.markdown( "d3 formatting string as defined [here]" "(https://github.com/d3/d3-format/blob/master/README.md#format). 
" "For instance, this default formatting applies in the Table " "visualization and allow for different metric to use different " "formats", True, ), "extra": utils.markdown( "Extra data to specify metric metadata. Currently supports " 'metadata of the format: `{ "certification": { "certified_by": ' '"Data Platform Team", "details": "This metric is the source of truth." ' '}, "warning_markdown": "This is a warning." }`. This should be modified ' "from the edit datasource model in Explore to ensure correct formatting.", True, ), } add_columns = edit_columns page_size = 500 label_columns = { "metric_name": _("Metric"), "description": _("Description"), "verbose_name": _("Verbose Name"), "metric_type": _("Type"), "expression": _("SQL Expression"), "table": _("Table"), "d3format": _("D3 Format"), "extra": _("Extra"), "warning_text": _("Warning Message"), } add_form_extra_fields = { "table": QuerySelectField( "Table", query_factory=lambda: db.session.query(models.SqlaTable), allow_blank=True, widget=Select2Widget(extra_classes="readonly"), ) } edit_form_extra_fields = add_form_extra_fields def pre_add(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_update(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_delete(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) class RowLevelSecurityListWidget( SupersetListWidget ): # pylint: disable=too-few-public-methods template = "superset/models/rls/list.html" def __init__(self, **kwargs: Any): kwargs["appbuilder"] = current_app.appbuilder 
super().__init__(**kwargs) class RowLevelSecurityFiltersModelView( # pylint: disable=too-many-ancestors SupersetModelView, DeleteMixin ): datamodel = SQLAInterface(models.RowLevelSecurityFilter) list_widget = cast(SupersetListWidget, RowLevelSecurityListWidget) list_title = _("Row level security filter") show_title = _("Show Row level security filter") add_title = _("Add Row level security filter") edit_title = _("Edit Row level security filter") list_columns = [ "filter_type", "tables", "roles", "group_key", "clause", "creator", "modified", ] order_columns = ["filter_type", "group_key", "clause", "modified"] edit_columns = ["filter_type", "tables", "roles", "group_key", "clause"] show_columns = edit_columns search_columns = ("filter_type", "tables", "roles", "group_key", "clause") add_columns = edit_columns base_order = ("changed_on", "desc") description_columns = { "filter_type": _( "Regular filters add where clauses to queries if a user belongs to a " "role referenced in the filter. Base filters apply filters to all queries " "except the roles defined in the filter, and can be used to define what " "users can see if no RLS filters within a filter group apply to them." ), "tables": _("These are the tables this filter will be applied to."), "roles": _( "For regular filters, these are the roles this filter will be " "applied to. For base filters, these are the roles that the " "filter DOES NOT apply to, e.g. Admin if admin should see all " "data." ), "group_key": _( "Filters with the same group key will be ORed together within the group, " "while different filter groups will be ANDed together. Undefined group " "keys are treated as unique groups, i.e. are not grouped together. 
" "For example, if a table has three filters, of which two are for " "departments Finance and Marketing (group key = 'department'), and one " "refers to the region Europe (group key = 'region'), the filter clause " "would apply the filter (department = 'Finance' OR department = " "'Marketing') AND (region = 'Europe')." ), "clause": _( "This is the condition that will be added to the WHERE clause. " "For example, to only return rows for a particular client, " "you might define a regular filter with the clause `client_id = 9`. To " "display no rows unless a user belongs to a RLS filter role, a base " "filter can be created with the clause `1 = 0` (always false)." ), } label_columns = { "tables": _("Tables"), "roles": _("Roles"), "clause": _("Clause"), "creator": _("Creator"), "modified": _("Modified"), } if app.config["RLS_FORM_QUERY_REL_FIELDS"]: add_form_query_rel_fields = app.config["RLS_FORM_QUERY_REL_FIELDS"] edit_form_query_rel_fields = add_form_query_rel_fields @staticmethod def is_enabled() -> bool: return is_feature_enabled("ROW_LEVEL_SECURITY") @before_request def ensure_enabled(self) -> None: if not self.is_enabled(): raise NotFound() class TableModelView( # pylint: disable=too-many-ancestors DatasourceModelView, DeleteMixin, YamlExportMixin ): datamodel = SQLAInterface(models.SqlaTable) class_permission_name = "Dataset" method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP include_route_methods = RouteMethod.CRUD_SET list_title = _("Tables") show_title = _("Show Table") add_title = _("Import a table definition") edit_title = _("Edit Table") list_columns = ["link", "database_name", "changed_by_", "modified"] order_columns = ["modified"] add_columns = ["database", "schema", "table_name"] edit_columns = [ "table_name", "sql", "filter_select_enabled", "fetch_values_predicate", "database", "schema", "description", "owners", "main_dttm_col", "default_endpoint", "offset", "cache_timeout", "is_sqllab_view", "template_params", "extra", ] base_filters = 
[["id", DatasourceFilter, lambda: []]] show_columns = edit_columns + ["perm", "slices"] related_views = [ TableColumnInlineView, SqlMetricInlineView, ] base_order = ("changed_on", "desc") search_columns = ("database", "schema", "table_name", "owners", "is_sqllab_view") description_columns = { "slices": _( "The list of charts associated with this table. By " "altering this datasource, you may change how these associated " "charts behave. " "Also note that charts need to point to a datasource, so " "this form will fail at saving if removing charts from a " "datasource. If you want to change the datasource for a chart, " "overwrite the chart from the 'explore view'" ), "offset": _("Timezone offset (in hours) for this datasource"), "table_name": _("Name of the table that exists in the source database"), "schema": _( "Schema, as used only in some databases like Postgres, Redshift " "and DB2" ), "description": Markup( 'Supports <a href="https://daringfireball.net/projects/markdown/">' "markdown</a>" ), "sql": _( "This fields acts a Superset view, meaning that Superset will " "run a query against this string as a subquery." ), "fetch_values_predicate": _( "Predicate applied when fetching distinct value to " "populate the filter control component. Supports " "jinja template syntax. Applies only when " "`Enable Filter Select` is on." ), "default_endpoint": _( "Redirects to this endpoint when clicking on the table " "from the table list" ), "filter_select_enabled": _( "Whether to populate the filter's dropdown in the explore " "view's filter section with a list of distinct values fetched " "from the backend on the fly" ), "is_sqllab_view": _( "Whether the table was generated by the 'Visualize' flow " "in SQL Lab" ), "template_params": _( "A set of parameters that become available in the query using " "Jinja templating syntax" ), "cache_timeout": _( "Duration (in seconds) of the caching timeout for this table. " "A timeout of 0 indicates that the cache never expires. 
" "Note this defaults to the database timeout if undefined." ), "extra": utils.markdown( "Extra data to specify table metadata. Currently supports " 'metadata of the format: `{ "certification": { "certified_by": ' '"Data Platform Team", "details": "This table is the source of truth." ' '}, "warning_markdown": "This is a warning." }`.', True, ), } label_columns = { "slices": _("Associated Charts"), "link": _("Table"), "changed_by_": _("Changed By"), "database": _("Database"), "database_name": _("Database"), "changed_on_": _("Last Changed"), "filter_select_enabled": _("Enable Filter Select"), "schema": _("Schema"), "default_endpoint": _("Default Endpoint"), "offset": _("Offset"), "cache_timeout": _("Cache Timeout"), "table_name": _("Table Name"), "fetch_values_predicate": _("Fetch Values Predicate"), "owners": _("Owners"), "main_dttm_col": _("Main Datetime Column"), "description": _("Description"), "is_sqllab_view": _("SQL Lab View"), "template_params": _("Template parameters"), "extra": _("Extra"), "modified": _("Modified"), } edit_form_extra_fields = { "database": QuerySelectField( "Database", query_factory=lambda: db.session.query(models.Database), widget=Select2Widget(extra_classes="readonly"), ) } def pre_add(self, item: "TableModelView") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) validate_sqlatable(item) def pre_update(self, item: "TableModelView") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item) def post_add( # pylint: disable=arguments-differ self, item: "TableModelView", flash_message: bool = True, fetch_metadata: bool = True, ) -> None: if fetch_metadata: item.fetch_metadata() create_table_permissions(item) if flash_message: flash( _( "The table was created. " "As part of this two-phase configuration " "process, you should now click the edit button by " "the new table to configure it." 
), "info", ) def post_update(self, item: "TableModelView") -> None: self.post_add(item, flash_message=False, fetch_metadata=False) def _delete(self, pk: int) -> None: DeleteMixin._delete(self, pk) @expose("/edit/<pk>", methods=["GET", "POST"]) @has_access def edit(self, pk: str) -> FlaskResponse: """Simple hack to redirect to explore view after saving""" resp = super().edit(pk) if isinstance(resp, str): return resp return redirect("/superset/explore/table/{}/".format(pk)) @action( "refresh", __("Refresh Metadata"), __("Refresh column metadata"), "fa-refresh" ) def refresh( # pylint: disable=no-self-use, too-many-branches self, tables: Union["TableModelView", List["TableModelView"]] ) -> FlaskResponse: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if not isinstance(tables, list): tables = [tables] @dataclass class RefreshResults: successes: List[TableModelView] = field(default_factory=list) failures: List[TableModelView] = field(default_factory=list) added: Dict[str, List[str]] = field(default_factory=dict) removed: Dict[str, List[str]] = field(default_factory=dict) modified: Dict[str, List[str]] = field(default_factory=dict) results = RefreshResults() for table_ in tables: try: metadata_results = table_.fetch_metadata() if metadata_results.added: results.added[table_.table_name] = metadata_results.added if metadata_results.removed: results.removed[table_.table_name] = metadata_results.removed if metadata_results.modified: results.modified[table_.table_name] = metadata_results.modified results.successes.append(table_) except Exception: # pylint: disable=broad-except results.failures.append(table_) if len(results.successes) > 0: success_msg = _( "Metadata refreshed for the following table(s): %(tables)s", tables=", ".join([t.table_name for t in results.successes]), ) flash(success_msg, "info") if results.added: added_tables = [] for table, cols in results.added.items(): added_tables.append(f"{table} ({', '.join(cols)})") flash( 
_( "The following tables added new columns: %(tables)s", tables=", ".join(added_tables), ), "info", ) if results.removed: removed_tables = [] for table, cols in results.removed.items(): removed_tables.append(f"{table} ({', '.join(cols)})") flash( _( "The following tables removed columns: %(tables)s", tables=", ".join(removed_tables), ), "info", ) if results.modified: modified_tables = [] for table, cols in results.modified.items(): modified_tables.append(f"{table} ({', '.join(cols)})") flash( _( "The following tables update column metadata: %(tables)s", tables=", ".join(modified_tables), ), "info", ) if len(results.failures) > 0: failure_msg = _( "Unable to refresh metadata for the following table(s): %(tables)s", tables=", ".join([t.table_name for t in results.failures]), ) flash(failure_msg, "danger") return redirect("/tablemodelview/list/") @expose("/list/") @has_access def list(self) -> FlaskResponse: if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"): return super().list() return super().render_app_template()
38.003072
87
0.597898
import logging import re from dataclasses import dataclass, field from typing import Any, cast, Dict, List, Union from flask import current_app, flash, Markup, redirect from flask_appbuilder import CompactCRUDMixin, expose from flask_appbuilder.actions import action from flask_appbuilder.fieldwidgets import Select2Widget from flask_appbuilder.hooks import before_request from flask_appbuilder.models.sqla.interface import SQLAInterface from flask_appbuilder.security.decorators import has_access from flask_babel import gettext as __, lazy_gettext as _ from werkzeug.exceptions import NotFound from wtforms.ext.sqlalchemy.fields import QuerySelectField from wtforms.validators import Regexp from superset import app, db, is_feature_enabled from superset.connectors.base.views import DatasourceModelView from superset.connectors.sqla import models from superset.constants import MODEL_VIEW_RW_METHOD_PERMISSION_MAP, RouteMethod from superset.typing import FlaskResponse from superset.utils import core as utils from superset.views.base import ( check_ownership, create_table_permissions, DatasourceFilter, DeleteMixin, ListWidgetWithCheckboxes, SupersetListWidget, SupersetModelView, validate_sqlatable, YamlExportMixin, ) logger = logging.getLogger(__name__) class TableColumnInlineView( CompactCRUDMixin, SupersetModelView ): datamodel = SQLAInterface(models.TableColumn) class_permission_name = "Dataset" method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET list_title = _("Columns") show_title = _("Show Column") add_title = _("Add Column") edit_title = _("Edit Column") can_delete = False list_widget = ListWidgetWithCheckboxes edit_columns = [ "column_name", "verbose_name", "description", "type", "groupby", "filterable", "table", "expression", "is_dttm", "python_date_format", ] add_columns = edit_columns list_columns = [ "column_name", "verbose_name", "type", "groupby", "filterable", "is_dttm", ] 
page_size = 500 description_columns = { "is_dttm": _( "Whether to make this column available as a " "[Time Granularity] option, column has to be DATETIME or " "DATETIME-like" ), "filterable": _( "Whether this column is exposed in the `Filters` section " "of the explore view." ), "type": _( "The data type that was inferred by the database. " "It may be necessary to input a type manually for " "expression-defined columns in some cases. In most case " "users should not need to alter this." ), "expression": utils.markdown( "a valid, *non-aggregating* SQL expression as supported by the " "underlying backend. Example: `substr(name, 1, 1)`", True, ), "python_date_format": utils.markdown( Markup( "The pattern of timestamp format. For strings use " '<a href="https://docs.python.org/2/library/' 'datetime.html#strftime-strptime-behavior">' "python datetime string pattern</a> expression which needs to " 'adhere to the <a href="https://en.wikipedia.org/wiki/ISO_8601">' "ISO 8601</a> standard to ensure that the lexicographical ordering " "coincides with the chronological ordering. If the timestamp " "format does not adhere to the ISO 8601 standard you will need to " "define an expression and type for transforming the string into a " "date or timestamp. Note currently time zones are not supported. " "If time is stored in epoch format, put `epoch_s` or `epoch_ms`." "If no pattern is specified we fall back to using the optional " "defaults on a per database/column name level via the extra parameter." 
"" ), True, ), } label_columns = { "column_name": _("Column"), "verbose_name": _("Verbose Name"), "description": _("Description"), "groupby": _("Groupable"), "filterable": _("Filterable"), "table": _("Table"), "expression": _("Expression"), "is_dttm": _("Is temporal"), "python_date_format": _("Datetime Format"), "type": _("Type"), } validators_columns = { "python_date_format": [ Regexp( re.compile( r""" ^( epoch_s|epoch_ms| (?P<date>%Y(-%m(-%d)?)?)([\sT](?P<time>%H(:%M(:%S(\.%f)?)?)?))? )$ """, re.VERBOSE, ), message=_("Invalid date/timestamp format"), ) ] } add_form_extra_fields = { "table": QuerySelectField( "Table", query_factory=lambda: db.session.query(models.SqlaTable), allow_blank=True, widget=Select2Widget(extra_classes="readonly"), ) } edit_form_extra_fields = add_form_extra_fields def pre_add(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_update(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_delete(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) class SqlMetricInlineView( # pylint: disable=too-many-ancestors CompactCRUDMixin, SupersetModelView ): datamodel = SQLAInterface(models.SqlMetric) class_permission_name = "Dataset" method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP include_route_methods = RouteMethod.RELATED_VIEW_SET | RouteMethod.API_SET list_title = _("Metrics") show_title = _("Show Metric") add_title = _("Add Metric") edit_title = _("Edit Metric") list_columns = ["metric_name", "verbose_name", "metric_type"] edit_columns = [ "metric_name", "description", 
"verbose_name", "metric_type", "expression", "table", "d3format", "extra", "warning_text", ] description_columns = { "expression": utils.markdown( "a valid, *aggregating* SQL expression as supported by the " "underlying backend. Example: `count(DISTINCT userid)`", True, ), "d3format": utils.markdown( "d3 formatting string as defined [here]" "(https://github.com/d3/d3-format/blob/master/README.md#format). " "For instance, this default formatting applies in the Table " "visualization and allow for different metric to use different " "formats", True, ), "extra": utils.markdown( "Extra data to specify metric metadata. Currently supports " 'metadata of the format: `{ "certification": { "certified_by": ' '"Data Platform Team", "details": "This metric is the source of truth." ' '}, "warning_markdown": "This is a warning." }`. This should be modified ' "from the edit datasource model in Explore to ensure correct formatting.", True, ), } add_columns = edit_columns page_size = 500 label_columns = { "metric_name": _("Metric"), "description": _("Description"), "verbose_name": _("Verbose Name"), "metric_type": _("Type"), "expression": _("SQL Expression"), "table": _("Table"), "d3format": _("D3 Format"), "extra": _("Extra"), "warning_text": _("Warning Message"), } add_form_extra_fields = { "table": QuerySelectField( "Table", query_factory=lambda: db.session.query(models.SqlaTable), allow_blank=True, widget=Select2Widget(extra_classes="readonly"), ) } edit_form_extra_fields = add_form_extra_fields def pre_add(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_update(self, item: "models.SqlMetric") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) def pre_delete(self, item: "models.SqlMetric") -> None: 
logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item.table) class RowLevelSecurityListWidget( SupersetListWidget ): # pylint: disable=too-few-public-methods template = "superset/models/rls/list.html" def __init__(self, **kwargs: Any): kwargs["appbuilder"] = current_app.appbuilder super().__init__(**kwargs) class RowLevelSecurityFiltersModelView( # pylint: disable=too-many-ancestors SupersetModelView, DeleteMixin ): datamodel = SQLAInterface(models.RowLevelSecurityFilter) list_widget = cast(SupersetListWidget, RowLevelSecurityListWidget) list_title = _("Row level security filter") show_title = _("Show Row level security filter") add_title = _("Add Row level security filter") edit_title = _("Edit Row level security filter") list_columns = [ "filter_type", "tables", "roles", "group_key", "clause", "creator", "modified", ] order_columns = ["filter_type", "group_key", "clause", "modified"] edit_columns = ["filter_type", "tables", "roles", "group_key", "clause"] show_columns = edit_columns search_columns = ("filter_type", "tables", "roles", "group_key", "clause") add_columns = edit_columns base_order = ("changed_on", "desc") description_columns = { "filter_type": _( "Regular filters add where clauses to queries if a user belongs to a " "role referenced in the filter. Base filters apply filters to all queries " "except the roles defined in the filter, and can be used to define what " "users can see if no RLS filters within a filter group apply to them." ), "tables": _("These are the tables this filter will be applied to."), "roles": _( "For regular filters, these are the roles this filter will be " "applied to. For base filters, these are the roles that the " "filter DOES NOT apply to, e.g. Admin if admin should see all " "data." 
), "group_key": _( "Filters with the same group key will be ORed together within the group, " "while different filter groups will be ANDed together. Undefined group " "keys are treated as unique groups, i.e. are not grouped together. " "For example, if a table has three filters, of which two are for " "departments Finance and Marketing (group key = 'department'), and one " "refers to the region Europe (group key = 'region'), the filter clause " "would apply the filter (department = 'Finance' OR department = " "'Marketing') AND (region = 'Europe')." ), "clause": _( "This is the condition that will be added to the WHERE clause. " "For example, to only return rows for a particular client, " "you might define a regular filter with the clause `client_id = 9`. To " "display no rows unless a user belongs to a RLS filter role, a base " "filter can be created with the clause `1 = 0` (always false)." ), } label_columns = { "tables": _("Tables"), "roles": _("Roles"), "clause": _("Clause"), "creator": _("Creator"), "modified": _("Modified"), } if app.config["RLS_FORM_QUERY_REL_FIELDS"]: add_form_query_rel_fields = app.config["RLS_FORM_QUERY_REL_FIELDS"] edit_form_query_rel_fields = add_form_query_rel_fields @staticmethod def is_enabled() -> bool: return is_feature_enabled("ROW_LEVEL_SECURITY") @before_request def ensure_enabled(self) -> None: if not self.is_enabled(): raise NotFound() class TableModelView( # pylint: disable=too-many-ancestors DatasourceModelView, DeleteMixin, YamlExportMixin ): datamodel = SQLAInterface(models.SqlaTable) class_permission_name = "Dataset" method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP include_route_methods = RouteMethod.CRUD_SET list_title = _("Tables") show_title = _("Show Table") add_title = _("Import a table definition") edit_title = _("Edit Table") list_columns = ["link", "database_name", "changed_by_", "modified"] order_columns = ["modified"] add_columns = ["database", "schema", "table_name"] edit_columns = [ "table_name", 
"sql", "filter_select_enabled", "fetch_values_predicate", "database", "schema", "description", "owners", "main_dttm_col", "default_endpoint", "offset", "cache_timeout", "is_sqllab_view", "template_params", "extra", ] base_filters = [["id", DatasourceFilter, lambda: []]] show_columns = edit_columns + ["perm", "slices"] related_views = [ TableColumnInlineView, SqlMetricInlineView, ] base_order = ("changed_on", "desc") search_columns = ("database", "schema", "table_name", "owners", "is_sqllab_view") description_columns = { "slices": _( "The list of charts associated with this table. By " "altering this datasource, you may change how these associated " "charts behave. " "Also note that charts need to point to a datasource, so " "this form will fail at saving if removing charts from a " "datasource. If you want to change the datasource for a chart, " "overwrite the chart from the 'explore view'" ), "offset": _("Timezone offset (in hours) for this datasource"), "table_name": _("Name of the table that exists in the source database"), "schema": _( "Schema, as used only in some databases like Postgres, Redshift " "and DB2" ), "description": Markup( 'Supports <a href="https://daringfireball.net/projects/markdown/">' "markdown</a>" ), "sql": _( "This fields acts a Superset view, meaning that Superset will " "run a query against this string as a subquery." ), "fetch_values_predicate": _( "Predicate applied when fetching distinct value to " "populate the filter control component. Supports " "jinja template syntax. Applies only when " "`Enable Filter Select` is on." 
), "default_endpoint": _( "Redirects to this endpoint when clicking on the table " "from the table list" ), "filter_select_enabled": _( "Whether to populate the filter's dropdown in the explore " "view's filter section with a list of distinct values fetched " "from the backend on the fly" ), "is_sqllab_view": _( "Whether the table was generated by the 'Visualize' flow " "in SQL Lab" ), "template_params": _( "A set of parameters that become available in the query using " "Jinja templating syntax" ), "cache_timeout": _( "Duration (in seconds) of the caching timeout for this table. " "A timeout of 0 indicates that the cache never expires. " "Note this defaults to the database timeout if undefined." ), "extra": utils.markdown( "Extra data to specify table metadata. Currently supports " 'metadata of the format: `{ "certification": { "certified_by": ' '"Data Platform Team", "details": "This table is the source of truth." ' '}, "warning_markdown": "This is a warning." }`.', True, ), } label_columns = { "slices": _("Associated Charts"), "link": _("Table"), "changed_by_": _("Changed By"), "database": _("Database"), "database_name": _("Database"), "changed_on_": _("Last Changed"), "filter_select_enabled": _("Enable Filter Select"), "schema": _("Schema"), "default_endpoint": _("Default Endpoint"), "offset": _("Offset"), "cache_timeout": _("Cache Timeout"), "table_name": _("Table Name"), "fetch_values_predicate": _("Fetch Values Predicate"), "owners": _("Owners"), "main_dttm_col": _("Main Datetime Column"), "description": _("Description"), "is_sqllab_view": _("SQL Lab View"), "template_params": _("Template parameters"), "extra": _("Extra"), "modified": _("Modified"), } edit_form_extra_fields = { "database": QuerySelectField( "Database", query_factory=lambda: db.session.query(models.Database), widget=Select2Widget(extra_classes="readonly"), ) } def pre_add(self, item: "TableModelView") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) 
validate_sqlatable(item) def pre_update(self, item: "TableModelView") -> None: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if app.config["OLD_API_CHECK_DATASET_OWNERSHIP"]: check_ownership(item) def post_add( # pylint: disable=arguments-differ self, item: "TableModelView", flash_message: bool = True, fetch_metadata: bool = True, ) -> None: if fetch_metadata: item.fetch_metadata() create_table_permissions(item) if flash_message: flash( _( "The table was created. " "As part of this two-phase configuration " "process, you should now click the edit button by " "the new table to configure it." ), "info", ) def post_update(self, item: "TableModelView") -> None: self.post_add(item, flash_message=False, fetch_metadata=False) def _delete(self, pk: int) -> None: DeleteMixin._delete(self, pk) @expose("/edit/<pk>", methods=["GET", "POST"]) @has_access def edit(self, pk: str) -> FlaskResponse: resp = super().edit(pk) if isinstance(resp, str): return resp return redirect("/superset/explore/table/{}/".format(pk)) @action( "refresh", __("Refresh Metadata"), __("Refresh column metadata"), "fa-refresh" ) def refresh( # pylint: disable=no-self-use, too-many-branches self, tables: Union["TableModelView", List["TableModelView"]] ) -> FlaskResponse: logger.warning( "This endpoint is deprecated and will be removed in version 2.0.0" ) if not isinstance(tables, list): tables = [tables] @dataclass class RefreshResults: successes: List[TableModelView] = field(default_factory=list) failures: List[TableModelView] = field(default_factory=list) added: Dict[str, List[str]] = field(default_factory=dict) removed: Dict[str, List[str]] = field(default_factory=dict) modified: Dict[str, List[str]] = field(default_factory=dict) results = RefreshResults() for table_ in tables: try: metadata_results = table_.fetch_metadata() if metadata_results.added: results.added[table_.table_name] = metadata_results.added if metadata_results.removed: 
results.removed[table_.table_name] = metadata_results.removed if metadata_results.modified: results.modified[table_.table_name] = metadata_results.modified results.successes.append(table_) except Exception: # pylint: disable=broad-except results.failures.append(table_) if len(results.successes) > 0: success_msg = _( "Metadata refreshed for the following table(s): %(tables)s", tables=", ".join([t.table_name for t in results.successes]), ) flash(success_msg, "info") if results.added: added_tables = [] for table, cols in results.added.items(): added_tables.append(f"{table} ({', '.join(cols)})") flash( _( "The following tables added new columns: %(tables)s", tables=", ".join(added_tables), ), "info", ) if results.removed: removed_tables = [] for table, cols in results.removed.items(): removed_tables.append(f"{table} ({', '.join(cols)})") flash( _( "The following tables removed columns: %(tables)s", tables=", ".join(removed_tables), ), "info", ) if results.modified: modified_tables = [] for table, cols in results.modified.items(): modified_tables.append(f"{table} ({', '.join(cols)})") flash( _( "The following tables update column metadata: %(tables)s", tables=", ".join(modified_tables), ), "info", ) if len(results.failures) > 0: failure_msg = _( "Unable to refresh metadata for the following table(s): %(tables)s", tables=", ".join([t.table_name for t in results.failures]), ) flash(failure_msg, "danger") return redirect("/tablemodelview/list/") @expose("/list/") @has_access def list(self) -> FlaskResponse: if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"): return super().list() return super().render_app_template()
true
true
f7f67484e0ce92f936518a75860e967f2c6761d2
5,823
py
Python
python_modules/dagster-test/dagster_test/toys/many_events.py
johannkm/dagster-okteto
7ad30528a4a92945967d68e59e27727a1e839c2b
[ "Apache-2.0" ]
1
2020-08-10T23:03:37.000Z
2020-08-10T23:03:37.000Z
python_modules/dagster-test/dagster_test/toys/many_events.py
johannkm/dagster-okteto
7ad30528a4a92945967d68e59e27727a1e839c2b
[ "Apache-2.0" ]
null
null
null
python_modules/dagster-test/dagster_test/toys/many_events.py
johannkm/dagster-okteto
7ad30528a4a92945967d68e59e27727a1e839c2b
[ "Apache-2.0" ]
1
2020-08-20T14:20:31.000Z
2020-08-20T14:20:31.000Z
from dagster import ( AssetMaterialization, EventMetadataEntry, ExpectationResult, InputDefinition, Nothing, Output, OutputDefinition, file_relative_path, pipeline, solid, ) MARKDOWN_EXAMPLE = 'markdown_example.md' raw_files = [ 'raw_file_users', 'raw_file_groups', 'raw_file_events', 'raw_file_friends', 'raw_file_pages', 'raw_file_fans', 'raw_file_event_admins', 'raw_file_group_admins', ] def create_raw_file_solid(name): def do_expectation(_context, _value): return ExpectationResult( success=True, label='output_table_exists', description='Checked {name} exists'.format(name=name), ) @solid( name=name, description='Inject raw file for input to table {} and do expectation on output'.format( name ), ) def raw_file_solid(_context): yield AssetMaterialization( asset_key='table_info', metadata_entries=[ EventMetadataEntry.path(label='table_path', path='/path/to/{}.raw'.format(name)) ], ) yield do_expectation(_context, name) yield Output(name) return raw_file_solid raw_tables = [ 'raw_users', 'raw_groups', 'raw_events', 'raw_friends', 'raw_pages', 'raw_fans', 'raw_event_admins', 'raw_group_admins', ] def create_raw_file_solids(): return list(map(create_raw_file_solid, raw_files)) def input_name_for_raw_file(raw_file): return raw_file + '_ready' @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[OutputDefinition(Nothing)], description='Load a bunch of raw tables from corresponding files', ) def many_table_materializations(_context): with open(file_relative_path(__file__, MARKDOWN_EXAMPLE), 'r') as f: md_str = f.read() for table in raw_tables: yield AssetMaterialization( asset_key='table_info', metadata_entries=[ EventMetadataEntry.text(text=table, label='table_name'), EventMetadataEntry.fspath(path='/path/to/{}'.format(table), label='table_path'), EventMetadataEntry.json(data={'name': table}, label='table_data'), EventMetadataEntry.url( url='https://bigty.pe/{}'.format(table), label='table_name_big' ), EventMetadataEntry.md(md_str=md_str, 
label='table_blurb'), ], ) @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[OutputDefinition(Nothing)], description='This simulates a solid that would wrap something like dbt, ' 'where it emits a bunch of tables and then say an expectation on each table, ' 'all in one solid', ) def many_materializations_and_passing_expectations(_context): tables = [ 'users', 'groups', 'events', 'friends', 'pages', 'fans', 'event_admins', 'group_admins', ] for table in tables: yield AssetMaterialization( asset_key='table_info', metadata_entries=[ EventMetadataEntry.path(label='table_path', path='/path/to/{}.raw'.format(table)) ], ) yield ExpectationResult( success=True, label='{table}.row_count'.format(table=table), description='Row count passed for {table}'.format(table=table), ) @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[], description='A solid that just does a couple inline expectations, one of which fails', ) def check_users_and_groups_one_fails_one_succeeds(_context): yield ExpectationResult( success=True, label='user_expectations', description='Battery of expectations for user', metadata_entries=[ EventMetadataEntry.json( label='table_summary', data={ 'columns': { 'name': {'nulls': 0, 'empty': 0, 'values': 123, 'average_length': 3.394893}, 'time_created': {'nulls': 1, 'empty': 2, 'values': 120, 'average': 1231283}, } }, ) ], ) yield ExpectationResult( success=False, label='groups_expectations', description='Battery of expectations for groups', metadata_entries=[ EventMetadataEntry.json( label='table_summary', data={ 'columns': { 'name': {'nulls': 1, 'empty': 0, 'values': 122, 'average_length': 3.394893}, 'time_created': {'nulls': 1, 'empty': 2, 'values': 120, 'average': 1231283}, } }, ) ], ) @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[], description='A solid that just does a couple inline expectations', ) def check_admins_both_succeed(_context): yield ExpectationResult(success=True, label='Group admins 
check out') yield ExpectationResult(success=True, label='Event admins check out') @pipeline( description=( 'Demo pipeline that yields AssetMaterializations and ExpectationResults, along with the ' 'various forms of metadata that can be attached to them.' ) ) def many_events(): raw_files_solids = [raw_file_solid() for raw_file_solid in create_raw_file_solids()] mtm = many_table_materializations(raw_files_solids) mmape = many_materializations_and_passing_expectations(mtm) check_users_and_groups_one_fails_one_succeeds(mmape) check_admins_both_succeed(mmape)
29.861538
100
0.603469
from dagster import ( AssetMaterialization, EventMetadataEntry, ExpectationResult, InputDefinition, Nothing, Output, OutputDefinition, file_relative_path, pipeline, solid, ) MARKDOWN_EXAMPLE = 'markdown_example.md' raw_files = [ 'raw_file_users', 'raw_file_groups', 'raw_file_events', 'raw_file_friends', 'raw_file_pages', 'raw_file_fans', 'raw_file_event_admins', 'raw_file_group_admins', ] def create_raw_file_solid(name): def do_expectation(_context, _value): return ExpectationResult( success=True, label='output_table_exists', description='Checked {name} exists'.format(name=name), ) @solid( name=name, description='Inject raw file for input to table {} and do expectation on output'.format( name ), ) def raw_file_solid(_context): yield AssetMaterialization( asset_key='table_info', metadata_entries=[ EventMetadataEntry.path(label='table_path', path='/path/to/{}.raw'.format(name)) ], ) yield do_expectation(_context, name) yield Output(name) return raw_file_solid raw_tables = [ 'raw_users', 'raw_groups', 'raw_events', 'raw_friends', 'raw_pages', 'raw_fans', 'raw_event_admins', 'raw_group_admins', ] def create_raw_file_solids(): return list(map(create_raw_file_solid, raw_files)) def input_name_for_raw_file(raw_file): return raw_file + '_ready' @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[OutputDefinition(Nothing)], description='Load a bunch of raw tables from corresponding files', ) def many_table_materializations(_context): with open(file_relative_path(__file__, MARKDOWN_EXAMPLE), 'r') as f: md_str = f.read() for table in raw_tables: yield AssetMaterialization( asset_key='table_info', metadata_entries=[ EventMetadataEntry.text(text=table, label='table_name'), EventMetadataEntry.fspath(path='/path/to/{}'.format(table), label='table_path'), EventMetadataEntry.json(data={'name': table}, label='table_data'), EventMetadataEntry.url( url='https://bigty.pe/{}'.format(table), label='table_name_big' ), EventMetadataEntry.md(md_str=md_str, 
label='table_blurb'), ], ) @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[OutputDefinition(Nothing)], description='This simulates a solid that would wrap something like dbt, ' 'where it emits a bunch of tables and then say an expectation on each table, ' 'all in one solid', ) def many_materializations_and_passing_expectations(_context): tables = [ 'users', 'groups', 'events', 'friends', 'pages', 'fans', 'event_admins', 'group_admins', ] for table in tables: yield AssetMaterialization( asset_key='table_info', metadata_entries=[ EventMetadataEntry.path(label='table_path', path='/path/to/{}.raw'.format(table)) ], ) yield ExpectationResult( success=True, label='{table}.row_count'.format(table=table), description='Row count passed for {table}'.format(table=table), ) @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[], description='A solid that just does a couple inline expectations, one of which fails', ) def check_users_and_groups_one_fails_one_succeeds(_context): yield ExpectationResult( success=True, label='user_expectations', description='Battery of expectations for user', metadata_entries=[ EventMetadataEntry.json( label='table_summary', data={ 'columns': { 'name': {'nulls': 0, 'empty': 0, 'values': 123, 'average_length': 3.394893}, 'time_created': {'nulls': 1, 'empty': 2, 'values': 120, 'average': 1231283}, } }, ) ], ) yield ExpectationResult( success=False, label='groups_expectations', description='Battery of expectations for groups', metadata_entries=[ EventMetadataEntry.json( label='table_summary', data={ 'columns': { 'name': {'nulls': 1, 'empty': 0, 'values': 122, 'average_length': 3.394893}, 'time_created': {'nulls': 1, 'empty': 2, 'values': 120, 'average': 1231283}, } }, ) ], ) @solid( input_defs=[InputDefinition('start', Nothing)], output_defs=[], description='A solid that just does a couple inline expectations', ) def check_admins_both_succeed(_context): yield ExpectationResult(success=True, label='Group admins 
check out') yield ExpectationResult(success=True, label='Event admins check out') @pipeline( description=( 'Demo pipeline that yields AssetMaterializations and ExpectationResults, along with the ' 'various forms of metadata that can be attached to them.' ) ) def many_events(): raw_files_solids = [raw_file_solid() for raw_file_solid in create_raw_file_solids()] mtm = many_table_materializations(raw_files_solids) mmape = many_materializations_and_passing_expectations(mtm) check_users_and_groups_one_fails_one_succeeds(mmape) check_admins_both_succeed(mmape)
true
true
f7f674ce1497bea15a75bed87fe3639c82743e91
7,358
py
Python
benchmarks/benchmarks/linalg.py
jcharlong/scipy
153467a9174b0c6f4b90ffeed5871e5018658108
[ "BSD-3-Clause" ]
9,095
2015-01-02T18:24:23.000Z
2022-03-31T20:35:31.000Z
benchmarks/benchmarks/linalg.py
jcharlong/scipy
153467a9174b0c6f4b90ffeed5871e5018658108
[ "BSD-3-Clause" ]
11,500
2015-01-01T01:15:30.000Z
2022-03-31T23:07:35.000Z
benchmarks/benchmarks/linalg.py
jcharlong/scipy
153467a9174b0c6f4b90ffeed5871e5018658108
[ "BSD-3-Clause" ]
5,838
2015-01-05T11:56:42.000Z
2022-03-31T23:21:19.000Z
import math import numpy.linalg as nl import numpy as np from numpy.testing import assert_ from numpy.random import rand from .common import Benchmark, safe_import with safe_import(): import scipy.linalg as sl def random(size): return rand(*size) class Bench(Benchmark): params = [ [20, 100, 500, 1000], ['contig', 'nocont'], ['numpy', 'scipy'] ] param_names = ['size', 'contiguous', 'module'] def __init__(self): # likely not useful to benchmark svd for large sizes self.time_svd.__func__.params = [[20, 100, 500]] + self.params[1:] def setup(self, size, contig, module): if module == 'numpy' and size >= 200: # skip: slow, and not useful to benchmark numpy raise NotImplementedError() a = random([size, size]) # larger diagonal ensures non-singularity: for i in range(size): a[i, i] = 10*(.1+a[i, i]) b = random([size]) if contig != 'contig': a = a[-1::-1, -1::-1] # turn into a non-contiguous array assert_(not a.flags['CONTIGUOUS']) self.a = a self.b = b def time_solve(self, size, contig, module): if module == 'numpy': nl.solve(self.a, self.b) else: sl.solve(self.a, self.b) def time_solve_triangular(self, size, contig, module): # treats self.a as a lower-triangular matrix by ignoring the strictly # upper-triangular part if module == 'numpy': pass else: sl.solve_triangular(self.a, self.b, lower=True) def time_inv(self, size, contig, module): if module == 'numpy': nl.inv(self.a) else: sl.inv(self.a) def time_det(self, size, contig, module): if module == 'numpy': nl.det(self.a) else: sl.det(self.a) def time_eigvals(self, size, contig, module): if module == 'numpy': nl.eigvals(self.a) else: sl.eigvals(self.a) def time_svd(self, size, contig, module): if module == 'numpy': nl.svd(self.a) else: sl.svd(self.a) # Retain old benchmark results (remove this if changing the benchmark) time_det.version = "87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325" time_eigvals.version = "9d68d3a6b473df9bdda3d3fd25c7f9aeea7d5cee869eec730fb2a2bcd1dfb907" time_inv.version = 
"20beee193c84a5713da9749246a7c40ef21590186c35ed00a4fe854cce9e153b" time_solve.version = "1fe788070f1c9132cbe78a47fdb4cce58266427fc636d2aa9450e3c7d92c644c" time_svd.version = "0ccbda456d096e459d4a6eefc6c674a815179e215f83931a81cfa8c18e39d6e3" class Norm(Benchmark): params = [ [(20, 20), (100, 100), (1000, 1000), (20, 1000), (1000, 20)], ['contig', 'nocont'], ['numpy', 'scipy'] ] param_names = ['shape', 'contiguous', 'module'] def setup(self, shape, contig, module): a = np.random.randn(*shape) if contig != 'contig': a = a[-1::-1,-1::-1] # turn into a non-contiguous array assert_(not a.flags['CONTIGUOUS']) self.a = a def time_1_norm(self, size, contig, module): if module == 'numpy': nl.norm(self.a, ord=1) else: sl.norm(self.a, ord=1) def time_inf_norm(self, size, contig, module): if module == 'numpy': nl.norm(self.a, ord=np.inf) else: sl.norm(self.a, ord=np.inf) def time_frobenius_norm(self, size, contig, module): if module == 'numpy': nl.norm(self.a) else: sl.norm(self.a) class Lstsq(Benchmark): """ Test the speed of four least-squares solvers on not full rank matrices. Also check the difference in the solutions. The matrix has the size ``(m, 2/3*m)``; the rank is ``1/2 * m``. Matrix values are random in the range (-5, 5), the same is for the right hand side. The complex matrix is the sum of real and imaginary matrices. """ param_names = ['dtype', 'size', 'driver'] params = [ [np.float64, np.complex128], [10, 100, 1000], ['gelss', 'gelsy', 'gelsd', 'numpy'], ] def setup(self, dtype, size, lapack_driver): if lapack_driver == 'numpy' and size >= 200: # skip: slow, and not useful to benchmark numpy raise NotImplementedError() rng = np.random.default_rng(1234) n = math.ceil(2./3. * size) k = math.ceil(1./2. 
* size) m = size if dtype is np.complex128: A = ((10 * rng.random((m,k)) - 5) + 1j*(10 * rng.random((m,k)) - 5)) temp = ((10 * rng.random((k,n)) - 5) + 1j*(10 * rng.random((k,n)) - 5)) b = ((10 * rng.random((m,1)) - 5) + 1j*(10 * rng.random((m,1)) - 5)) else: A = (10 * rng.random((m,k)) - 5) temp = 10 * rng.random((k,n)) - 5 b = 10 * rng.random((m,1)) - 5 self.A = A.dot(temp) self.b = b def time_lstsq(self, dtype, size, lapack_driver): if lapack_driver == 'numpy': np.linalg.lstsq(self.A, self.b, rcond=np.finfo(self.A.dtype).eps * 100) else: sl.lstsq(self.A, self.b, cond=None, overwrite_a=False, overwrite_b=False, check_finite=False, lapack_driver=lapack_driver) # Retain old benchmark results (remove this if changing the benchmark) time_lstsq.version = "15ee0be14a0a597c7d1c9a3dab2c39e15c8ac623484410ffefa406bf6b596ebe" class SpecialMatrices(Benchmark): param_names = ['size'] params = [[4, 128]] def setup(self, size): self.x = np.arange(1, size + 1).astype(float) self.small_blocks = [np.ones([2, 2])] * (size//2) self.big_blocks = [np.ones([size//2, size//2]), np.ones([size//2, size//2])] def time_block_diag_small(self, size): sl.block_diag(*self.small_blocks) def time_block_diag_big(self, size): sl.block_diag(*self.big_blocks) def time_circulant(self, size): sl.circulant(self.x) def time_companion(self, size): sl.companion(self.x) def time_dft(self, size): sl.dft(size) def time_hadamard(self, size): sl.hadamard(size) def time_hankel(self, size): sl.hankel(self.x) def time_helmert(self, size): sl.helmert(size) def time_hilbert(self, size): sl.hilbert(size) def time_invhilbert(self, size): sl.invhilbert(size) def time_leslie(self, size): sl.leslie(self.x, self.x[1:]) def time_pascal(self, size): sl.pascal(size) def time_invpascal(self, size): sl.invpascal(size) def time_toeplitz(self, size): sl.toeplitz(self.x) def time_tri(self, size): sl.tri(size) class GetFuncs(Benchmark): def setup(self): self.x = np.eye(1) def time_get_blas_funcs(self): 
sl.blas.get_blas_funcs('gemm', dtype=float) def time_get_blas_funcs_2(self): sl.blas.get_blas_funcs(('gemm', 'axpy'), (self.x, self.x)) def time_small_cholesky(self): sl.cholesky(self.x)
29.198413
93
0.572302
import math import numpy.linalg as nl import numpy as np from numpy.testing import assert_ from numpy.random import rand from .common import Benchmark, safe_import with safe_import(): import scipy.linalg as sl def random(size): return rand(*size) class Bench(Benchmark): params = [ [20, 100, 500, 1000], ['contig', 'nocont'], ['numpy', 'scipy'] ] param_names = ['size', 'contiguous', 'module'] def __init__(self): self.time_svd.__func__.params = [[20, 100, 500]] + self.params[1:] def setup(self, size, contig, module): if module == 'numpy' and size >= 200: raise NotImplementedError() a = random([size, size]) for i in range(size): a[i, i] = 10*(.1+a[i, i]) b = random([size]) if contig != 'contig': a = a[-1::-1, -1::-1] assert_(not a.flags['CONTIGUOUS']) self.a = a self.b = b def time_solve(self, size, contig, module): if module == 'numpy': nl.solve(self.a, self.b) else: sl.solve(self.a, self.b) def time_solve_triangular(self, size, contig, module): if module == 'numpy': pass else: sl.solve_triangular(self.a, self.b, lower=True) def time_inv(self, size, contig, module): if module == 'numpy': nl.inv(self.a) else: sl.inv(self.a) def time_det(self, size, contig, module): if module == 'numpy': nl.det(self.a) else: sl.det(self.a) def time_eigvals(self, size, contig, module): if module == 'numpy': nl.eigvals(self.a) else: sl.eigvals(self.a) def time_svd(self, size, contig, module): if module == 'numpy': nl.svd(self.a) else: sl.svd(self.a) time_det.version = "87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325" time_eigvals.version = "9d68d3a6b473df9bdda3d3fd25c7f9aeea7d5cee869eec730fb2a2bcd1dfb907" time_inv.version = "20beee193c84a5713da9749246a7c40ef21590186c35ed00a4fe854cce9e153b" time_solve.version = "1fe788070f1c9132cbe78a47fdb4cce58266427fc636d2aa9450e3c7d92c644c" time_svd.version = "0ccbda456d096e459d4a6eefc6c674a815179e215f83931a81cfa8c18e39d6e3" class Norm(Benchmark): params = [ [(20, 20), (100, 100), (1000, 1000), (20, 1000), (1000, 20)], ['contig', 
'nocont'], ['numpy', 'scipy'] ] param_names = ['shape', 'contiguous', 'module'] def setup(self, shape, contig, module): a = np.random.randn(*shape) if contig != 'contig': a = a[-1::-1,-1::-1] assert_(not a.flags['CONTIGUOUS']) self.a = a def time_1_norm(self, size, contig, module): if module == 'numpy': nl.norm(self.a, ord=1) else: sl.norm(self.a, ord=1) def time_inf_norm(self, size, contig, module): if module == 'numpy': nl.norm(self.a, ord=np.inf) else: sl.norm(self.a, ord=np.inf) def time_frobenius_norm(self, size, contig, module): if module == 'numpy': nl.norm(self.a) else: sl.norm(self.a) class Lstsq(Benchmark): param_names = ['dtype', 'size', 'driver'] params = [ [np.float64, np.complex128], [10, 100, 1000], ['gelss', 'gelsy', 'gelsd', 'numpy'], ] def setup(self, dtype, size, lapack_driver): if lapack_driver == 'numpy' and size >= 200: raise NotImplementedError() rng = np.random.default_rng(1234) n = math.ceil(2./3. * size) k = math.ceil(1./2. * size) m = size if dtype is np.complex128: A = ((10 * rng.random((m,k)) - 5) + 1j*(10 * rng.random((m,k)) - 5)) temp = ((10 * rng.random((k,n)) - 5) + 1j*(10 * rng.random((k,n)) - 5)) b = ((10 * rng.random((m,1)) - 5) + 1j*(10 * rng.random((m,1)) - 5)) else: A = (10 * rng.random((m,k)) - 5) temp = 10 * rng.random((k,n)) - 5 b = 10 * rng.random((m,1)) - 5 self.A = A.dot(temp) self.b = b def time_lstsq(self, dtype, size, lapack_driver): if lapack_driver == 'numpy': np.linalg.lstsq(self.A, self.b, rcond=np.finfo(self.A.dtype).eps * 100) else: sl.lstsq(self.A, self.b, cond=None, overwrite_a=False, overwrite_b=False, check_finite=False, lapack_driver=lapack_driver) time_lstsq.version = "15ee0be14a0a597c7d1c9a3dab2c39e15c8ac623484410ffefa406bf6b596ebe" class SpecialMatrices(Benchmark): param_names = ['size'] params = [[4, 128]] def setup(self, size): self.x = np.arange(1, size + 1).astype(float) self.small_blocks = [np.ones([2, 2])] * (size//2) self.big_blocks = [np.ones([size//2, size//2]), np.ones([size//2, size//2])] def 
time_block_diag_small(self, size): sl.block_diag(*self.small_blocks) def time_block_diag_big(self, size): sl.block_diag(*self.big_blocks) def time_circulant(self, size): sl.circulant(self.x) def time_companion(self, size): sl.companion(self.x) def time_dft(self, size): sl.dft(size) def time_hadamard(self, size): sl.hadamard(size) def time_hankel(self, size): sl.hankel(self.x) def time_helmert(self, size): sl.helmert(size) def time_hilbert(self, size): sl.hilbert(size) def time_invhilbert(self, size): sl.invhilbert(size) def time_leslie(self, size): sl.leslie(self.x, self.x[1:]) def time_pascal(self, size): sl.pascal(size) def time_invpascal(self, size): sl.invpascal(size) def time_toeplitz(self, size): sl.toeplitz(self.x) def time_tri(self, size): sl.tri(size) class GetFuncs(Benchmark): def setup(self): self.x = np.eye(1) def time_get_blas_funcs(self): sl.blas.get_blas_funcs('gemm', dtype=float) def time_get_blas_funcs_2(self): sl.blas.get_blas_funcs(('gemm', 'axpy'), (self.x, self.x)) def time_small_cholesky(self): sl.cholesky(self.x)
true
true
f7f674d3ac4faefc56bb413133f2910a50d0ba60
66
py
Python
python/20190307/flask_qa_app/app/main/__init__.py
Realize0917/career
b5d02ac53cfc3ce3a2ca38d11480c51560283e67
[ "MIT" ]
3
2019-01-17T05:50:51.000Z
2019-03-15T10:10:07.000Z
python/20190307/flask_qa_app/app/main/__init__.py
Realize0917/career
b5d02ac53cfc3ce3a2ca38d11480c51560283e67
[ "MIT" ]
10
2019-01-17T06:07:03.000Z
2019-02-19T05:55:25.000Z
python/20190307/flask_qa_app/app/main/__init__.py
Realize0917/career
b5d02ac53cfc3ce3a2ca38d11480c51560283e67
[ "MIT" ]
4
2018-12-22T07:32:55.000Z
2019-03-06T09:13:48.000Z
#!/usr/bin/env python # encoding: utf-8 from .views import qa_bp
13.2
24
0.712121
from .views import qa_bp
true
true
f7f6759a8f2fdd1c6c604775c76a51d9e63af08e
15,309
py
Python
kinow_client/models/__init__.py
kinow-io/kaemo-python-sdk
610fce09e3a9e631babf09195b0492959d9e4d56
[ "Apache-2.0" ]
1
2017-05-03T12:48:22.000Z
2017-05-03T12:48:22.000Z
kinow_client/models/__init__.py
kinow-io/kaemo-python-sdk
610fce09e3a9e631babf09195b0492959d9e4d56
[ "Apache-2.0" ]
null
null
null
kinow_client/models/__init__.py
kinow-io/kaemo-python-sdk
610fce09e3a9e631babf09195b0492959d9e4d56
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Server API Reference for Server API (REST/Json) OpenAPI spec version: 2.0.9 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import # import models into model package from .actor import Actor from .actor_list_response import ActorListResponse from .actor_product import ActorProduct from .actor_product_list_response import ActorProductListResponse from .actor_product_role import ActorProductRole from .actor_product_role_list_response import ActorProductRoleListResponse from .actor_response import ActorResponse from .actor_role import ActorRole from .actor_role_list_response import ActorRoleListResponse from .add_product_to_cart_request import AddProductToCartRequest from .address import Address from .address_response import AddressResponse from .analytic import Analytic from .analytic_list_response import AnalyticListResponse from .attachment import Attachment from .blog_category import BlogCategory from .blog_category_list_response import BlogCategoryListResponse from .blog_category_response import BlogCategoryResponse from .blog_page import BlogPage from .blog_page_list_response import BlogPageListResponse from .blog_page_products_response import BlogPageProductsResponse from .blog_page_response import BlogPageResponse from .bonus import Bonus from .cms_categories_list_response import CMSCategoriesListResponse from .cms_category import CMSCategory from .cms_category_list_response import CMSCategoryListResponse from .cms_category_response import CMSCategoryResponse from .cms_page import CMSPage from .cms_page_list_response import CMSPageListResponse from .cms_page_response import CMSPageResponse from .cart import Cart from .cart_id_list import CartIDList from .cart_list_response import CartListResponse from .cart_list_response_1 import CartListResponse1 from .cart_price import CartPrice from .cart_price_request import CartPriceRequest from .cart_product import CartProduct from .cart_response 
import CartResponse from .cart_rule import CartRule from .cart_rule_list_response import CartRuleListResponse from .cart_rule_price import CartRulePrice from .cart_rule_response import CartRuleResponse from .cart_rule_restriction_group import CartRuleRestrictionGroup from .cart_rule_restriction_group_item import CartRuleRestrictionGroupItem from .category import Category from .category_actors_list_response import CategoryActorsListResponse from .category_directors_list_response import CategoryDirectorsListResponse from .category_images_list_response import CategoryImagesListResponse from .category_list_response import CategoryListResponse from .category_response import CategoryResponse from .configuration import Configuration from .configuration_list_response import ConfigurationListResponse from .configuration_response import ConfigurationResponse from .contact import Contact from .contact_list_response import ContactListResponse from .country import Country from .country_list_response import CountryListResponse from .create_actor_request import CreateActorRequest from .create_attribute_request import CreateAttributeRequest from .create_cms_category_request import CreateCMSCategoryRequest from .create_cms_page_request import CreateCMSPageRequest from .create_cart_request import CreateCartRequest from .create_cart_rule_request import CreateCartRuleRequest from .create_category_request import CreateCategoryRequest from .create_customer_request import CreateCustomerRequest from .create_device_request import CreateDeviceRequest from .create_director_request import CreateDirectorRequest from .create_extract_request import CreateExtractRequest from .create_extract_subtitle_request import CreateExtractSubtitleRequest from .create_free_gift_request import CreateFreeGiftRequest from .create_gift_request import CreateGiftRequest from .create_group_request import CreateGroupRequest from .create_media_file_request import CreateMediaFileRequest from .create_message_request 
import CreateMessageRequest from .create_product_access_request import CreateProductAccessRequest from .create_product_request import CreateProductRequest from .create_task_request import CreateTaskRequest from .create_video_request import CreateVideoRequest from .create_video_stat_session_request import CreateVideoStatSessionRequest from .create_video_stat_session_response import CreateVideoStatSessionResponse from .create_video_subtitle_request import CreateVideoSubtitleRequest from .credentials_validation_response import CredentialsValidationResponse from .currency import Currency from .currency_list_response import CurrencyListResponse from .currency_list_response_1 import CurrencyListResponse1 from .customer import Customer from .customer_current_views_response import CustomerCurrentViewsResponse from .customer_group_video_stats import CustomerGroupVideoStats from .customer_group_video_stats_list_response import CustomerGroupVideoStatsListResponse from .customer_id import CustomerId from .customer_list_response import CustomerListResponse from .customer_response import CustomerResponse from .customer_video_stats import CustomerVideoStats from .customer_video_stats_list_response import CustomerVideoStatsListResponse from .device import Device from .device_list_response import DeviceListResponse from .device_response import DeviceResponse from .director import Director from .director_list_response import DirectorListResponse from .director_product import DirectorProduct from .director_product_list_response import DirectorProductListResponse from .director_product_role import DirectorProductRole from .director_product_role_list_response import DirectorProductRoleListResponse from .director_response import DirectorResponse from .director_role import DirectorRole from .director_role_list_response import DirectorRoleListResponse from .download_informations import DownloadInformations from .employee import Employee from .employee_list_response import 
EmployeeListResponse from .employee_response import EmployeeResponse from .extract import Extract from .extract_access_info import ExtractAccessInfo from .extract_id_list import ExtractIDList from .extract_list_response import ExtractListResponse from .extract_response import ExtractResponse from .extract_subtitles_response import ExtractSubtitlesResponse from .feature import Feature from .feature_list_response import FeatureListResponse from .feature_value import FeatureValue from .feature_value_list_response import FeatureValueListResponse from .feature_value_list_response_1 import FeatureValueListResponse1 from .features import Features from .free_gift import FreeGift from .free_gift_list_response import FreeGiftListResponse from .free_gift_response import FreeGiftResponse from .gender import Gender from .gender_list_response import GenderListResponse from .geoloc import Geoloc from .geoloc_settings import GeolocSettings from .geoloc_settings_response import GeolocSettingsResponse from .geolocation_list_response import GeolocationListResponse from .gift import Gift from .gift_list_response import GiftListResponse from .gift_response import GiftResponse from .gift_token import GiftToken from .gift_token_response import GiftTokenResponse from .google_analytics_response import GoogleAnalyticsResponse from .group import Group from .group_list_response import GroupListResponse from .group_response import GroupResponse from .i18n_field import I18nField from .i18n_field_input import I18nFieldInput from .ip_coordinates import IPCoordinates from .ip_location import IPLocation from .ip_location_response import IPLocationResponse from .image import Image from .image_list_response import ImageListResponse from .image_response import ImageResponse from .image_type import ImageType from .language import Language from .language_list_response import LanguageListResponse from .logo_settings import LogoSettings from .media_file import MediaFile from .media_file_list_response 
import MediaFileListResponse from .media_file_response import MediaFileResponse from .media_source import MediaSource from .media_source_list_response import MediaSourceListResponse from .media_source_response import MediaSourceResponse from .order import Order from .order_history import OrderHistory from .order_history_list_response import OrderHistoryListResponse from .order_list_response import OrderListResponse from .order_response import OrderResponse from .order_state import OrderState from .order_state_list_response import OrderStateListResponse from .order_state_response import OrderStateResponse from .page import Page from .page_list_response import PageListResponse from .page_response import PageResponse from .pagination import Pagination from .payment_arguments import PaymentArguments from .payment_arguments_response import PaymentArgumentsResponse from .payment_details import PaymentDetails from .payment_details_response import PaymentDetailsResponse from .payment_methods import PaymentMethods from .payment_module import PaymentModule from .payment_module_list_response import PaymentModuleListResponse from .payment_module_list_response_1 import PaymentModuleListResponse1 from .payment_token import PaymentToken from .payment_token_1 import PaymentToken1 from .payment_url_response import PaymentUrlResponse from .platform_access import PlatformAccess from .platform_access_response import PlatformAccessResponse from .player import Player from .player_configuration import PlayerConfiguration from .playlist import Playlist from .playlist_list_response import PlaylistListResponse from .playlist_response import PlaylistResponse from .playlist_update import PlaylistUpdate from .prepayment_balance import PrepaymentBalance from .prepayment_bonus import PrepaymentBonus from .prepayment_bonus_amount import PrepaymentBonusAmount from .prepayment_bonus_id_list import PrepaymentBonusIDList from .prepayment_bonus_list_response import PrepaymentBonusListResponse from 
.prepayment_bonus_response import PrepaymentBonusResponse from .prepayment_operation import PrepaymentOperation from .prepayment_operation_amount import PrepaymentOperationAmount from .prepayment_operation_id_list import PrepaymentOperationIDList from .prepayment_operation_list_response import PrepaymentOperationListResponse from .prepayment_operation_response import PrepaymentOperationResponse from .prepayment_recharge import PrepaymentRecharge from .prepayment_recharge_list_response import PrepaymentRechargeListResponse from .prepayment_recharge_response import PrepaymentRechargeResponse from .product import Product from .product_access import ProductAccess from .product_access_info import ProductAccessInfo from .product_access_info_response import ProductAccessInfoResponse from .product_access_list_response import ProductAccessListResponse from .product_access_response import ProductAccessResponse from .product_attribute import ProductAttribute from .product_attribute_list_response import ProductAttributeListResponse from .product_categories import ProductCategories from .product_id_list import ProductIDList from .product_id_list_1 import ProductIDList1 from .product_image_list_response import ProductImageListResponse from .product_list_response import ProductListResponse from .product_price import ProductPrice from .product_price_attribute import ProductPriceAttribute from .product_response import ProductResponse from .product_video_list_response import ProductVideoListResponse from .promotion import Promotion from .registration_field import RegistrationField from .registration_fields_response import RegistrationFieldsResponse from .remove_product_from_cart_request import RemoveProductFromCartRequest from .session_video_stat import SessionVideoStat from .session_video_stat_list_response import SessionVideoStatListResponse from .social_settings import SocialSettings from .state import State from .state_list_response import StateListResponse from .subscription 
import Subscription from .subscription_list_response import SubscriptionListResponse from .subscription_response import SubscriptionResponse from .subtitle import Subtitle from .subtitle_file import SubtitleFile from .subtitle_file_list_response import SubtitleFileListResponse from .subtitle_list_response import SubtitleListResponse from .subtitle_response import SubtitleResponse from .support import Support from .support_message import SupportMessage from .support_response import SupportResponse from .tag import Tag from .task import Task from .task_response import TaskResponse from .tax_price import TaxPrice from .tax_rule import TaxRule from .tax_rule_list_response import TaxRuleListResponse from .token_response import TokenResponse from .update_actor_request import UpdateActorRequest from .update_address_request import UpdateAddressRequest from .update_cms_category_request import UpdateCMSCategoryRequest from .update_cms_page_request import UpdateCMSPageRequest from .update_cart_request import UpdateCartRequest from .update_cart_rule_request import UpdateCartRuleRequest from .update_category_request import UpdateCategoryRequest from .update_customer_request import UpdateCustomerRequest from .update_director_request import UpdateDirectorRequest from .update_extract_request import UpdateExtractRequest from .update_free_gift_request import UpdateFreeGiftRequest from .update_gift_request import UpdateGiftRequest from .update_payment_request import UpdatePaymentRequest from .update_product_access_request import UpdateProductAccessRequest from .update_product_request import UpdateProductRequest from .update_video_request import UpdateVideoRequest from .video import Video from .video_access_info import VideoAccessInfo from .video_access_info_response import VideoAccessInfoResponse from .video_category import VideoCategory from .video_category_list_response import VideoCategoryListResponse from .video_free_access import VideoFreeAccess from .video_group import 
VideoGroup from .video_group_list_response import VideoGroupListResponse from .video_group_response import VideoGroupResponse from .video_id_list import VideoIDList from .video_id_list_1 import VideoIDList1 from .video_id_list_2 import VideoIDList2 from .video_list_response import VideoListResponse from .video_response import VideoResponse from .video_stat import VideoStat from .video_stat_list_response import VideoStatListResponse from .video_stats_videos_watching_response import VideoStatsVideosWatchingResponse from .video_view_informations import VideoViewInformations from .video_views import VideoViews from .view import View from .widget_footer_menu import WidgetFooterMenu from .widget_footer_menu_list_response import WidgetFooterMenuListResponse from .widget_home_rail import WidgetHomeRail from .widget_home_rail_list_response import WidgetHomeRailListResponse from .widget_hook_phrase import WidgetHookPhrase from .widget_hook_phrase_list_response import WidgetHookPhraseListResponse from .widget_slider import WidgetSlider from .widget_slider_list_response import WidgetSliderListResponse from .widget_slider_response import WidgetSliderResponse from .widget_slider_video import WidgetSliderVideo from .widget_top_menu import WidgetTopMenu from .widget_top_menu_list_response import WidgetTopMenuListResponse
49.067308
89
0.898295
from __future__ import absolute_import from .actor import Actor from .actor_list_response import ActorListResponse from .actor_product import ActorProduct from .actor_product_list_response import ActorProductListResponse from .actor_product_role import ActorProductRole from .actor_product_role_list_response import ActorProductRoleListResponse from .actor_response import ActorResponse from .actor_role import ActorRole from .actor_role_list_response import ActorRoleListResponse from .add_product_to_cart_request import AddProductToCartRequest from .address import Address from .address_response import AddressResponse from .analytic import Analytic from .analytic_list_response import AnalyticListResponse from .attachment import Attachment from .blog_category import BlogCategory from .blog_category_list_response import BlogCategoryListResponse from .blog_category_response import BlogCategoryResponse from .blog_page import BlogPage from .blog_page_list_response import BlogPageListResponse from .blog_page_products_response import BlogPageProductsResponse from .blog_page_response import BlogPageResponse from .bonus import Bonus from .cms_categories_list_response import CMSCategoriesListResponse from .cms_category import CMSCategory from .cms_category_list_response import CMSCategoryListResponse from .cms_category_response import CMSCategoryResponse from .cms_page import CMSPage from .cms_page_list_response import CMSPageListResponse from .cms_page_response import CMSPageResponse from .cart import Cart from .cart_id_list import CartIDList from .cart_list_response import CartListResponse from .cart_list_response_1 import CartListResponse1 from .cart_price import CartPrice from .cart_price_request import CartPriceRequest from .cart_product import CartProduct from .cart_response import CartResponse from .cart_rule import CartRule from .cart_rule_list_response import CartRuleListResponse from .cart_rule_price import CartRulePrice from .cart_rule_response import CartRuleResponse 
from .cart_rule_restriction_group import CartRuleRestrictionGroup from .cart_rule_restriction_group_item import CartRuleRestrictionGroupItem from .category import Category from .category_actors_list_response import CategoryActorsListResponse from .category_directors_list_response import CategoryDirectorsListResponse from .category_images_list_response import CategoryImagesListResponse from .category_list_response import CategoryListResponse from .category_response import CategoryResponse from .configuration import Configuration from .configuration_list_response import ConfigurationListResponse from .configuration_response import ConfigurationResponse from .contact import Contact from .contact_list_response import ContactListResponse from .country import Country from .country_list_response import CountryListResponse from .create_actor_request import CreateActorRequest from .create_attribute_request import CreateAttributeRequest from .create_cms_category_request import CreateCMSCategoryRequest from .create_cms_page_request import CreateCMSPageRequest from .create_cart_request import CreateCartRequest from .create_cart_rule_request import CreateCartRuleRequest from .create_category_request import CreateCategoryRequest from .create_customer_request import CreateCustomerRequest from .create_device_request import CreateDeviceRequest from .create_director_request import CreateDirectorRequest from .create_extract_request import CreateExtractRequest from .create_extract_subtitle_request import CreateExtractSubtitleRequest from .create_free_gift_request import CreateFreeGiftRequest from .create_gift_request import CreateGiftRequest from .create_group_request import CreateGroupRequest from .create_media_file_request import CreateMediaFileRequest from .create_message_request import CreateMessageRequest from .create_product_access_request import CreateProductAccessRequest from .create_product_request import CreateProductRequest from .create_task_request import CreateTaskRequest 
from .create_video_request import CreateVideoRequest from .create_video_stat_session_request import CreateVideoStatSessionRequest from .create_video_stat_session_response import CreateVideoStatSessionResponse from .create_video_subtitle_request import CreateVideoSubtitleRequest from .credentials_validation_response import CredentialsValidationResponse from .currency import Currency from .currency_list_response import CurrencyListResponse from .currency_list_response_1 import CurrencyListResponse1 from .customer import Customer from .customer_current_views_response import CustomerCurrentViewsResponse from .customer_group_video_stats import CustomerGroupVideoStats from .customer_group_video_stats_list_response import CustomerGroupVideoStatsListResponse from .customer_id import CustomerId from .customer_list_response import CustomerListResponse from .customer_response import CustomerResponse from .customer_video_stats import CustomerVideoStats from .customer_video_stats_list_response import CustomerVideoStatsListResponse from .device import Device from .device_list_response import DeviceListResponse from .device_response import DeviceResponse from .director import Director from .director_list_response import DirectorListResponse from .director_product import DirectorProduct from .director_product_list_response import DirectorProductListResponse from .director_product_role import DirectorProductRole from .director_product_role_list_response import DirectorProductRoleListResponse from .director_response import DirectorResponse from .director_role import DirectorRole from .director_role_list_response import DirectorRoleListResponse from .download_informations import DownloadInformations from .employee import Employee from .employee_list_response import EmployeeListResponse from .employee_response import EmployeeResponse from .extract import Extract from .extract_access_info import ExtractAccessInfo from .extract_id_list import ExtractIDList from .extract_list_response 
import ExtractListResponse from .extract_response import ExtractResponse from .extract_subtitles_response import ExtractSubtitlesResponse from .feature import Feature from .feature_list_response import FeatureListResponse from .feature_value import FeatureValue from .feature_value_list_response import FeatureValueListResponse from .feature_value_list_response_1 import FeatureValueListResponse1 from .features import Features from .free_gift import FreeGift from .free_gift_list_response import FreeGiftListResponse from .free_gift_response import FreeGiftResponse from .gender import Gender from .gender_list_response import GenderListResponse from .geoloc import Geoloc from .geoloc_settings import GeolocSettings from .geoloc_settings_response import GeolocSettingsResponse from .geolocation_list_response import GeolocationListResponse from .gift import Gift from .gift_list_response import GiftListResponse from .gift_response import GiftResponse from .gift_token import GiftToken from .gift_token_response import GiftTokenResponse from .google_analytics_response import GoogleAnalyticsResponse from .group import Group from .group_list_response import GroupListResponse from .group_response import GroupResponse from .i18n_field import I18nField from .i18n_field_input import I18nFieldInput from .ip_coordinates import IPCoordinates from .ip_location import IPLocation from .ip_location_response import IPLocationResponse from .image import Image from .image_list_response import ImageListResponse from .image_response import ImageResponse from .image_type import ImageType from .language import Language from .language_list_response import LanguageListResponse from .logo_settings import LogoSettings from .media_file import MediaFile from .media_file_list_response import MediaFileListResponse from .media_file_response import MediaFileResponse from .media_source import MediaSource from .media_source_list_response import MediaSourceListResponse from .media_source_response import 
MediaSourceResponse from .order import Order from .order_history import OrderHistory from .order_history_list_response import OrderHistoryListResponse from .order_list_response import OrderListResponse from .order_response import OrderResponse from .order_state import OrderState from .order_state_list_response import OrderStateListResponse from .order_state_response import OrderStateResponse from .page import Page from .page_list_response import PageListResponse from .page_response import PageResponse from .pagination import Pagination from .payment_arguments import PaymentArguments from .payment_arguments_response import PaymentArgumentsResponse from .payment_details import PaymentDetails from .payment_details_response import PaymentDetailsResponse from .payment_methods import PaymentMethods from .payment_module import PaymentModule from .payment_module_list_response import PaymentModuleListResponse from .payment_module_list_response_1 import PaymentModuleListResponse1 from .payment_token import PaymentToken from .payment_token_1 import PaymentToken1 from .payment_url_response import PaymentUrlResponse from .platform_access import PlatformAccess from .platform_access_response import PlatformAccessResponse from .player import Player from .player_configuration import PlayerConfiguration from .playlist import Playlist from .playlist_list_response import PlaylistListResponse from .playlist_response import PlaylistResponse from .playlist_update import PlaylistUpdate from .prepayment_balance import PrepaymentBalance from .prepayment_bonus import PrepaymentBonus from .prepayment_bonus_amount import PrepaymentBonusAmount from .prepayment_bonus_id_list import PrepaymentBonusIDList from .prepayment_bonus_list_response import PrepaymentBonusListResponse from .prepayment_bonus_response import PrepaymentBonusResponse from .prepayment_operation import PrepaymentOperation from .prepayment_operation_amount import PrepaymentOperationAmount from .prepayment_operation_id_list import 
PrepaymentOperationIDList from .prepayment_operation_list_response import PrepaymentOperationListResponse from .prepayment_operation_response import PrepaymentOperationResponse from .prepayment_recharge import PrepaymentRecharge from .prepayment_recharge_list_response import PrepaymentRechargeListResponse from .prepayment_recharge_response import PrepaymentRechargeResponse from .product import Product from .product_access import ProductAccess from .product_access_info import ProductAccessInfo from .product_access_info_response import ProductAccessInfoResponse from .product_access_list_response import ProductAccessListResponse from .product_access_response import ProductAccessResponse from .product_attribute import ProductAttribute from .product_attribute_list_response import ProductAttributeListResponse from .product_categories import ProductCategories from .product_id_list import ProductIDList from .product_id_list_1 import ProductIDList1 from .product_image_list_response import ProductImageListResponse from .product_list_response import ProductListResponse from .product_price import ProductPrice from .product_price_attribute import ProductPriceAttribute from .product_response import ProductResponse from .product_video_list_response import ProductVideoListResponse from .promotion import Promotion from .registration_field import RegistrationField from .registration_fields_response import RegistrationFieldsResponse from .remove_product_from_cart_request import RemoveProductFromCartRequest from .session_video_stat import SessionVideoStat from .session_video_stat_list_response import SessionVideoStatListResponse from .social_settings import SocialSettings from .state import State from .state_list_response import StateListResponse from .subscription import Subscription from .subscription_list_response import SubscriptionListResponse from .subscription_response import SubscriptionResponse from .subtitle import Subtitle from .subtitle_file import SubtitleFile from 
.subtitle_file_list_response import SubtitleFileListResponse from .subtitle_list_response import SubtitleListResponse from .subtitle_response import SubtitleResponse from .support import Support from .support_message import SupportMessage from .support_response import SupportResponse from .tag import Tag from .task import Task from .task_response import TaskResponse from .tax_price import TaxPrice from .tax_rule import TaxRule from .tax_rule_list_response import TaxRuleListResponse from .token_response import TokenResponse from .update_actor_request import UpdateActorRequest from .update_address_request import UpdateAddressRequest from .update_cms_category_request import UpdateCMSCategoryRequest from .update_cms_page_request import UpdateCMSPageRequest from .update_cart_request import UpdateCartRequest from .update_cart_rule_request import UpdateCartRuleRequest from .update_category_request import UpdateCategoryRequest from .update_customer_request import UpdateCustomerRequest from .update_director_request import UpdateDirectorRequest from .update_extract_request import UpdateExtractRequest from .update_free_gift_request import UpdateFreeGiftRequest from .update_gift_request import UpdateGiftRequest from .update_payment_request import UpdatePaymentRequest from .update_product_access_request import UpdateProductAccessRequest from .update_product_request import UpdateProductRequest from .update_video_request import UpdateVideoRequest from .video import Video from .video_access_info import VideoAccessInfo from .video_access_info_response import VideoAccessInfoResponse from .video_category import VideoCategory from .video_category_list_response import VideoCategoryListResponse from .video_free_access import VideoFreeAccess from .video_group import VideoGroup from .video_group_list_response import VideoGroupListResponse from .video_group_response import VideoGroupResponse from .video_id_list import VideoIDList from .video_id_list_1 import VideoIDList1 from 
.video_id_list_2 import VideoIDList2 from .video_list_response import VideoListResponse from .video_response import VideoResponse from .video_stat import VideoStat from .video_stat_list_response import VideoStatListResponse from .video_stats_videos_watching_response import VideoStatsVideosWatchingResponse from .video_view_informations import VideoViewInformations from .video_views import VideoViews from .view import View from .widget_footer_menu import WidgetFooterMenu from .widget_footer_menu_list_response import WidgetFooterMenuListResponse from .widget_home_rail import WidgetHomeRail from .widget_home_rail_list_response import WidgetHomeRailListResponse from .widget_hook_phrase import WidgetHookPhrase from .widget_hook_phrase_list_response import WidgetHookPhraseListResponse from .widget_slider import WidgetSlider from .widget_slider_list_response import WidgetSliderListResponse from .widget_slider_response import WidgetSliderResponse from .widget_slider_video import WidgetSliderVideo from .widget_top_menu import WidgetTopMenu from .widget_top_menu_list_response import WidgetTopMenuListResponse
true
true
f7f675b8dbda3618d987c0a646527293c5db44f2
9,023
py
Python
metaflow_tabular/forecasting_flow.py
jimgoo/metaflow-tabular
66b02c1d53b1a64d19b4fc2f7f4a5f9a5ab1b422
[ "MIT" ]
null
null
null
metaflow_tabular/forecasting_flow.py
jimgoo/metaflow-tabular
66b02c1d53b1a64d19b4fc2f7f4a5f9a5ab1b422
[ "MIT" ]
null
null
null
metaflow_tabular/forecasting_flow.py
jimgoo/metaflow-tabular
66b02c1d53b1a64d19b4fc2f7f4a5f9a5ab1b422
[ "MIT" ]
null
null
null
""" To run this flow: ```python forecasting_flow.py --environment=conda run``` """ from functools import partial from metaflow import ( Flow, FlowSpec, IncludeFile, Parameter, batch, conda, conda_base, get_metadata, parallel_map, step, ) from pip_decorator import pip from forecasting_models import GluonTSModel, KatsModel, NeuralProphetModel, MerlionModel # this version is used in pre and post processing steps PANDAS_VERSION = "1.3.3" # this version is used when conda packages aren't available PIP_VERSION = "21.3.1" def run_model( model_config, wrapper_class, target_index, forecast_steps, train_df, data_freq ): try: model = wrapper_class( model_config, target_index, forecast_steps, data_freq=data_freq ) model.fit(train_df) forecast = model.predict(train_df) forecast["id"] = model_config["id"] return forecast except: print(f"Error with {model_config}") raise @conda_base(python="3.8.12") class ForecastingFlow(FlowSpec): """ A flow for benchmarking forecasting libraries. """ train_path = Parameter( "train_path", help="The path to a DataFrame file for training", default="https://jgoode.s3.amazonaws.com/ts-datasets/seattle-trail.csv", ) test_path = Parameter( "test_path", help="The path to a DataFrame file for testing", default=None, ) date_col = Parameter( "date_col", help="Column of the date in the input DataFrame", default="Date", ) target_col = Parameter( "target_col", help="Column of the target in the input DataFrame", default="BGT North of NE 70th Total", ) # data_config_path = Parameter( # "data_config_path", # help= model_config_path = Parameter( "model_config_path", help="The path to a model config file", default="../configs/forecasting/models/default.yaml", ) forecast_steps = Parameter( "forecast_steps", help="The number of steps ahead to forecast", default=10, ) @conda(libraries={"pandas": PANDAS_VERSION, "pyyaml": "6.0"}) @step def start(self): """ Start the flow by preprocessing the data. 
""" import pandas as pd from pprint import pprint import yaml # Print the Metaflow metadata provider print(f"Using metadata provider: {get_metadata()}") def load_df(path): df = pd.read_csv(path) assert self.date_col in df.columns, '"%s" not in columns' % self.date_col assert self.target_col in df.columns, ( '"%s" not in columns' % self.target_col ) # parse date column and set it as the index df[self.date_col] = pd.to_datetime(df[self.date_col]) df.set_index(self.date_col, inplace=True) return df self.train_df = load_df(self.train_path) if self.test_path is not None: self.test_df = load_df(self.test_path) assert ( self.train_df.columns == self.test_df.columns ).all(), "Columns do not match" else: self.test_df = None if self.test_df is None: n_train = 500 self.test_df = self.train_df.iloc[n_train : n_train + self.forecast_steps] self.train_df = self.train_df.iloc[:n_train] # get index of the target column self.target_index = self.train_df.columns.tolist().index(self.target_col) # get the frequency of the data self.freq = pd.infer_freq(self.train_df.index) # load the model config file with open(self.model_config_path, "r") as f: self.model_config = yaml.safe_load(f) print("train df") print(self.train_df) print("test df") print(self.test_df) print("model_config") pprint(self.model_config) # these branches will run in parallel # TODO: skip those with no entries in the model config self.next( self.run_merlion, self.run_gluonts, self.run_kats, self.run_neuralprophet, ) @conda(libraries={"salesforce-merlion": "1.0.2"}) @step def run_merlion(self): """ Run Merlion models. https://github.com/salesforce/Merlion """ self.forecasts = parallel_map( partial( run_model, wrapper_class=MerlionModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("merlion", []), ) self.next(self.join) # We use pip because mxnet 1.5.0 is broken and there's no newer conda version. 
@pip(libraries={"mxnet": "1.8.0.post0", "gluonts": "0.8.1"}) @conda(libraries={"pip": PIP_VERSION}) @step def run_gluonts(self): """ Run gluon-ts models. https://github.com/awslabs/gluon-ts """ self.forecasts = parallel_map( partial( run_model, wrapper_class=GluonTSModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("gluonts", []), ) self.next(self.join) @conda(libraries={"kats": "0.1.0"}) @step def run_kats(self): """ Run Kats models. https://github.com/facebookresearch/Kats """ self.forecasts = parallel_map( partial( run_model, wrapper_class=KatsModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("kats", []), ) self.next(self.join) # We use pip because there isn't a conda package for NeuralProphet. @pip(libraries={"neuralprophet": "0.3.0"}) @conda(libraries={"pip": PIP_VERSION}) @step def run_neuralprophet(self): """ Run NeuralProphet models. https://github.com/ourownstory/neural_prophet """ self.forecasts = parallel_map( partial( run_model, wrapper_class=NeuralProphetModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("neuralprophet", []), ) self.next(self.join) @conda(libraries={"pandas": PANDAS_VERSION}) @step def join(self, inputs): """ Compute performance metrics for each library. 
""" from collections import OrderedDict import numpy as np import pandas as pd forecasts = OrderedDict() # get forecasts for each library for lib in inputs: # carry these forward self.train_df = lib.train_df self.test_df = lib.test_df self.target_index = lib.target_index for forecast in lib.forecasts: assert ( forecast["id"] not in forecasts ), f"Duplicate forecast id: {forecast['id']}" forecasts[forecast["id"]] = forecast["y_hat"].reshape(-1) # get timestamps for the forecasts freq = self.train_df.index[1] - self.train_df.index[0] future_dates = pd.DatetimeIndex( [ self.train_df.index[-1] + (i + 1) * freq for i in range(self.forecast_steps) ] ) self.forecasts = pd.DataFrame(forecasts, index=future_dates) print("forecasts:") print(self.forecasts) if self.test_df is not None: # duplicate univariate target across columns for each model true = self.test_df.iloc[ : self.forecast_steps, [self.target_index] * self.forecasts.shape[1] ] pred = self.forecasts print("--> true") print(true) print("--> pred") print(pred) self.rmse = pd.Series( np.sqrt(np.mean((pred.values - true.values) ** 2, axis=0)), index=self.forecasts.columns, ).sort_values() print(f"RMSE:") print(self.rmse) self.next(self.end) @step def end(self): """ End of the flow """ pass if __name__ == "__main__": ForecastingFlow()
28.735669
88
0.56356
from functools import partial from metaflow import ( Flow, FlowSpec, IncludeFile, Parameter, batch, conda, conda_base, get_metadata, parallel_map, step, ) from pip_decorator import pip from forecasting_models import GluonTSModel, KatsModel, NeuralProphetModel, MerlionModel PANDAS_VERSION = "1.3.3" PIP_VERSION = "21.3.1" def run_model( model_config, wrapper_class, target_index, forecast_steps, train_df, data_freq ): try: model = wrapper_class( model_config, target_index, forecast_steps, data_freq=data_freq ) model.fit(train_df) forecast = model.predict(train_df) forecast["id"] = model_config["id"] return forecast except: print(f"Error with {model_config}") raise @conda_base(python="3.8.12") class ForecastingFlow(FlowSpec): train_path = Parameter( "train_path", help="The path to a DataFrame file for training", default="https://jgoode.s3.amazonaws.com/ts-datasets/seattle-trail.csv", ) test_path = Parameter( "test_path", help="The path to a DataFrame file for testing", default=None, ) date_col = Parameter( "date_col", help="Column of the date in the input DataFrame", default="Date", ) target_col = Parameter( "target_col", help="Column of the target in the input DataFrame", default="BGT North of NE 70th Total", ) # data_config_path = Parameter( # "data_config_path", # help= model_config_path = Parameter( "model_config_path", help="The path to a model config file", default="../configs/forecasting/models/default.yaml", ) forecast_steps = Parameter( "forecast_steps", help="The number of steps ahead to forecast", default=10, ) @conda(libraries={"pandas": PANDAS_VERSION, "pyyaml": "6.0"}) @step def start(self): import pandas as pd from pprint import pprint import yaml # Print the Metaflow metadata provider print(f"Using metadata provider: {get_metadata()}") def load_df(path): df = pd.read_csv(path) assert self.date_col in df.columns, '"%s" not in columns' % self.date_col assert self.target_col in df.columns, ( '"%s" not in columns' % self.target_col ) # parse date column and 
set it as the index df[self.date_col] = pd.to_datetime(df[self.date_col]) df.set_index(self.date_col, inplace=True) return df self.train_df = load_df(self.train_path) if self.test_path is not None: self.test_df = load_df(self.test_path) assert ( self.train_df.columns == self.test_df.columns ).all(), "Columns do not match" else: self.test_df = None if self.test_df is None: n_train = 500 self.test_df = self.train_df.iloc[n_train : n_train + self.forecast_steps] self.train_df = self.train_df.iloc[:n_train] # get index of the target column self.target_index = self.train_df.columns.tolist().index(self.target_col) # get the frequency of the data self.freq = pd.infer_freq(self.train_df.index) # load the model config file with open(self.model_config_path, "r") as f: self.model_config = yaml.safe_load(f) print("train df") print(self.train_df) print("test df") print(self.test_df) print("model_config") pprint(self.model_config) # these branches will run in parallel # TODO: skip those with no entries in the model config self.next( self.run_merlion, self.run_gluonts, self.run_kats, self.run_neuralprophet, ) @conda(libraries={"salesforce-merlion": "1.0.2"}) @step def run_merlion(self): self.forecasts = parallel_map( partial( run_model, wrapper_class=MerlionModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("merlion", []), ) self.next(self.join) # We use pip because mxnet 1.5.0 is broken and there's no newer conda version. 
@pip(libraries={"mxnet": "1.8.0.post0", "gluonts": "0.8.1"}) @conda(libraries={"pip": PIP_VERSION}) @step def run_gluonts(self): self.forecasts = parallel_map( partial( run_model, wrapper_class=GluonTSModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("gluonts", []), ) self.next(self.join) @conda(libraries={"kats": "0.1.0"}) @step def run_kats(self): self.forecasts = parallel_map( partial( run_model, wrapper_class=KatsModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("kats", []), ) self.next(self.join) @pip(libraries={"neuralprophet": "0.3.0"}) @conda(libraries={"pip": PIP_VERSION}) @step def run_neuralprophet(self): self.forecasts = parallel_map( partial( run_model, wrapper_class=NeuralProphetModel, target_index=self.target_index, forecast_steps=self.forecast_steps, train_df=self.train_df, data_freq=self.freq, ), self.model_config["libs"].get("neuralprophet", []), ) self.next(self.join) @conda(libraries={"pandas": PANDAS_VERSION}) @step def join(self, inputs): from collections import OrderedDict import numpy as np import pandas as pd forecasts = OrderedDict() # get forecasts for each library for lib in inputs: # carry these forward self.train_df = lib.train_df self.test_df = lib.test_df self.target_index = lib.target_index for forecast in lib.forecasts: assert ( forecast["id"] not in forecasts ), f"Duplicate forecast id: {forecast['id']}" forecasts[forecast["id"]] = forecast["y_hat"].reshape(-1) # get timestamps for the forecasts freq = self.train_df.index[1] - self.train_df.index[0] future_dates = pd.DatetimeIndex( [ self.train_df.index[-1] + (i + 1) * freq for i in range(self.forecast_steps) ] ) self.forecasts = pd.DataFrame(forecasts, index=future_dates) print("forecasts:") print(self.forecasts) if self.test_df is not None: # duplicate univariate target across 
columns for each model true = self.test_df.iloc[ : self.forecast_steps, [self.target_index] * self.forecasts.shape[1] ] pred = self.forecasts print("--> true") print(true) print("--> pred") print(pred) self.rmse = pd.Series( np.sqrt(np.mean((pred.values - true.values) ** 2, axis=0)), index=self.forecasts.columns, ).sort_values() print(f"RMSE:") print(self.rmse) self.next(self.end) @step def end(self): pass if __name__ == "__main__": ForecastingFlow()
true
true
f7f675e3ceedec6bb2a9a107a7fd4ca8ef08d42e
2,495
py
Python
library/postconf.py
chas0amx/ansible-postfix
b129c57fdddf00447a715cccea0758878de22d0b
[ "Apache-2.0" ]
1
2022-02-28T10:22:07.000Z
2022-02-28T10:22:07.000Z
library/postconf.py
chas0amx/ansible-postfix
b129c57fdddf00447a715cccea0758878de22d0b
[ "Apache-2.0" ]
7
2021-11-18T07:25:50.000Z
2022-03-31T12:25:24.000Z
library/postconf.py
chas0amx/ansible-postfix
b129c57fdddf00447a715cccea0758878de22d0b
[ "Apache-2.0" ]
1
2022-03-02T10:17:23.000Z
2022-03-02T10:17:23.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # (c) 2020, Bodo Schulz <bodo@boone-schulz.de> # BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause) from __future__ import absolute_import, division, print_function import re from ansible.module_utils.basic import AnsibleModule class PostfixPostconf(object): """ Main Class """ module = None def __init__(self, module): """ Initialize all needed Variables """ self.module = module self._postconf = module.get_bin_path('postconf', True) self.config_name = module.params.get("config_name") def run(self): """ runner """ result = dict( rc=127, failed=True, changed=False, ) rc, out, err = self._exec( [self._postconf, self.config_name] ) # version_string = "unknown" # debian: # "icinga2 - The Icinga 2 network monitoring daemon (version: r2.12.3-1)" # CentOS Linux: # "icinga2 - The Icinga 2 network monitoring daemon (version: 2.12.3)" pattern_1 = re.compile(r"{} = (?P<value_string>.*)".format(self.config_name)) version = re.search(pattern_1, out) if version: # version = re.search(pattern_2, version.group('version')) value_string = version.group('value_string') self.module.log(msg="value: {}".format(value_string)) result['rc'] = rc if rc == 0: result['failed'] = False result['postconf_value'] = value_string return result def _exec(self, cmd): ''' ''' self.module.log(msg="cmd: {}".format(cmd)) rc, out, err = self.module.run_command(cmd, check_rc=True) self.module.log(msg=" rc : '{}'".format(rc)) self.module.log(msg=" out: '{}' ({})".format(out, type(out))) self.module.log(msg=" err: '{}'".format(err)) return rc, out, err # =========================================== # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( config_name=dict( required=True, ) ), supports_check_mode=True, ) icinga = PostfixPostconf(module) result = icinga.run() module.log(msg="= result: {}".format(result)) module.exit_json(**result) # import module snippets if __name__ == '__main__': main()
23.990385
85
0.552305
from __future__ import absolute_import, division, print_function import re from ansible.module_utils.basic import AnsibleModule class PostfixPostconf(object): module = None def __init__(self, module): self.module = module self._postconf = module.get_bin_path('postconf', True) self.config_name = module.params.get("config_name") def run(self): result = dict( rc=127, failed=True, changed=False, ) rc, out, err = self._exec( [self._postconf, self.config_name] ) pattern_1 = re.compile(r"{} = (?P<value_string>.*)".format(self.config_name)) version = re.search(pattern_1, out) if version: value_string = version.group('value_string') self.module.log(msg="value: {}".format(value_string)) result['rc'] = rc if rc == 0: result['failed'] = False result['postconf_value'] = value_string return result def _exec(self, cmd): self.module.log(msg="cmd: {}".format(cmd)) rc, out, err = self.module.run_command(cmd, check_rc=True) self.module.log(msg=" rc : '{}'".format(rc)) self.module.log(msg=" out: '{}' ({})".format(out, type(out))) self.module.log(msg=" err: '{}'".format(err)) return rc, out, err def main(): module = AnsibleModule( argument_spec=dict( config_name=dict( required=True, ) ), supports_check_mode=True, ) icinga = PostfixPostconf(module) result = icinga.run() module.log(msg="= result: {}".format(result)) module.exit_json(**result) if __name__ == '__main__': main()
true
true
f7f675faf2896972bfa757d07f83f25d814c5a54
183
py
Python
scripts/constants.py
ltnguyen14/Quant_stock
0c3dcbb3c6d63e6426fd7d261578b13e8d429ccb
[ "MIT" ]
168
2017-08-18T11:57:52.000Z
2022-03-25T01:43:11.000Z
scripts/constants.py
Rgveda/Quant_stock
0c3dcbb3c6d63e6426fd7d261578b13e8d429ccb
[ "MIT" ]
2
2017-10-30T19:17:43.000Z
2019-02-23T16:46:23.000Z
scripts/constants.py
Rgveda/Quant_stock
0c3dcbb3c6d63e6426fd7d261578b13e8d429ccb
[ "MIT" ]
69
2017-10-30T18:54:11.000Z
2022-02-10T15:53:14.000Z
hm_epoch = 5 # Number of epoch in training lag_range = 1 # Lag range lag_epoch_num = 1 # Number of epoch while finding lag learning_rate = 0.001 # Default learning rate
26.142857
54
0.699454
hm_epoch = 5 lag_range = 1 lag_epoch_num = 1 learning_rate = 0.001
true
true
f7f6762de8bf3f833dfcb45d5c21973d90c6da68
17,255
py
Python
src/oci/apigateway/models/certificate.py
Manny27nyc/oci-python-sdk
de60b04e07a99826254f7255e992f41772902df7
[ "Apache-2.0", "BSD-3-Clause" ]
249
2017-09-11T22:06:05.000Z
2022-03-04T17:09:29.000Z
src/oci/apigateway/models/certificate.py
Manny27nyc/oci-python-sdk
de60b04e07a99826254f7255e992f41772902df7
[ "Apache-2.0", "BSD-3-Clause" ]
228
2017-09-11T23:07:26.000Z
2022-03-23T10:58:50.000Z
src/oci/apigateway/models/certificate.py
Manny27nyc/oci-python-sdk
de60b04e07a99826254f7255e992f41772902df7
[ "Apache-2.0", "BSD-3-Clause" ]
224
2017-09-27T07:32:43.000Z
2022-03-25T16:55:42.000Z
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class Certificate(object): """ A certificate contains information to be installed on a gateway to secure the traffic going through it. For more information, see `API Gateway Concepts`__. __ https://docs.cloud.oracle.com/iaas/Content/APIGateway/Concepts/apigatewayconcepts.htm """ #: A constant which can be used with the lifecycle_state property of a Certificate. #: This constant has a value of "CREATING" LIFECYCLE_STATE_CREATING = "CREATING" #: A constant which can be used with the lifecycle_state property of a Certificate. #: This constant has a value of "ACTIVE" LIFECYCLE_STATE_ACTIVE = "ACTIVE" #: A constant which can be used with the lifecycle_state property of a Certificate. #: This constant has a value of "UPDATING" LIFECYCLE_STATE_UPDATING = "UPDATING" #: A constant which can be used with the lifecycle_state property of a Certificate. #: This constant has a value of "DELETING" LIFECYCLE_STATE_DELETING = "DELETING" #: A constant which can be used with the lifecycle_state property of a Certificate. #: This constant has a value of "DELETED" LIFECYCLE_STATE_DELETED = "DELETED" #: A constant which can be used with the lifecycle_state property of a Certificate. #: This constant has a value of "FAILED" LIFECYCLE_STATE_FAILED = "FAILED" def __init__(self, **kwargs): """ Initializes a new Certificate object with values from keyword arguments. 
The following keyword arguments are supported (corresponding to the getters/setters of this class): :param id: The value to assign to the id property of this Certificate. :type id: str :param display_name: The value to assign to the display_name property of this Certificate. :type display_name: str :param compartment_id: The value to assign to the compartment_id property of this Certificate. :type compartment_id: str :param subject_names: The value to assign to the subject_names property of this Certificate. :type subject_names: list[str] :param time_not_valid_after: The value to assign to the time_not_valid_after property of this Certificate. :type time_not_valid_after: datetime :param certificate: The value to assign to the certificate property of this Certificate. :type certificate: str :param intermediate_certificates: The value to assign to the intermediate_certificates property of this Certificate. :type intermediate_certificates: str :param time_created: The value to assign to the time_created property of this Certificate. :type time_created: datetime :param time_updated: The value to assign to the time_updated property of this Certificate. :type time_updated: datetime :param lifecycle_state: The value to assign to the lifecycle_state property of this Certificate. Allowed values for this property are: "CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type lifecycle_state: str :param lifecycle_details: The value to assign to the lifecycle_details property of this Certificate. :type lifecycle_details: str :param freeform_tags: The value to assign to the freeform_tags property of this Certificate. :type freeform_tags: dict(str, str) :param defined_tags: The value to assign to the defined_tags property of this Certificate. 
:type defined_tags: dict(str, dict(str, object)) """ self.swagger_types = { 'id': 'str', 'display_name': 'str', 'compartment_id': 'str', 'subject_names': 'list[str]', 'time_not_valid_after': 'datetime', 'certificate': 'str', 'intermediate_certificates': 'str', 'time_created': 'datetime', 'time_updated': 'datetime', 'lifecycle_state': 'str', 'lifecycle_details': 'str', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))' } self.attribute_map = { 'id': 'id', 'display_name': 'displayName', 'compartment_id': 'compartmentId', 'subject_names': 'subjectNames', 'time_not_valid_after': 'timeNotValidAfter', 'certificate': 'certificate', 'intermediate_certificates': 'intermediateCertificates', 'time_created': 'timeCreated', 'time_updated': 'timeUpdated', 'lifecycle_state': 'lifecycleState', 'lifecycle_details': 'lifecycleDetails', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags' } self._id = None self._display_name = None self._compartment_id = None self._subject_names = None self._time_not_valid_after = None self._certificate = None self._intermediate_certificates = None self._time_created = None self._time_updated = None self._lifecycle_state = None self._lifecycle_details = None self._freeform_tags = None self._defined_tags = None @property def id(self): """ **[Required]** Gets the id of this Certificate. The `OCID`__ of the resource. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :return: The id of this Certificate. :rtype: str """ return self._id @id.setter def id(self, id): """ Sets the id of this Certificate. The `OCID`__ of the resource. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param id: The id of this Certificate. :type: str """ self._id = id @property def display_name(self): """ **[Required]** Gets the display_name of this Certificate. A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information. 
Example: `My new resource` :return: The display_name of this Certificate. :rtype: str """ return self._display_name @display_name.setter def display_name(self, display_name): """ Sets the display_name of this Certificate. A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information. Example: `My new resource` :param display_name: The display_name of this Certificate. :type: str """ self._display_name = display_name @property def compartment_id(self): """ **[Required]** Gets the compartment_id of this Certificate. The `OCID`__ of the compartment in which the resource is created. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :return: The compartment_id of this Certificate. :rtype: str """ return self._compartment_id @compartment_id.setter def compartment_id(self, compartment_id): """ Sets the compartment_id of this Certificate. The `OCID`__ of the compartment in which the resource is created. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param compartment_id: The compartment_id of this Certificate. :type: str """ self._compartment_id = compartment_id @property def subject_names(self): """ **[Required]** Gets the subject_names of this Certificate. The entity to be secured by the certificate and additional host names. :return: The subject_names of this Certificate. :rtype: list[str] """ return self._subject_names @subject_names.setter def subject_names(self, subject_names): """ Sets the subject_names of this Certificate. The entity to be secured by the certificate and additional host names. :param subject_names: The subject_names of this Certificate. :type: list[str] """ self._subject_names = subject_names @property def time_not_valid_after(self): """ **[Required]** Gets the time_not_valid_after of this Certificate. The date and time the certificate will expire. :return: The time_not_valid_after of this Certificate. 
:rtype: datetime """ return self._time_not_valid_after @time_not_valid_after.setter def time_not_valid_after(self, time_not_valid_after): """ Sets the time_not_valid_after of this Certificate. The date and time the certificate will expire. :param time_not_valid_after: The time_not_valid_after of this Certificate. :type: datetime """ self._time_not_valid_after = time_not_valid_after @property def certificate(self): """ **[Required]** Gets the certificate of this Certificate. The data of the leaf certificate in pem format. :return: The certificate of this Certificate. :rtype: str """ return self._certificate @certificate.setter def certificate(self, certificate): """ Sets the certificate of this Certificate. The data of the leaf certificate in pem format. :param certificate: The certificate of this Certificate. :type: str """ self._certificate = certificate @property def intermediate_certificates(self): """ Gets the intermediate_certificates of this Certificate. The intermediate certificate data associated with the certificate in pem format. :return: The intermediate_certificates of this Certificate. :rtype: str """ return self._intermediate_certificates @intermediate_certificates.setter def intermediate_certificates(self, intermediate_certificates): """ Sets the intermediate_certificates of this Certificate. The intermediate certificate data associated with the certificate in pem format. :param intermediate_certificates: The intermediate_certificates of this Certificate. :type: str """ self._intermediate_certificates = intermediate_certificates @property def time_created(self): """ **[Required]** Gets the time_created of this Certificate. The time this resource was created. An RFC3339 formatted datetime string. :return: The time_created of this Certificate. :rtype: datetime """ return self._time_created @time_created.setter def time_created(self, time_created): """ Sets the time_created of this Certificate. The time this resource was created. 
An RFC3339 formatted datetime string. :param time_created: The time_created of this Certificate. :type: datetime """ self._time_created = time_created @property def time_updated(self): """ Gets the time_updated of this Certificate. The time this resource was last updated. An RFC3339 formatted datetime string. :return: The time_updated of this Certificate. :rtype: datetime """ return self._time_updated @time_updated.setter def time_updated(self, time_updated): """ Sets the time_updated of this Certificate. The time this resource was last updated. An RFC3339 formatted datetime string. :param time_updated: The time_updated of this Certificate. :type: datetime """ self._time_updated = time_updated @property def lifecycle_state(self): """ Gets the lifecycle_state of this Certificate. The current state of the certificate. Allowed values for this property are: "CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The lifecycle_state of this Certificate. :rtype: str """ return self._lifecycle_state @lifecycle_state.setter def lifecycle_state(self, lifecycle_state): """ Sets the lifecycle_state of this Certificate. The current state of the certificate. :param lifecycle_state: The lifecycle_state of this Certificate. :type: str """ allowed_values = ["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"] if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values): lifecycle_state = 'UNKNOWN_ENUM_VALUE' self._lifecycle_state = lifecycle_state @property def lifecycle_details(self): """ Gets the lifecycle_details of this Certificate. A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in a Failed state. :return: The lifecycle_details of this Certificate. 
:rtype: str """ return self._lifecycle_details @lifecycle_details.setter def lifecycle_details(self, lifecycle_details): """ Sets the lifecycle_details of this Certificate. A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in a Failed state. :param lifecycle_details: The lifecycle_details of this Certificate. :type: str """ self._lifecycle_details = lifecycle_details @property def freeform_tags(self): """ Gets the freeform_tags of this Certificate. Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see `Resource Tags`__. Example: `{\"Department\": \"Finance\"}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :return: The freeform_tags of this Certificate. :rtype: dict(str, str) """ return self._freeform_tags @freeform_tags.setter def freeform_tags(self, freeform_tags): """ Sets the freeform_tags of this Certificate. Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see `Resource Tags`__. Example: `{\"Department\": \"Finance\"}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :param freeform_tags: The freeform_tags of this Certificate. :type: dict(str, str) """ self._freeform_tags = freeform_tags @property def defined_tags(self): """ Gets the defined_tags of this Certificate. Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :return: The defined_tags of this Certificate. :rtype: dict(str, dict(str, object)) """ return self._defined_tags @defined_tags.setter def defined_tags(self, defined_tags): """ Sets the defined_tags of this Certificate. 
Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :param defined_tags: The defined_tags of this Certificate. :type: dict(str, dict(str, object)) """ self._defined_tags = defined_tags def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
32.929389
245
0.648913
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class Certificate(object): LIFECYCLE_STATE_CREATING = "CREATING" LIFECYCLE_STATE_ACTIVE = "ACTIVE" LIFECYCLE_STATE_UPDATING = "UPDATING" LIFECYCLE_STATE_DELETING = "DELETING" LIFECYCLE_STATE_DELETED = "DELETED" LIFECYCLE_STATE_FAILED = "FAILED" def __init__(self, **kwargs): self.swagger_types = { 'id': 'str', 'display_name': 'str', 'compartment_id': 'str', 'subject_names': 'list[str]', 'time_not_valid_after': 'datetime', 'certificate': 'str', 'intermediate_certificates': 'str', 'time_created': 'datetime', 'time_updated': 'datetime', 'lifecycle_state': 'str', 'lifecycle_details': 'str', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))' } self.attribute_map = { 'id': 'id', 'display_name': 'displayName', 'compartment_id': 'compartmentId', 'subject_names': 'subjectNames', 'time_not_valid_after': 'timeNotValidAfter', 'certificate': 'certificate', 'intermediate_certificates': 'intermediateCertificates', 'time_created': 'timeCreated', 'time_updated': 'timeUpdated', 'lifecycle_state': 'lifecycleState', 'lifecycle_details': 'lifecycleDetails', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags' } self._id = None self._display_name = None self._compartment_id = None self._subject_names = None self._time_not_valid_after = None self._certificate = None self._intermediate_certificates = None self._time_created = None self._time_updated = None self._lifecycle_state = None self._lifecycle_details = None self._freeform_tags = None self._defined_tags = None @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def display_name(self): return self._display_name @display_name.setter def display_name(self, display_name): self._display_name = display_name @property def compartment_id(self): return self._compartment_id 
@compartment_id.setter def compartment_id(self, compartment_id): self._compartment_id = compartment_id @property def subject_names(self): return self._subject_names @subject_names.setter def subject_names(self, subject_names): self._subject_names = subject_names @property def time_not_valid_after(self): return self._time_not_valid_after @time_not_valid_after.setter def time_not_valid_after(self, time_not_valid_after): self._time_not_valid_after = time_not_valid_after @property def certificate(self): return self._certificate @certificate.setter def certificate(self, certificate): self._certificate = certificate @property def intermediate_certificates(self): return self._intermediate_certificates @intermediate_certificates.setter def intermediate_certificates(self, intermediate_certificates): self._intermediate_certificates = intermediate_certificates @property def time_created(self): return self._time_created @time_created.setter def time_created(self, time_created): self._time_created = time_created @property def time_updated(self): return self._time_updated @time_updated.setter def time_updated(self, time_updated): self._time_updated = time_updated @property def lifecycle_state(self): return self._lifecycle_state @lifecycle_state.setter def lifecycle_state(self, lifecycle_state): allowed_values = ["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"] if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values): lifecycle_state = 'UNKNOWN_ENUM_VALUE' self._lifecycle_state = lifecycle_state @property def lifecycle_details(self): return self._lifecycle_details @lifecycle_details.setter def lifecycle_details(self, lifecycle_details): self._lifecycle_details = lifecycle_details @property def freeform_tags(self): return self._freeform_tags @freeform_tags.setter def freeform_tags(self, freeform_tags): self._freeform_tags = freeform_tags @property def defined_tags(self): return self._defined_tags @defined_tags.setter def defined_tags(self, 
defined_tags): self._defined_tags = defined_tags def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f7f676c8b9cbe8c0e0ebf6c1796c802cead8c2d3
545
py
Python
frb/data/Halos/build_grids.py
KshitijAggarwal/FRB
3f732c6fa4fc79a5cfe69daf3cd88b51e6fb402b
[ "BSD-3-Clause" ]
39
2019-01-05T01:12:54.000Z
2021-12-15T16:53:51.000Z
frb/data/Halos/build_grids.py
KshitijAggarwal/FRB
3f732c6fa4fc79a5cfe69daf3cd88b51e6fb402b
[ "BSD-3-Clause" ]
80
2017-05-28T12:58:38.000Z
2022-01-25T23:22:29.000Z
frb/data/Halos/build_grids.py
KshitijAggarwal/FRB
3f732c6fa4fc79a5cfe69daf3cd88b51e6fb402b
[ "BSD-3-Clause" ]
20
2019-01-07T00:59:08.000Z
2022-03-29T11:38:19.000Z
from frb.halos import build_grid # Command line execution if __name__ == '__main__': #build_grid(outfile='z1_mNFW_10000', ntrial=10000) #build_grid(outfile='z1_mNFW_10000_21dec2018', ntrial=10000) #build_grid(outfile='test', ntrial=500, r_max=1.) #build_grid(outfile='test', ntrial=100, r_max=1.) #build_grid(outfile='test', ntrial=10) # Fiducial model build_grid(outfile='z1_mNFW_10000_rmax1', ntrial=10000, r_max=1., f_hot=0.75) # rmax=2 build_grid(outfile='z1_mNFW_10000_rmax2', ntrial=10000, r_max=2.)
34.0625
81
0.717431
from frb.halos import build_grid if __name__ == '__main__': build_grid(outfile='z1_mNFW_10000_rmax1', ntrial=10000, r_max=1., f_hot=0.75) build_grid(outfile='z1_mNFW_10000_rmax2', ntrial=10000, r_max=2.)
true
true
f7f676d1212b76e5950d052899c25e192a38e1a6
1,318
py
Python
Lib/site-packages/SiQt/siqt/tests/test_importers.py
fochoao/cpython
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
[ "bzip2-1.0.6", "0BSD" ]
null
null
null
Lib/site-packages/SiQt/siqt/tests/test_importers.py
fochoao/cpython
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
[ "bzip2-1.0.6", "0BSD" ]
20
2021-05-03T18:02:23.000Z
2022-03-12T12:01:04.000Z
Lib/site-packages/SiQt/siqt/tests/test_importers.py
fochoao/cpython
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
[ "bzip2-1.0.6", "0BSD" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import functools from unittest.case import SkipTest from multiprocessing import Process, Queue from time import sleep, time def _run_process(func): @functools.wraps(func) def inner_wrapper(*args): def func_wrapper(func, q, args): try: res = func(*args) q.put(res) except Exception as e: q.put(e) q = Queue() p = Process(target=func_wrapper, args=(func, q, args)) p.start() result = q.get(block=True) if isinstance(result, Exception): raise result return result return inner_wrapper @_run_process def _check_importer(backend): import SiQt try: import SiQt.QtCore except ImportError: assert True # this is normal except: raise try: SiQt.use(backend, force=False) except ImportError: raise SkipTest except: raise import SiQt.QtCore def test_PyQt4_importer(): _check_importer('PyQt4') def test_PyQt5_importer(): _check_importer('PyQt5') def test_PySide_importer(): _check_importer('PySide')
21.258065
62
0.637329
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import functools from unittest.case import SkipTest from multiprocessing import Process, Queue from time import sleep, time def _run_process(func): @functools.wraps(func) def inner_wrapper(*args): def func_wrapper(func, q, args): try: res = func(*args) q.put(res) except Exception as e: q.put(e) q = Queue() p = Process(target=func_wrapper, args=(func, q, args)) p.start() result = q.get(block=True) if isinstance(result, Exception): raise result return result return inner_wrapper @_run_process def _check_importer(backend): import SiQt try: import SiQt.QtCore except ImportError: assert True except: raise try: SiQt.use(backend, force=False) except ImportError: raise SkipTest except: raise import SiQt.QtCore def test_PyQt4_importer(): _check_importer('PyQt4') def test_PyQt5_importer(): _check_importer('PyQt5') def test_PySide_importer(): _check_importer('PySide')
true
true
f7f677963d0e8c3b1c2fc5cef943dfb7aaad168e
411
py
Python
django101/django101/asgi.py
Nikolay1982Nikolaev/python-web-2020-09
1f3fa8f7188c0a63647e4224a82d04f3f97cd455
[ "MIT" ]
4
2020-10-30T23:13:50.000Z
2020-12-26T21:35:00.000Z
django101/django101/asgi.py
Nikolay1982Nikolaev/python-web-2020-09
1f3fa8f7188c0a63647e4224a82d04f3f97cd455
[ "MIT" ]
null
null
null
django101/django101/asgi.py
Nikolay1982Nikolaev/python-web-2020-09
1f3fa8f7188c0a63647e4224a82d04f3f97cd455
[ "MIT" ]
7
2020-09-17T13:08:35.000Z
2020-10-31T15:01:46.000Z
""" ASGI config for django101 project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django101.settings') application = get_asgi_application()
24.176471
79
0.756691
import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django101.settings') application = get_asgi_application()
true
true
f7f678f93e83cf7526c28d593431b57b59639a70
478
py
Python
Alp/cap_alp/Z.py
Ashokkommi0001/patterns
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
[ "MIT" ]
2
2021-03-17T12:08:22.000Z
2021-03-17T12:11:10.000Z
Alp/cap_alp/Z.py
Ashokkommi0001/patterns
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
[ "MIT" ]
null
null
null
Alp/cap_alp/Z.py
Ashokkommi0001/patterns
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
[ "MIT" ]
1
2021-03-17T11:49:39.000Z
2021-03-17T11:49:39.000Z
def for_Z(): for row in range(7): for col in range(7): if row==0 or row==6 or row+col==6 : print("*",end=" ") else: print(end=" ") print() def while_Z(): i=0 while i<7: j=0 while j<7: if i==0 or i==6 or i+j==6 : print("*",end=" ") else: print(end=" ") j+=1 i+=1 print()
21.727273
48
0.317992
def for_Z(): for row in range(7): for col in range(7): if row==0 or row==6 or row+col==6 : print("*",end=" ") else: print(end=" ") print() def while_Z(): i=0 while i<7: j=0 while j<7: if i==0 or i==6 or i+j==6 : print("*",end=" ") else: print(end=" ") j+=1 i+=1 print()
true
true
f7f67992b961f2998416bc6eaf320823370b1e3b
10,005
py
Python
examples/flight_delays/logcox_implicit_scmmgp.py
axdahl/SC-MMGP
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
[ "Apache-2.0" ]
null
null
null
examples/flight_delays/logcox_implicit_scmmgp.py
axdahl/SC-MMGP
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
[ "Apache-2.0" ]
null
null
null
examples/flight_delays/logcox_implicit_scmmgp.py
axdahl/SC-MMGP
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ Script to execute example implicit sparse covarying MMGP with Poisson likelihood. The model invokes the 'implicit' sparse model class and accepts a degenerate kernel that is optionally corrected within the model class with diagonal correction. Inputs: Data training and test sets (dictionary pickle) Data for example: - count data for 50 airports - N_train = 20,000, N_test = 10,798, P = 50, D = 105 - Xtr[:, :4] ['time_index', 'dayofweek', 'dayofmonth', 'month'] - Xtr[:, 4:105] total scheduled arrivals and departures per airport - Xtr[:, 105] total activity (arrivals and departures) for all airports - link inputs is a 50x3 array (link inputs repeated for every group) with normalised lat,long and airport size (total scheduled flights over sample period) Model Options: - Sparse or full x-function covariance prior Krhh (set bool SPARSE_PRIOR) - Diagonal or Kronecker-structured variational posterior covariance Sr (set bool DIAG_POST) - Sparse or full covariance (when Kronecker posterior; set bool SPARSE_POST) - Diagonal correction required within class for degenerate kernels (set bool EXACT_SPARSE) Current Settings (degenerate scmmgp model with sparse Kronecker posterior): DIAG_POST = False SPARSE_PRIOR = True # set False for equivalent non-sparse scmmgp model SPARSE_POST = True EXACT_SPARSE = True # option for sparse prior low-rank adjustment Note on specifying group structure for F: Grouping occurs via block_struct, a nested list of grouping order Where functions [i] are independent i.e. in own block, set link_kernel[i] = link_inputs[i] = 1.0 See model class preamble and example below for further details. 
""" import os import numpy as np import pickle import pandas as pd import traceback import time import sklearn.cluster import csv import sys import mmgp from mmgp import likelihoods from mmgp import kernels import tensorflow as tf from mmgp import datasets from mmgp import losses from mmgp import util dpath = '/experiments/datasets/' dfile = 'logcox_nozeroy_aggx_inputsdict.pickle' dlinkfile = 'logcox_nozeroy_aggx_linkinputsarray.pickle' outdir = '/experiments/results/logcox_implicit_scmmgp' siteinclude = os.path.join(dpath, "airports_top50.csv") # contains order of output variables try: os.makedirs(outdir) except FileExistsError: pass def get_inputs(): """ inputsdict contains {'Yte': Yte, 'Ytr': Ytr, 'Xtr': Xtr, 'Xte': Xte} where values are np.arrays np. arrays are truncated to evenly split into batches of size = batchsize returns inputsdict, Xtr_link (ndarray, shape = [P, D_link_features]) """ with open(os.path.join(dpath, dfile), 'rb') as f: d_all = pickle.load(f) with open(os.path.join(dpath, dlinkfile), 'rb') as f: d_link = pickle.load(f) return d_all, d_link def init_z(train_inputs, num_inducing): # Initialize inducing points using clustering. 
mini_batch = sklearn.cluster.MiniBatchKMeans(num_inducing) cluster_indices = mini_batch.fit_predict(train_inputs) inducing_locations = mini_batch.cluster_centers_ return inducing_locations FLAGS = util.util.get_flags() BATCH_SIZE = 100 LEARNING_RATE = FLAGS.learning_rate DISPLAY_STEP = FLAGS.display_step EPOCHS = 150 NUM_SAMPLES = 200 PRED_SAMPLES = 500 NUM_INDUCING = 250 NUM_COMPONENTS = FLAGS.num_components IS_ARD = FLAGS.is_ard TOL = 0.0001 VAR_STEPS = FLAGS.var_steps DIAG_POST = False SPARSE_PRIOR = True SPARSE_POST = True # option for non-diag post EXACT_SPARSE = True # option for sparse prior low-rank adjustment MAXTIME = 1200 save_nlpds = True # If True saves samples of nlpds (mean and variance) print("settings done") # define GPRN P and Q output_dim = 50 #P locfeat_dim = 2 # [scheduled arrivals, scheduled departures] for time increment for airport commonfeats = list(range(4)) # [t_ix, dayofweek, dayofmonth, month] num_hubs = 5 # becomes nodedim # top 10 airports (select increasing subsets for varying nodedim) toplist = ['ATL', 'ORD', 'DFW', 'DEN', 'LAX', 'PHX', 'IAH', 'LAS', 'DTW', 'EWR'] use_sites = pd.read_csv(siteinclude,header=None).iloc[:,0].tolist() # order of output variables toplist = toplist[:num_hubs] hublocs = [use_sites.index(x) for x in toplist] nonhubs = [use_sites.index(x) for x in use_sites if x not in toplist] #non hub dims node_dim = len(hublocs) #Q # extract dataset d, d_link = get_inputs() Ytr, Yte, Xtr, Xte = d['Ytr'], d['Yte'], d['Xtr'], d['Xte'] data = datasets.DataSet(Xtr.astype(np.float32), Ytr.astype(np.float32), shuffle=False) test = datasets.DataSet(Xte.astype(np.float32), Yte.astype(np.float32), shuffle=False) print("dataset created") # lists required: block_struct, link_inputs, kern_link, kern # model config: block columns, leave f independent # order of block_struct is columns, node functions #block_struct nested list of grouping order weight_struct = [[] for _ in range(node_dim)] for i in range(node_dim): col = 
list(range(i*output_dim, i*output_dim + output_dim)) col_0 = col.pop(hublocs[i]) # bring hub to pivot position weight_struct[i] = [col_0] + col nodes = [[x] for x in list(range(output_dim * node_dim, output_dim * node_dim + node_dim))] block_struct = weight_struct + nodes # create link inputs (link inputs used repeatedly but can have link input per group) # permute to bring hub to first position link_inputs = [[] for _ in range(node_dim)] for i in range(node_dim): idx = list(range(d_link.shape[0])) link_inputs[i] = d_link[[idx.pop(hublocs[i])] + idx, :] # match inputs order to block_struct link_inputs = link_inputs + [1.0 for i in range(node_dim)] # link kernel klink_w = [kernels.WaveletSlice(3, active_dims=[0,1,2], input_scaling=IS_ARD) for i in range(len(weight_struct)) ] klink_f = [1.0 for i in range(node_dim)] kernlink = klink_w + klink_f # create 'within' kernel # kern k_w = [kernels.CompositeKernel('add',[kernels.RadialBasisSlice(Xtr.shape[1], active_dims= list(range(Xtr.shape[1])), std_dev = 1.0, white = 0.01, input_scaling = IS_ARD), kernels.PeriodicSlice(1, active_dims=[0], lengthscale=0.5, std_dev=1.0, period = 2.0) ]) for i in range(len(weight_struct))] k_f = [kernels.RadialBasisSlice(1, active_dims=[0], std_dev = 1.0, lengthscale=0.5, white = 0.01, input_scaling = IS_ARD) for i in range(node_dim)] kern = k_w + k_f print('len link_inputs ',len(link_inputs)) print('len kernlink ',len(kernlink)) print('len kern ', len(kern)) print('no. groups = ', len(block_struct), 'no. 
latent functions =', len([i for b in block_struct for i in b])) print('number latent functions', node_dim*(output_dim+1)) likelihood = likelihoods.SCMMGPLogCox(output_dim, node_dim, offset = 0.05) # output_dim, node_dim, offset print("likelihood and kernels set") Z = init_z(data.X, NUM_INDUCING) print('inducing points set') m = mmgp.ImplicitSCMMGP(output_dim, likelihood, kern, kernlink, block_struct, Z, link_inputs, num_components=NUM_COMPONENTS, diag_post=DIAG_POST, sparse_prior=SPARSE_PRIOR, sparse_post=SPARSE_POST, exact_sparse=EXACT_SPARSE, num_samples=NUM_SAMPLES, predict_samples=PRED_SAMPLES) print("model set") # initialise losses and logging error_rate = losses.RootMeanSqError(data.Dout) os.chdir(outdir) with open("log_results.csv", 'w', newline='') as f: csv.writer(f).writerow(['epoch', 'fit_runtime', 'nelbo', error_rate.get_name(),'generalised_nlpd']) with open("log_params.csv", 'w', newline='') as f: csv.writer(f).writerow(['epoch', 'raw_kernel_params', 'raw_kernlink_params', 'raw_likelihood_params', 'raw_weights']) with open("log_comp_time.csv", 'w', newline='') as f: csv.writer(f).writerow(['epoch', 'batch_time', 'nelbo_time', 'pred_time', 'gen_nlpd_time', error_rate.get_name()+'_time']) # optimise o = tf.train.AdamOptimizer(LEARNING_RATE, beta1=0.9,beta2=0.99) print("start time = ", time.strftime('%X %x %Z')) m.fit(data, o, var_steps = VAR_STEPS, epochs = EPOCHS, batch_size = BATCH_SIZE, display_step=DISPLAY_STEP, test = test, loss = error_rate, tolerance = TOL, max_time=MAXTIME ) print("optimisation complete") # export final predicted values and loss metrics ypred = m.predict(test.X, batch_size = BATCH_SIZE) #same batchsize used for convenience np.savetxt("predictions.csv", np.concatenate(ypred, axis=1), delimiter=",") if save_nlpds == True: nlpd_samples, nlpd_meanvar = m.nlpd_samples(test.X, test.Y, batch_size = BATCH_SIZE) try: np.savetxt("nlpd_meanvar.csv", nlpd_meanvar, delimiter=",") # N x 2P as for predictions except: print('nlpd_meanvar export 
fail') #try: # np.savetxt("nlpd_samples.csv", nlpd_samples, delimiter=",") # NP x S (NxS concat for P tasks) #except: # print('nlpd_samples export fail') print("Final " + error_rate.get_name() + "=" + "%.4f" % error_rate.eval(test.Y, ypred[0])) print("Final " + "generalised_nlpd" + "=" + "%.4f" % m.nlpd_general(test.X, test.Y, batch_size = BATCH_SIZE)) # any extra accuracy measures at end of routine error_rate_end = [losses.MeanAbsError(data.Dout)] print("Final ", [e.get_name() for e in error_rate_end]) print([e.eval(test.Y, ypred[0]) for e in error_rate_end]) predvar = [np.mean(np.mean(ypred[1]))] print("Final predvar ", predvar) with open("final_losses.csv", 'w', newline='') as f: csv.writer(f).writerows([[e.get_name() for e in error_rate_end] + ['pred_var'], [e.eval(test.Y, ypred[0]) for e in error_rate_end] + predvar]) print("finish time = " + time.strftime('%X %x %Z'))
40.670732
127
0.691554
import os import numpy as np import pickle import pandas as pd import traceback import time import sklearn.cluster import csv import sys import mmgp from mmgp import likelihoods from mmgp import kernels import tensorflow as tf from mmgp import datasets from mmgp import losses from mmgp import util dpath = '/experiments/datasets/' dfile = 'logcox_nozeroy_aggx_inputsdict.pickle' dlinkfile = 'logcox_nozeroy_aggx_linkinputsarray.pickle' outdir = '/experiments/results/logcox_implicit_scmmgp' siteinclude = os.path.join(dpath, "airports_top50.csv") try: os.makedirs(outdir) except FileExistsError: pass def get_inputs(): with open(os.path.join(dpath, dfile), 'rb') as f: d_all = pickle.load(f) with open(os.path.join(dpath, dlinkfile), 'rb') as f: d_link = pickle.load(f) return d_all, d_link def init_z(train_inputs, num_inducing): mini_batch = sklearn.cluster.MiniBatchKMeans(num_inducing) cluster_indices = mini_batch.fit_predict(train_inputs) inducing_locations = mini_batch.cluster_centers_ return inducing_locations FLAGS = util.util.get_flags() BATCH_SIZE = 100 LEARNING_RATE = FLAGS.learning_rate DISPLAY_STEP = FLAGS.display_step EPOCHS = 150 NUM_SAMPLES = 200 PRED_SAMPLES = 500 NUM_INDUCING = 250 NUM_COMPONENTS = FLAGS.num_components IS_ARD = FLAGS.is_ard TOL = 0.0001 VAR_STEPS = FLAGS.var_steps DIAG_POST = False SPARSE_PRIOR = True SPARSE_POST = True EXACT_SPARSE = True MAXTIME = 1200 save_nlpds = True print("settings done") output_dim = 50 locfeat_dim = 2 commonfeats = list(range(4)) num_hubs = 5 toplist = ['ATL', 'ORD', 'DFW', 'DEN', 'LAX', 'PHX', 'IAH', 'LAS', 'DTW', 'EWR'] use_sites = pd.read_csv(siteinclude,header=None).iloc[:,0].tolist() toplist = toplist[:num_hubs] hublocs = [use_sites.index(x) for x in toplist] nonhubs = [use_sites.index(x) for x in use_sites if x not in toplist] node_dim = len(hublocs) d, d_link = get_inputs() Ytr, Yte, Xtr, Xte = d['Ytr'], d['Yte'], d['Xtr'], d['Xte'] data = datasets.DataSet(Xtr.astype(np.float32), Ytr.astype(np.float32), 
shuffle=False) test = datasets.DataSet(Xte.astype(np.float32), Yte.astype(np.float32), shuffle=False) print("dataset created") weight_struct = [[] for _ in range(node_dim)] for i in range(node_dim): col = list(range(i*output_dim, i*output_dim + output_dim)) col_0 = col.pop(hublocs[i]) weight_struct[i] = [col_0] + col nodes = [[x] for x in list(range(output_dim * node_dim, output_dim * node_dim + node_dim))] block_struct = weight_struct + nodes link_inputs = [[] for _ in range(node_dim)] for i in range(node_dim): idx = list(range(d_link.shape[0])) link_inputs[i] = d_link[[idx.pop(hublocs[i])] + idx, :] link_inputs = link_inputs + [1.0 for i in range(node_dim)] klink_w = [kernels.WaveletSlice(3, active_dims=[0,1,2], input_scaling=IS_ARD) for i in range(len(weight_struct)) ] klink_f = [1.0 for i in range(node_dim)] kernlink = klink_w + klink_f k_w = [kernels.CompositeKernel('add',[kernels.RadialBasisSlice(Xtr.shape[1], active_dims= list(range(Xtr.shape[1])), std_dev = 1.0, white = 0.01, input_scaling = IS_ARD), kernels.PeriodicSlice(1, active_dims=[0], lengthscale=0.5, std_dev=1.0, period = 2.0) ]) for i in range(len(weight_struct))] k_f = [kernels.RadialBasisSlice(1, active_dims=[0], std_dev = 1.0, lengthscale=0.5, white = 0.01, input_scaling = IS_ARD) for i in range(node_dim)] kern = k_w + k_f print('len link_inputs ',len(link_inputs)) print('len kernlink ',len(kernlink)) print('len kern ', len(kern)) print('no. groups = ', len(block_struct), 'no. 
latent functions =', len([i for b in block_struct for i in b])) print('number latent functions', node_dim*(output_dim+1)) likelihood = likelihoods.SCMMGPLogCox(output_dim, node_dim, offset = 0.05) print("likelihood and kernels set") Z = init_z(data.X, NUM_INDUCING) print('inducing points set') m = mmgp.ImplicitSCMMGP(output_dim, likelihood, kern, kernlink, block_struct, Z, link_inputs, num_components=NUM_COMPONENTS, diag_post=DIAG_POST, sparse_prior=SPARSE_PRIOR, sparse_post=SPARSE_POST, exact_sparse=EXACT_SPARSE, num_samples=NUM_SAMPLES, predict_samples=PRED_SAMPLES) print("model set") error_rate = losses.RootMeanSqError(data.Dout) os.chdir(outdir) with open("log_results.csv", 'w', newline='') as f: csv.writer(f).writerow(['epoch', 'fit_runtime', 'nelbo', error_rate.get_name(),'generalised_nlpd']) with open("log_params.csv", 'w', newline='') as f: csv.writer(f).writerow(['epoch', 'raw_kernel_params', 'raw_kernlink_params', 'raw_likelihood_params', 'raw_weights']) with open("log_comp_time.csv", 'w', newline='') as f: csv.writer(f).writerow(['epoch', 'batch_time', 'nelbo_time', 'pred_time', 'gen_nlpd_time', error_rate.get_name()+'_time']) o = tf.train.AdamOptimizer(LEARNING_RATE, beta1=0.9,beta2=0.99) print("start time = ", time.strftime('%X %x %Z')) m.fit(data, o, var_steps = VAR_STEPS, epochs = EPOCHS, batch_size = BATCH_SIZE, display_step=DISPLAY_STEP, test = test, loss = error_rate, tolerance = TOL, max_time=MAXTIME ) print("optimisation complete") ypred = m.predict(test.X, batch_size = BATCH_SIZE) np.savetxt("predictions.csv", np.concatenate(ypred, axis=1), delimiter=",") if save_nlpds == True: nlpd_samples, nlpd_meanvar = m.nlpd_samples(test.X, test.Y, batch_size = BATCH_SIZE) try: np.savetxt("nlpd_meanvar.csv", nlpd_meanvar, delimiter=",") except: print('nlpd_meanvar export fail') r_rate.get_name() + "=" + "%.4f" % error_rate.eval(test.Y, ypred[0])) print("Final " + "generalised_nlpd" + "=" + "%.4f" % m.nlpd_general(test.X, test.Y, batch_size = BATCH_SIZE)) 
error_rate_end = [losses.MeanAbsError(data.Dout)] print("Final ", [e.get_name() for e in error_rate_end]) print([e.eval(test.Y, ypred[0]) for e in error_rate_end]) predvar = [np.mean(np.mean(ypred[1]))] print("Final predvar ", predvar) with open("final_losses.csv", 'w', newline='') as f: csv.writer(f).writerows([[e.get_name() for e in error_rate_end] + ['pred_var'], [e.eval(test.Y, ypred[0]) for e in error_rate_end] + predvar]) print("finish time = " + time.strftime('%X %x %Z'))
true
true
f7f67a272bdc58ebe68462646753c1b07b16884c
84,043
py
Python
fivepseq/logic/structures/fivepseq_counts.py
opplatek/fivepseq
9a531520e29aa9b3edbf1823c6cfd249f97bba9b
[ "BSD-3-Clause" ]
5
2018-10-27T20:37:26.000Z
2021-06-14T10:23:53.000Z
fivepseq/logic/structures/fivepseq_counts.py
opplatek/fivepseq
9a531520e29aa9b3edbf1823c6cfd249f97bba9b
[ "BSD-3-Clause" ]
1
2021-12-14T17:20:08.000Z
2021-12-14T17:20:08.000Z
fivepseq/logic/structures/fivepseq_counts.py
opplatek/fivepseq
9a531520e29aa9b3edbf1823c6cfd249f97bba9b
[ "BSD-3-Clause" ]
1
2020-12-22T17:27:40.000Z
2020-12-22T17:27:40.000Z
import collections import logging import os from math import floor import numpy as np import pandas as pd import plastid from preconditions import preconditions from scipy import stats from fivepseq import config from fivepseq.logic.structures import codons from fivepseq.logic.structures.codons import Codons from fivepseq.util.writers import FivePSeqOut class FivePSeqCounts: """ This class wraps annotation, alignment and genome objects in one place. Algorithms extracting count information from these objects are implemented in this class as functions. Algorithms able to work with count arrays and dataframes alone are in the algorithms package. """ START = "START" TERM = "STOP" FULL_LENGTH = "full_length" ALL = "all" START_CODON = "start" STOP_CODON = "stop" TRANSCRIPT_LENGTH = "len" TRANSCRIPT_3NT = "3nt" NUMBER_READS = "NumOfReads" NUMBER_READS_DOWNSAMPLED = "NumOfReadsDownsampled" NUMBER_POSITIONS = "NumOfMapPositions" COUNT_THRESHOLD = 100 logger = logging.getLogger(config.FIVEPSEQ_LOGGER) count_distribution_dict = None outlier_lower = None downsample_constant = None outlier_probability = None config = None alignment = None annotation = None genome = None count_vector_list_start = None count_vector_list_term = None count_vector_list_full_length = None meta_count_series_start = None meta_count_series_term = None frame_counts_df_start = None frame_counts_df_term = None codon_genome_usage_df = None codon_count_df = None amino_acid_count_df = None dicodon_count_df = None dipeptide_count_df = None tricodon_count_df = None tripeptide_count_df = None codon_stats_df = None amino_acid_stats_df = None codon_genome_usage_df = None amino_acid_genome_usage_df = None start_codon_dict = None stop_codon_dict = None canonical_transcript_index = None transcript_descriptors = None outliers = None is_geneset = False loci_overlaps = None READ_LOCATIONS_ALL = "_ALL" READ_LOCATIONS_3UTR = "_3UTR" READ_LOCATIONS_5UTR = "_5UTR" READ_LOCATIONS_CDS = "_CDS" MASK_DIST = 20 TRIPEPTIDE_POS = 
-11 DIPEPTIDE_POS = -14 missing_chroms = [] def __init__(self, alignment, annotation, genome, config, downsample_constant, is_geneset=False, transcript_filter=None): """ Initializes a FivePSeqCounts object with Alignment and Annotation instances. :param alignment: fivepseq.logic.structures.Alignment type object :param annotation: fivepseq.logic.structures.Annotation type object :param genome: fivepseq.logic.structures.Genome: Genome type object :param outlier_probability: a float setting the probability threshold for Poisson distribution that will be used to downsample outliers :param downsample_constant: a float specifying a constant threshold: higher values will be down-sampled to this constant (without Poisson check) """ self.alignment = alignment self.annotation = annotation self.genome = genome self.transcript_filter = transcript_filter self.config = config self.outlier_probability = config.args.op self.outlier_lower = downsample_constant self.outliers = [] self.start_codon_dict = {} self.stop_codon_dict = {} self.canonical_transcript_index = [] self.is_geneset = is_geneset self.logger.info("Initiated a FivePSeqCounts object with" "\n\talignment from file %s" "\n\tannotation from file %s " "\n\tgenome from file %s" % (alignment.alignment_file.filename, annotation.file_path, genome.fasta_file)) def get_transcript_descriptors(self): if self.transcript_descriptors is None: self.generate_transcript_descriptors() return self.transcript_descriptors def get_start_codon_dict(self): if self.start_codon_dict is None: self.generate_transcript_descriptors() return self.start_codon_dict def get_stop_codon_dict(self): if self.stop_codon_dict is None: self.generate_transcript_descriptors() return self.stop_codon_dict def generate_transcript_descriptors(self): """ Generates and stores the basic statistics on transcript sequences and counts. 
The following objects are generated and kept in self: transcript_descriptors:: pandas DataFrame - columns: START, TERM codons, transcript length, transcript length divisible by three, number of reads mapping within coding region - rows: transcripts :return: """ # info self.logger.info("Generating transcript descriptors") transcript_assembly = self.annotation.get_transcript_assembly(span_size=0) transcript_count = len(transcript_assembly) self.transcript_descriptors = pd.DataFrame(data=None, index=range(transcript_count), columns=[self.START_CODON, self.STOP_CODON, self.TRANSCRIPT_LENGTH, self.TRANSCRIPT_3NT, self.NUMBER_READS, self.NUMBER_READS_DOWNSAMPLED, self.NUMBER_POSITIONS]) count_distribution_dict = {} for transcript_ind in range(transcript_count): transcript = transcript_assembly[transcript_ind] cds_sequence = self.get_cds_sequence_safe(transcript, 0) count_vector = self.get_count_vector_safe(transcript, 0) # NOTE the count distribution does not include values 0 to avoid skewness for outlier detection for c in count_vector: if c > 0: if c in count_distribution_dict: count_distribution_dict[c] += 1 else: count_distribution_dict[c] = 1 start_codon = cds_sequence[0:3] stop_codon = cds_sequence[len(cds_sequence) - 3:len(cds_sequence)] if (start_codon == codons.Codons.START_CODON) & (stop_codon in codons.Codons.stop_codons): self.canonical_transcript_index.append(transcript_ind) self.transcript_descriptors.at[transcript_ind, self.START_CODON] = start_codon self.transcript_descriptors.at[transcript_ind, self.STOP_CODON] = stop_codon self.transcript_descriptors.at[transcript_ind, self.TRANSCRIPT_3NT] = str(len(cds_sequence) % 3 == 0) self.transcript_descriptors.at[transcript_ind, self.TRANSCRIPT_LENGTH] = len(cds_sequence) self.transcript_descriptors.at[transcript_ind, self.NUMBER_READS] = int(np.sum(count_vector)) self.transcript_descriptors.at[transcript_ind, self.NUMBER_POSITIONS] = np.count_nonzero(count_vector) if start_codon in self.start_codon_dict.keys(): 
self.start_codon_dict[start_codon] += 1 else: self.start_codon_dict.update({start_codon: 1}) if stop_codon in self.stop_codon_dict.keys(): self.stop_codon_dict[stop_codon] += 1 else: self.stop_codon_dict.update({stop_codon: 1}) self.count_distribution_dict = collections.OrderedDict(sorted(count_distribution_dict.items())) self.outlier_lower = self.get_outlier_lower() self.logger.info("The lower bound for outliers set as %f " % self.outlier_lower) # also store downsampled transcript counts for transcript_ind in range(transcript_count): transcript = transcript_assembly[transcript_ind] count_vector_downsampled = self.get_count_vector(transcript, span_size=0, region=self.FULL_LENGTH, downsample=True) self.transcript_descriptors.at[transcript_ind, self.NUMBER_READS_DOWNSAMPLED] = int( np.sum(count_vector_downsampled)) self.logger.info("Done generating transcript descriptors") def get_count_distribution_dict(self): return self.count_distribution_dict def get_count_distribution(self): if self.count_distribution_dict is None: self.generate_transcript_descriptors() count_distribution = [] for c, f in self.count_distribution_dict.items(): for i in range(f): count_distribution.append(c) return count_distribution def set_count_distribution_dict(self, count_distribution_dict): """ Sets the count distribution according to the specified count vector. :param count_distribution_dict: an ordered dictionary of count frequencies :return: """ if len(count_distribution_dict) == 0: self.count_distribution_dict = None else: self.count_distribution_dict = count_distribution_dict def get_outlier_lower(self): """ Returns the lower bound for outliers detected as points lying self.downsample_by number times higher than the 25-75% interquartile range. 
:return: """ if self.outlier_lower is not None: return self.outlier_lower count_distribution = self.get_count_distribution() if len(count_distribution) == 0: self.outlier_lower = 0 return 0 scd = sorted(count_distribution) lam = np.mean(scd) ps = [1 - stats.poisson.cdf(x, lam) for x in scd] ind = np.where(np.asarray(ps) <= self.outlier_probability)[0].tolist() if len(ind) > 0: # outliers = [scd[i] for i in ind] outlier_lower = scd[min(ind) - 1] else: outlier_lower = max(scd) + 1 self.outlier_lower = outlier_lower return outlier_lower def set_outlier_lower(self, outlier_lower): # TODO add checks, set preconditions self.outlier_lower = outlier_lower def generate_count_vector_lists(self): """ Generates read count vectors for full length transcripts, terminus- and start- aligned sections, spanning respective regions of each transcript in the transcript assembly. The region is spanned according to the span_size set in annotation. :return: [[int]]: array of counts arrays of 5' mapping counts per position of the specified region of each transcript """ # if counts are already computed, return the existing ones logging.getLogger(config.FIVEPSEQ_LOGGER).info("Generating count vectors") if self.count_vector_list_full_length is not None: if self.count_vector_list_term is not None: if self.count_vector_list_start is not None: logging.getLogger(config.FIVEPSEQ_LOGGER).warning("All count vectors are already generated") # otherwise, retrieve the counts from the alignment file, referencing the transcript assembly self.logger.info("Retrieving counts (span size :%d)..." 
% self.annotation.span_size) # initialize empty vectors transcript_count = len(self.annotation.get_transcript_assembly()) self.count_vector_list_full_length = [None] * transcript_count self.count_vector_list_term = [None] * transcript_count self.count_vector_list_start = [None] * transcript_count # setup the the counter counter = 1 ta = self.annotation.get_transcript_assembly() for i in range(transcript_count): transcript = ta[i] # update to console if counter % 10000 == 0: self.logger.info("\r>>Transcript count: %d (%d%s)\t" % ( counter, floor(100 * (counter - 1) / self.annotation.transcript_count), '%'), ) # retrieve actual counts for current transcript try: count_vector = self.get_count_vector(transcript, self.annotation.span_size, self.FULL_LENGTH) self.count_vector_list_full_length[counter - 1] = count_vector self.count_vector_list_start[counter - 1] = count_vector[:2 * self.annotation.span_size] self.count_vector_list_term[counter - 1] = count_vector[-(2 * self.annotation.span_size):] except Exception as e: error_message = "Problem retrieving counts for transcript %s. Reason: %s" \ % (transcript.get_name(), e.message) self.logger.error(error_message) raise Exception(error_message) counter += 1 self.check_for_codons = False # report successful retrieval self.logger.info("Finished retrieving count vectors") @preconditions(lambda region: isinstance(region, str)) def get_count_vector_list(self, region): """ Returns arrays of read count vectors spanning the given region of each transcript in the transcript assembly. The region is spanned according to the span_size set in annotation. 
:param region: str: Specifies the region of the transcript to span around :return: [[int]]: array of counts arrays of 5' mapping counts per position of the specified region of each transcript """ # if counts are already computed, return the existing ones else generate count vector lists first if self.count_vector_list_full_length is None: self.generate_count_vector_lists() if region == self.FULL_LENGTH: return self.count_vector_list_full_length elif region == self.START: return self.count_vector_list_start elif region == self.TERM: return self.count_vector_list_term else: error_message = "Cannot retrieve the counts. " \ "Invalid region \"%s\" specified: should be one of (%s, %s, %s)." \ % (region, self.FULL_LENGTH, self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) @preconditions(lambda span_size: isinstance(span_size, int), lambda region: isinstance(region, str)) def get_count_vector(self, transcript, span_size, region, downsample=True): """ Returns the vector of counts for the given transcript within the given spanning region. 
:param region: str: Specifies the region of the transcript to span for count vector generation :param transcript: plastid.Transcript: The transcript to return the counts for: is is already spanned with the specified span_size :param span_size: int: Specifies how many nucleotides to span around the specified region :param transcript_ind: int: the index of transcript in the transcript assembly :return: [int]: array of 5' mapping counts per position of the specified transcript region """ try: # retrieve the count vector using plastid function "get_counts" called from the given Transcript object count_vector = self.get_count_vector_safe(transcript, span_size) if downsample and any(x > self.outlier_lower for x in count_vector): count_vector_ds = [0] * len(count_vector) for i in range(len(count_vector_ds)): if count_vector[i] > self.outlier_lower: count_vector_ds[i] = self.outlier_lower outlier_params = [FivePSeqOut.get_transcript_attr(transcript, "ID"), FivePSeqOut.get_transcript_attr(transcript, "Name"), i - span_size, len(count_vector) - i - span_size, count_vector[i], count_vector_ds[i]] if outlier_params not in self.outliers: self.outliers.append(outlier_params) else: count_vector_ds[i] = count_vector[i] count_vector = count_vector_ds count_vector = count_vector[transcript.cds_start: transcript.cds_end + 2 * span_size] # return only the region of the vector that is specified by region and span_size parameters if region == self.FULL_LENGTH: # the full vector will be returned pass elif region == self.START: count_vector = count_vector[:2 * span_size] elif region == self.TERM: count_vector = count_vector[-(2 * span_size):] else: error_message = "Cannot retrieve a count vector for the transcript %s. " \ "Invalid region \"%s\" specified: should be one of (%s, %s, %s)." 
\ % (transcript.get_name(), region, self.FULL_LENGTH, self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) except Exception as e: error_message = "Problem retrieving the count vector for the transcript %s. Reason:%s" % ( transcript.get_name(), e.message) self.logger.error(error_message) raise Exception(error_message) # convert the count array to an int vector if not isinstance(count_vector, list): count_vector = count_vector.tolist() # if not isinstance(count_vector[0], int): count_vector = list(map(int, count_vector)) return count_vector def get_count_vector_safe(self, transcript, span_size): """ A safe method to return count vector accounting for transcripts that span before or after genome start and end. :param transcript: :param span_size: :return: """ try: count_vector = transcript.get_counts(self.alignment.bam_array) except Exception as e: if transcript.spanning_segment.start < 0: diff = -1 * transcript.spanning_segment.start t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) subchain_counts = list(t_subchain.get_counts(self.alignment.bam_array)) count_vector = [0] * diff + subchain_counts logging.getLogger(config.FIVEPSEQ_LOGGER). \ debug("Transcript %s at the beginning of the genome padded with %d zeros" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) else: t_len = transcript.spanning_segment.end - transcript.spanning_segment.start diff = transcript.spanning_segment.end - len(self.genome.genome_dict[transcript.chrom].seq) if diff > span_size: # NOTE wrongly annotated transcripts go outside genome boundaries, # NOTE return an empty vector spanned by span size as a safe way of discarding such transcripts count_vector = [0] * t_len logging.getLogger(config.FIVEPSEQ_LOGGER). 
\ debug("Transcript %s exceeds genome dimensions by %d bases" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) else: t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) subchain_counts = list(t_subchain.get_counts(self.alignment.bam_array)) count_vector = subchain_counts + [0] * diff logging.getLogger(config.FIVEPSEQ_LOGGER). \ debug("Transcript %s at the end of the genome padded with %d zeros" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) return count_vector def get_sequence(self, transcript, transcript_span_size, desired_span_size): if desired_span_size > transcript_span_size: raise ValueError("Desired span size %d bigger than the transcript span size %d" % (desired_span_size, transcript_span_size)) try: sequence = transcript.get_sequence(self.genome.genome_dict) desired_seq = sequence[transcript.cds_start + transcript_span_size - desired_span_size: transcript.cds_end + transcript_span_size + desired_span_size] except: t_len = transcript.cds_end - transcript.cds_start desired_seq = ''.join(['N'] * t_len + 2 * desired_span_size) return desired_seq def get_cds_sequence_safe(self, transcript, span_size): # NOTE a dangerous code here. Works correctly only if the input span size is the same as in the transcript. 
# TOCHANGE try: sequence = transcript.get_sequence(self.genome.genome_dict) cds_sequence = sequence[transcript.cds_start + span_size: transcript.cds_end + span_size] except: if transcript.chrom not in self.genome.genome_dict.keys(): if transcript.chrom not in self.missing_chroms: self.missing_chroms.append(transcript.chrom) logging.getLogger(config.FIVEPSEQ_LOGGER).warn( "No chromosome named %s found in the genome sequence" % transcript.chrom) t_len = transcript.spanning_segment.end - transcript.spanning_segment.start cds_sequence = ''.join(['N'] * t_len) elif transcript.spanning_segment.start < 0: diff = -1 * transcript.spanning_segment.start t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) sequence = t_subchain.get_sequence(self.genome.genome_dict) if span_size < diff: cds_sequence = sequence[transcript.cds_start + span_size - diff: transcript.cds_end + span_size] else: # TODO I don't know how to get sequence in this case: need debugging cds_sequence = sequence[transcript.cds_start + span_size - diff: transcript.cds_end + span_size] logging.getLogger(config.FIVEPSEQ_LOGGER). 
\ debug("Transcript %s at the beginning of the genome padded with %d N's" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) else: t_len = transcript.spanning_segment.end - transcript.spanning_segment.start diff = transcript.spanning_segment.end - len(self.genome.genome_dict[transcript.chrom].seq) if diff > span_size: # NOTE wrongly annotated transcripts go outside genome boundaries, # NOTE return an empty sequence spanned by span size as a safe way of discarding such transcripts cds_sequence = ''.join(['N'] * t_len) else: t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) sequence = t_subchain.get_sequence(self.genome.genome_dict) cds_sequence = sequence[transcript.cds_start + span_size: transcript.cds_end + span_size - diff] return cds_sequence def get_outliers_df(self): """ Returns the outliers in the form of a data-frame with column names. :return: """ colnames = ["ID", "Name", "position_from_start", "position_from_term", "actual_count", "downsampled_count"] outliers_df = pd.DataFrame(self.outliers, index=None, columns=colnames) return outliers_df @preconditions(lambda region: isinstance(region, str)) def get_frame_counts_df(self, region): if region == self.START: if self.frame_counts_df_start is None: self.frame_counts_df_start = CountManager.extract_count_sums_per_frame_per_transcript( self.get_count_vector_list(FivePSeqCounts.FULL_LENGTH), self.annotation.span_size, FivePSeqCounts.START) return self.frame_counts_df_start elif region == self.TERM: if self.frame_counts_df_term is None: self.frame_counts_df_term = CountManager.extract_count_sums_per_frame_per_transcript( self.get_count_vector_list(FivePSeqCounts.FULL_LENGTH), self.annotation.span_size, FivePSeqCounts.TERM) return self.frame_counts_df_term else: err_msg = ("Wrong region %s provided: should be either %s or %s" % (region, self.START, self.TERM)) self.logger.error(err_msg) raise Exception(err_msg) @preconditions(lambda region: isinstance(region, 
str)) def get_meta_count_series(self, region): """ Computes counts of 5' mapping positions at all the transcripts on the specified region, within the specified span size, and returns the position-wise sum of counts as a single [int] array. :param region: str: the region of transcript (start (START) or terminus (TERM)) to span around :return: pd.Series{int: int}: series of position-wise sum of transcript-specific counts indexed according to the distance of genomic coordinates from the first nucleotides of the codon corresponding to the specified region (START or TERM) """ if region == self.FULL_LENGTH: error_message = "Cannot compute meta counts for full length transcript counts: the counts should be of " \ "the same length. " \ "Regions can be specified from choices (%s, %s)" % (self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) elif region == self.START: if self.meta_count_series_start is not None: return self.meta_count_series_start elif region == self.TERM: if self.meta_count_series_term is not None: return self.meta_count_series_term else: error_message = "Problem retrieving meta_counts. " \ "Invalid region \"%s\" specified: should be one of (%s, %s)." 
\ % (region, self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) try: count_vector_list = self.get_count_vector_list(region) except Exception as e: raise e meta_count_series = CountManager.count_vector_to_series( CountManager.compute_meta_counts(count_vector_list), region, tail=self.annotation.span_size) self.set_meta_count_series(meta_count_series, region) return meta_count_series @preconditions(lambda count_vector_list: isinstance(count_vector_list, list), lambda count_vector_list: isinstance(count_vector_list[0], list), lambda count_vector_list: isinstance(count_vector_list[0][0], int), lambda region: isinstance(region, str)) def set_count_vector_list(self, count_vector_list, region): """ Sets the retrieved counts as a class property for later use. The property is chosen is based on the region supplied. :param count_vector_list: [[int]]: the vector of count vectors per transcript :param region: str: the region for which the counts were computed :return: nothing to return """ if region == self.START: self.count_vector_list_start = count_vector_list elif region == self.TERM: self.count_vector_list_term = count_vector_list elif region == self.FULL_LENGTH: self.count_vector_list_full_length = count_vector_list else: error_message = "Cannot set counts: wrong region %s supplied: should be either of (%s, %s, %s)" \ % (region, self.START, self.TERM, self.FULL_LENGTH) self.logger.error(error_message) raise ValueError(error_message) @preconditions(lambda meta_count_series: isinstance(meta_count_series, pd.Series), lambda region: isinstance(region, str)) def set_meta_count_series(self, meta_count_series, region): """ Sets the retrieved meta-counts as a class property for later use. The property is chosen is based on the region supplied. 
:param meta_count_series: Series{int:int}: the panda Series of per-position mapped read sums across transcripts indexed by position from first nucleotide of START of STOP codon :param region: str: the region for which the counts were computed :return: nothing to return """ if region == self.START: self.meta_count_series_start = meta_count_series elif region == self.TERM: self.meta_count_series_term = meta_count_series @preconditions(lambda region: isinstance(region, str), lambda span_before: isinstance(span_before, int), lambda span_before: span_before >= 0, lambda span_after: isinstance(span_after, int), lambda span_after: span_after >= 0) def get_unique_sequences(self, region, span_before, span_after): """ Retrieves the unique set of sequences spanning the given region of all transcripts, with the specified parameters. :param region: str: the START or TERM parts of the transcript :param span_before: int: the number of nucleotides to span before the first codon of the specified region :param span_after: int: the number of nucleotides to span after the last codon of the specified region :return: dict{str:int}: a dictionary keyed by the unique sequences identified within the spanning regions and valued by the number of occurrences of that sequence in transcripts """ sequences = {} i = 0 for transcript in self.annotation.get_transcript_assembly(max(span_before, span_after)): sequence = transcript.get_sequence(self.genome.genome_dict) if region == self.TERM: endpoint = len(transcript.spanning_segment) - max(span_before, span_after) span_sequence = sequence[endpoint - span_before: endpoint + span_after] elif region == self.START: startpoint = max(span_before, span_after) span_sequence = sequence[startpoint - span_before: startpoint + span_after] else: raise Exception if span_sequence in sequences.keys(): sequences[span_sequence] += 1 else: sequences[span_sequence] = 1 i += 1 return sequences def get_amino_acid_pauses(self): if self.amino_acid_count_df is None: 
self.compute_codon_pauses() return self.amino_acid_count_df def get_codon_pauses(self): if self.codon_count_df is None: self.compute_codon_pauses() return self.codon_count_df def get_tricodon_pauses(self): if self.tricodon_count_df is None: self.compute_codon_pauses() return self.tricodon_count_df def get_dicodon_pauses(self): if self.dicodon_count_df is None: self.compute_codon_pauses() return self.dicodon_count_df def get_dipeptide_pauses(self): if self.dipeptide_count_df is None: self.compute_codon_pauses() return self.dipeptide_count_df def get_tripeptide_pauses(self): if self.tripeptide_count_df is None: self.compute_codon_pauses() return self.tripeptide_count_df # TODO a modification for di-codon counts to me incorporated inseat of get_codon_pauses in the future @preconditions(lambda dist_from: isinstance(dist_from, int), lambda dist_from: dist_from < 0, lambda dist_to: isinstance(dist_to, int), lambda dist_to: dist_to >= 0) def compute_codon_pauses(self, dist_from=-30, dist_to=3, downsample=True): """ Counts the meta-number of 5' mapping positions at the given distance from a codon or codon-pair Only transcripts with cds of length multiple of 3 are accounted for. The only frame in these transcripts is considered. 
:param codon: :param dist_from: negative distance from each codon or codon-pair :param dist_to: positive distance after each codon or codon-pair :param mask_dist: the number of positions to mask in the beginning and end of the gene body :return: """ self.logger.info( "Counting codon specific pauses within %d to %d nt distance from the first nucleotide of each codon" % (dist_from, dist_to)) if self.config.args.no_mask: mask_dist = 0 self.logger.info("Transcript boundaries will not be masked") else: if hasattr(config.args, "codon_mask_size"): mask_dist = config.args.codon_mask_size else: mask_dist = self.MASK_DIST self.logger.info("Transcript boundaries will be masked by %d nucleotides" % mask_dist) codon_count_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(), columns=range(dist_from, dist_to)) dicodon_count_df = pd.DataFrame(data=0, index=Codons.get_dicodon_table().keys(), columns=range(dist_from + 3, dist_to + 3)) dipeptide_count_df = pd.DataFrame(data=0, index=Codons.get_dipeptide_list(), columns=range(dist_from + 3, dist_to + 3)) tricodon_count_df = pd.DataFrame(data=0, index=Codons.get_tricodon_table().keys(), columns=range(dist_from + 6, dist_to + 6)) tripeptide_count_df = pd.DataFrame(data=0, index=Codons.get_tripeptide_list(), columns=range(dist_from + 6, dist_to + 6)) self.codon_genome_usage_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(), columns=['abs', 'fraction']) self.amino_acid_genome_usage_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(), columns=['abs', 'fraction']) counter = 1 transcript_assembly = self.annotation.get_transcript_assembly( span_size=0) # don't take more than the gene body (different from previous versions) transcript_count = len(transcript_assembly) for t in range(transcript_count): transcript = transcript_assembly[t] if np.floor(transcript_count / 1000) > 0 and counter % 1000 == 0: self.logger.info("\r>>Transcript count: %d (%d%s)\t" % ( counter, floor(100 * (counter - 1) / transcript_count), '%',), 
) counter += 1 count_vector = self.get_count_vector(transcript, span_size=0, region=FivePSeqCounts.FULL_LENGTH, downsample=downsample) cds_sequence = self.get_cds_sequence_safe(transcript, 0) if sum(count_vector) == 0: continue if len(cds_sequence) != len(count_vector): self.logger.warning("Transcript num %d: cds sequence length %d not equal to count vector length %d" % (counter, len(cds_sequence), len(count_vector))) continue if (mask_dist >= 3): # v1.0b3 mask the first and last 20 counts to avoid initiation affecting codon-specific counts count_vector[0:mask_dist] = [0] * mask_dist # v1.0b3 mask the last 20 nucleotides to avoid termination affecting codon-specific counts, but keep STOP codon counts cds_sequence = cds_sequence[0:len(cds_sequence) - mask_dist] + ''.join( 'N' * (mask_dist - 3)) + cds_sequence[len(cds_sequence) - 3:len(cds_sequence)] # v1.0b3 add stretches of 0's to count_vector and N's to cds_sequence to avoid checking vector boundaries count_vector = [0] * (-1 * dist_from) + count_vector + [0] * dist_to cds_sequence = ''.join('N' * (-1 * dist_from)) + cds_sequence + ''.join('N' * dist_to) # store genome usage stats for i in range(0, len(cds_sequence), 3): codon = cds_sequence[i: i + 3].upper() if codon in self.codon_genome_usage_df.index: self.codon_genome_usage_df.at[codon, "abs"] += 1 amino_acid = Codons.CODON_TABLE.get(codon) self.amino_acid_genome_usage_df.at[amino_acid, "abs"] += 1 # identify 3nt bins with non-zero counts ind = np.array(range(0, len(count_vector), 3)) hits = [sum(count_vector[i:i + 3]) > 0 for i in ind] non_empty_ind = ind[hits] # loop through non-empty triplets only for i in non_empty_ind: # loop through all codons dist_from nucleotides downstream and dist_to nucleotides upstream j_range = list(np.arange(i, i - dist_to, -3))[::-1] + list(np.arange(i + 3, i + 3 - dist_from, 3)) for j in j_range: if j < 0: continue if j + 3 > len(cds_sequence): break codonA = cds_sequence[j: j + 3].upper() if j - 3 >= 0: codonP = cds_sequence[j 
- 3: j].upper() else: codonP = 'NNN' if j - 6 >= 0: codonE = cds_sequence[j - 6: j - 3].upper() else: codonE = 'NNN' if (len(codonA) == 3) & (codonA in Codons.CODON_TABLE.keys()): for p in range(0, 3): d = i - j + p try: codon_count_df.at[codonA, d] += count_vector[i + p] if len(codonP) == 3 and codonP in Codons.CODON_TABLE.keys(): dicodon_count_df.at[codonP + codonA, d + 3] += count_vector[i + p] dipeptide = Codons.get_peptide_from_codon_list([codonP, codonA]) dipeptide_count_df.at[dipeptide, d + 3] += count_vector[i + p] if len(codonE) == 3 and codonE in Codons.CODON_TABLE: tricodon_count_df.at[codonE + codonP + codonA, d + 6] += count_vector[i + p] tripeptide = Codons.get_peptide_from_codon_list([codonE, codonP, codonA]) tripeptide_count_df.at[tripeptide, d + 6] += count_vector[i + p] except Exception as e: self.logger.warn("Index out of range: i: %d, j: %d, p: %d, d: %d. %s" % (i, j, p, d, str(e))) self.codon_genome_usage_df.loc[:, "fraction"] = self.codon_genome_usage_df.loc[:, "abs"] / sum( self.codon_genome_usage_df.loc[:, "abs"]) self.amino_acid_genome_usage_df.loc[:, "fraction"] = self.amino_acid_genome_usage_df.loc[:, "abs"] / sum( self.amino_acid_genome_usage_df.loc[:, "abs"]) self.amino_acid_count_df = self.codon_to_amino_acid_count_df(codon_count_df) self.tripeptide_count_df = self.filter_codon_counts(tripeptide_count_df, self.get_tripeptide_pos()) self.dipeptide_count_df = self.filter_codon_counts(dipeptide_count_df, self.get_dipeptide_pos()) # rename codon_count_df indices by adding amino acid names new_index = [Codons.CODON_TABLE.get(codon) + '_' + codon for codon in codon_count_df.index] codon_count_df.index = new_index self.codon_count_df = codon_count_df # rename codon_count_df indices by adding amino acid names self.logger.info("Mapping tricodons to amino acid names") tricodon_count_df.index = Codons.get_tricodon_full_index() self.tricodon_count_df = self.filter_codon_counts(tricodon_count_df, self.get_tripeptide_pos()) # rename codon_count_df 
indices by adding amino acid names self.logger.info("Mapping dicodons to amino acid names") dicodon_count_df.index = Codons.get_dicodon_full_index() self.dicodon_count_df = self.filter_codon_counts(dicodon_count_df, self.get_dipeptide_pos()) return def codon_to_amino_acid_count_df(self, codon_count_df): amino_acid_count_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(), columns=codon_count_df.columns) for codon in codon_count_df.index: aa = Codons.CODON_TABLE.get(codon) amino_acid_count_df.loc[aa, :] += codon_count_df.loc[codon, :] return amino_acid_count_df def get_tripeptide_pos(self): if hasattr(config.args, "tripeptide_pos"): pos = config.args.tripeptide_pos else: pos = self.TRIPEPTIDE_POS return pos def get_dipeptide_pos(self): if hasattr(config.args, "dipeptide_pos"): pos = config.args.dipeptide_pos else: pos = self.DIPEPTIDE_POS return pos def filter_codon_counts(self, codon_count_df, pos, top=50): """ Filter the di/tricodon (or di/tripeptide) counts to exclude low counts (rowSums less than the specified threshold) and to include only the top di/tricodons with highest relative counts at the given position :param codon_count_df: the codon_df to filter :param top: the number of highest relative count tricodons to keep :param pos: the position to filter the top counts :return codon_filtered_df: the filtered count dataframe """ self.logger.info("Sorting and selecting top %d peptides/codons at position %d from the A site" % (top, pos)) codon_filtered_df = codon_count_df[codon_count_df.sum(1) >= self.COUNT_THRESHOLD] pos_rel_counts = codon_filtered_df[pos] / codon_filtered_df.sum(1) codon_filtered_df = codon_filtered_df.iloc[ sorted(range(len(pos_rel_counts)), reverse=True, key=lambda k: pos_rel_counts[k])[0:top]] return codon_filtered_df def get_amino_acid_stats(self): if self.amino_acid_stats_df is None: self.amino_acid_stats_df = self.compute_codon_stats_amino_acid() return self.amino_acid_stats_df def get_codon_stats(self): if self.codon_stats_df 
is None: self.codon_stats_df = self.compute_codon_stats_codon() return self.codon_stats_df def compute_codon_genome_usage(self): self.codon_genome_usage_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(), columns=['abs', 'fraction']) self.amino_acid_genome_usage_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(), columns=['abs', 'fraction']) def compute_codon_stats_amino_acid(self): return self.compute_codon_stats(self.get_amino_acid_pauses(), self.amino_acid_genome_usage_df) def compute_codon_stats_codon(self): return self.compute_codon_stats(self.get_codon_pauses(), self.codon_genome_usage_df) def compute_codon_stats(self, codon_counts, codon_genome_usage, until=-3): """ Counts usage and frame protection stats for each codon/amino-acid. The following dataframe will be generated based on codon counts table: codon/aminoacid FPI Frame peak(pos) peak(scale) usage(sum of counts) genome_presence :return: dataframe """ self.logger.info("Counting codon usage statistics") try: stop_ind = codon_counts.keys().to_list().index(until) codon_counts = codon_counts.iloc[:, 0:stop_ind] f2 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 1, -1, -3))]) f1 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 2, -1, -3))]) f0 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 3, -1, -3))]) codon_stats = pd.DataFrame(list(zip(f0, f1, f2)), columns=['F0', 'F1', 'F2']) codon_stats['FPI'] = np.zeros(len(codon_stats)) codon_stats['F'] = np.zeros(len(codon_stats)) codon_stats['F_perc'] = np.zeros(len(codon_stats)) for i in range(len(codon_stats)): fpi, fmax, fperc = CountManager.fpi_stats_from_frame_counts(codon_stats.iloc[i, :]) codon_stats.loc[i, 'FPI'] = fpi codon_stats.loc[i, 'F'] = fmax codon_stats.loc[i, 'F_perc'] = fperc codon_stats['peak_pos'] = [np.argmax(codon_counts.iloc[i, :]) for i in range(len(codon_stats))] codon_stats['peak_scale'] = np.zeros(len(codon_stats)) for i in range(len(codon_stats)): for i in 
range(len(codon_stats)): counts = list(codon_counts.iloc[i, :]) if sum(counts) > 0: frame = int(codon_stats.loc[i, 'F']) frame_inds = [j for j in reversed(range(len(counts) - 3 + frame, -1, -3))] frame_counts = [counts[j] for j in frame_inds] codon_stats.loc[i, 'peak_scale'] = len(frame_counts) * max(frame_counts) / sum(frame_counts) codon_stats.loc[i, 'peak_pos'] = codon_counts.columns[frame_inds[np.argmax(frame_counts)]] codon_stats['usage'] = list(sum([codon_counts.iloc[:, i] for i in range(0, stop_ind)])) codon_stats['genome_usage_abs'] = list(codon_genome_usage.loc[:, 'abs']) codon_stats['genome_usage_fraction'] = list(codon_genome_usage.loc[:, 'fraction']) usage_norm = codon_stats['usage'] / codon_stats['genome_usage_fraction'] usage_norm /= sum(usage_norm) codon_stats['usage_normalized'] = usage_norm codon_stats.index = codon_counts.index return codon_stats except: self.logger.warning("Could not compute codon stats. Codon counts dataframe did not have column %d." % until) return None # exclude the counts downstream from -3 @preconditions(lambda loci_file: str) def get_pauses_from_loci(self, loci_file, read_locations=READ_LOCATIONS_ALL): """ Counts the meta-number of 5' mapping positions at the given distance from the specified loci The loci file should contain one locus per row. Two tab separated columns should indicate chromosome number and position. The distance of 5' mapping positions from each loci is counted within each cds. The padding sizes are subtracted from the start and end of each transcript. :param padding: int: padding, bp (not to count the first and last regions in the transcripts) :param loci_file: str: full path to the file specifying the loci. 
:return: """ self.logger.info( "Counting pauses in %s region from loci given in file %s" % (read_locations, loci_file)) loci = pd.read_csv(loci_file, sep="\t", index_col=None) self.loci_overlaps = [] # the results will be kept in a dictionary: # key - distance from any locus # value - number of mapping positions at key distance from any locus loci_pauses_dict = {} span_size = self.annotation.span_size counter = 0 loci_row = 0 done = False move_transcript = True move_locus = False tg = self.annotation.get_transcript_assembly(span_size) transcript = None while True: if counter % 1000 == 0: self.logger.info("\r>>Transcript count: %d (%d%s)\t" % ( counter, floor(100 * (counter - 1) / self.annotation.transcript_count), '%',), ) if move_locus: if loci.shape[0] == loci_row: self.logger.debug("Reached the end of loci file (row %d)" % loci_row) break loci_row += 1 move_locus = False continue if move_transcript: try: transcript = tg[counter] except: self.logger.debug("Reached the end of transcript assembly (counter: %d)" % counter) break counter += 1 move_transcript = False continue # check if the locus at the cursor is within the current transcript if loci_row < loci.shape[0]: if str(transcript.chrom) == str(loci.loc[loci_row, "chr"]): if loci.loc[loci_row, "str"] == "+": locus_pos = loci.loc[loci_row, "start"] else: locus_pos = loci.loc[loci_row, "end"] # locus is upstream of transcript -> move locus if transcript.cds_genome_start - span_size > locus_pos: move_locus = True continue # transcript is upstream of locus -> move transcript elif transcript.cds_genome_end + span_size < locus_pos: move_transcript = True continue elif str(transcript.strand) != str(loci.loc[loci_row, "str"]): move_locus = True continue else: count_vector = self.get_count_vector(transcript, span_size, FivePSeqCounts.FULL_LENGTH, downsample=True) transcript_genome_start = transcript.cds_genome_start - span_size transcript_genome_end = transcript.cds_genome_end + span_size if len(count_vector) != 
transcript_genome_end - transcript_genome_start: move_transcript = True continue if transcript.strand == "+": locus_ind = locus_pos - transcript_genome_start else: locus_ind = transcript_genome_end - locus_pos if read_locations == self.READ_LOCATIONS_ALL: ind = np.array(range(len(count_vector) - 2 * span_size, len(count_vector))) elif read_locations == self.READ_LOCATIONS_5UTR: ind = np.array(range(0, span_size)) elif read_locations == self.READ_LOCATIONS_3UTR: ind = np.array(range(len(count_vector) - span_size, len(count_vector))) elif read_locations == self.READ_LOCATIONS_CDS: ind = np.array(range(len(count_vector) - 2 * span_size, len(count_vector) - span_size)) else: ind = np.array(range(0, len(count_vector))) hits = [count_vector[i] > 0 for i in ind] non_empty_ind = ind[hits] for i in non_empty_ind: distance = i - locus_ind if distance < 2 * span_size and distance >= -2 * span_size: if distance in loci_pauses_dict.keys(): loci_pauses_dict[distance] += count_vector[i] else: loci_pauses_dict.update({distance: count_vector[i]}) overlap = [FivePSeqOut.get_transcript_attr(transcript, "ID"), FivePSeqOut.get_transcript_attr(transcript, "Name"), transcript.chrom, transcript.strand, transcript.cds_genome_start, transcript.cds_genome_end, loci.loc[loci_row, "symbol"], loci.loc[loci_row, "chr"], loci.loc[loci_row, "str"], loci.loc[loci_row, "start"], loci.loc[loci_row, "end"], i, distance, count_vector[i]] self.loci_overlaps.append(overlap) move_locus = True elif str(transcript.chrom) > str(loci.loc[loci_row, "chr"]): move_locus = True continue else: move_transcript = True continue else: break # turn the dictionary into a metacount vector, with indices from -1*maxdistance to maxdistance self.logger.debug("Merging the dictionary into metacounts") maxdist = 2 * span_size metacount_vector = [0] * 2 * maxdist for i in range(-1 * maxdist, maxdist): if i in loci_pauses_dict.keys(): metacount_vector[maxdist + i] = loci_pauses_dict[i] metacount_series = 
pd.Series(data=metacount_vector, index=np.arange(-1 * maxdist, maxdist)) return metacount_series def get_loci_overlaps_df(self): """ Returns the overlaps of transcripts with given loci in the form of a data-frame with column names. :return: """ colnames = ["ID", "Name", "chr", "str", "genome_start", "genome_end", "RBP", "loc_chr", "loc_str", "loc_start", "loc_end", "i", "dist", "count"] outliers_df = pd.DataFrame(self.loci_overlaps, index=None, columns=colnames) return outliers_df @preconditions(lambda num: isinstance(num, int)) def top_populated_transcript_indices(self, num=1000): """ Returns indices of top populated transcripts. A populated transcript is defined as the one with most length-relative number of positions with non-zero counts. :param num: int: number of transcript indices to return :return: [int]: a list of transcript indices in the transcript assembly """ populated = [0] * self.annotation.transcript_count for i in range(self.annotation.transcript_count): transcript = self.annotation.transcript_assembly[i] count_vector = self.get_count_vector(transcript, 0, FivePSeqCounts.FULL_LENGTH, downsample=False) populated[i] = sum(count_vector > 0) / len(count_vector) populated_indices = sorted(range(len(populated)), key=lambda k: populated[k]) return populated_indices class FivePSeqCountsContainer: """ A wraper for the following data structures: count_vector_list_start = None count_vector_list_term = None count_vector_list_full_length = None meta_count_series_start = None meta_count_series_term = None frame_counts_df_start = None frame_counts_df_term = None """ count_vector_list_start = None count_vector_list_term = None count_vector_list_full_length = None meta_count_series_start = None meta_count_series_term = None frame_counts_df_start = None frame_counts_df_term = None def __init__(self, count_vector_list_start, count_vector_list_term, count_vector_list_full_length, meta_count_series_start, meta_count_series_term, frame_counts_df_start, 
frame_counts_df_term): self.count_vector_list_start = count_vector_list_start self.count_vector_list_term = count_vector_list_term self.count_vector_list_full_length = count_vector_list_full_length self.meta_count_series_term = meta_count_series_term self.meta_count_series_start = meta_count_series_start self.frame_counts_df_start = frame_counts_df_start self.frame_counts_df_term = frame_counts_df_term class CountManager: """ This module implements a set of static functions to handle count vectors retrieved from FivePSeqCounts class. """ def __init__(self): pass @staticmethod @preconditions(lambda count_vector_list: isinstance(count_vector_list, list), lambda count_vector_list: isinstance(count_vector_list[0], list), lambda count_vector_list: isinstance(count_vector_list[0][0], int)) def compute_meta_counts(count_vector_list): """ Computes the sum of counts at each position across transcripts. :param count_vector_list: [[int]] a list of count vectors for all transcripts :return: [int]: a vector of position-wise count sums """ # TODO check that the count vectors have the same length max_len = 0 for i in range(len(count_vector_list)): if len(count_vector_list) > max_len: max_len = len(count_vector_list[i]) for i in range(len(count_vector_list)): if len(count_vector_list[i]) < max_len: short_vec = count_vector_list[i] long_vec = [0] * max_len long_vec[0:len(short_vec)] = short_vec count_vector_list[i] = long_vec # sum the position-wise counts meta_count_vector = np.vstack(count_vector_list).sum(axis=0).tolist() return meta_count_vector @staticmethod @preconditions(lambda count_vector: isinstance(count_vector, list), lambda count_vector: isinstance(count_vector[0], int), lambda span_size: isinstance(span_size, int), lambda region: isinstance(region, str), lambda include_span: isinstance(include_span, bool)) def extract_frame_count_vectors(count_vector, span_size, region=FivePSeqCounts.START, include_span=False): """ Takes a vector of position-wise int counts across 
full length transcripts and returns counts for three different frames from 0 to 2, relative to either START (default) or TERM regions. :param count_vector: [int]: a transcript-specific vector of 5P read counts per transcript position :param span_size: int: the size of regions spanning around the transcript cds :param include_span: if true returns frame counts including the spanning regions, and only the cds region otherwise :param region: region (START or TERM) relative to which to count the frames :return: a tuple of frame count arrays (frame0:[int], frame1:[int], frame2:[int]) """ # determine the tail size to be subtracted from the count_vector if include_span: tail = 0 else: tail = span_size # for START, start the Frame0 from tail to the length of the vector minus the tail if region == FivePSeqCounts.START: frame0_array = count_vector[0 + tail: len(count_vector) - tail: 3] frame1_array = count_vector[1 + tail: len(count_vector) - tail: 3] frame2_array = count_vector[2 + tail: len(count_vector) - tail: 3] elif region == FivePSeqCounts.TERM: # NOTE the frames relative to START and TERM should be aligned in the future # NOTE (if cds length is not a multiple of 3) frame0_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 3 - tail, -1 + tail, -3)))] frame1_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 2 - tail, -1 + tail, -3)))] frame2_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 1 - tail, -1 + tail, -3)))] else: error_message = "Invalid region %s specified: should be either %s or %s" \ % (region, FivePSeqCounts.START, FivePSeqCounts.TERM) logger = logging.getLogger(config.FIVEPSEQ_LOGGER) logger.error(error_message) raise Exception(error_message) return frame0_array, frame1_array, frame2_array @staticmethod @preconditions(lambda count_vector: isinstance(count_vector, list), lambda count_vector: isinstance(count_vector[0], int), lambda region: isinstance(region, str), lambda tail: 
isinstance(tail, int), lambda tail: tail >= 0) def count_vector_to_series(count_vector, region, tail=0): """ Takes a vector of counts, indexes them with distances from the specified region. Returns a series with indexes as genomic coordinates from start/stop codons, and values as counts at each coordinates. For the following REAL coordinates (R), D0, D1 and D2 will be converted to: Relative to START: R: 0, 1, ... S0, Sm, S1, ... L - 1 D: -T, -T+1, ... 0, 1, 2, ... L - T - 1 Relative to TERM: R: 0, 1, ... S0, Sm, S1, ... L - 1 D: -(L-T-3), -(L-T-2), ... 0, 1, 2, ... T + 2 Legend: R Real coordinates S0 First nucleotides of START or TERM codon S1 Last nucleotides of START or TERM codon T Tail L Vector length :param count_vector: [int]: a vector of summed position-wise counts for a (meta)-transcript :param region: str: the region respective to which the distance is calculated :param tail: int: :return: pandas.Series: a series with indices as genomic coordinates* and values as meta counts. *-corresponding to positions' distance from nucleotide 0 of START/TERM codons """ if region == FivePSeqCounts.START: d = np.arange(-tail, len(count_vector) - tail) elif region == FivePSeqCounts.TERM: d = np.arange(-(len(count_vector) - tail - 3), tail + 3) else: error_message = "Invalid region %s specified: should be either %s or %s" \ % (region, FivePSeqCounts.START, FivePSeqCounts.TERM) logger = logging.getLogger(config.FIVEPSEQ_LOGGER) logger.error(error_message) raise Exception(error_message) counts_series = pd.Series(data=count_vector, index=d) return counts_series @staticmethod @preconditions(lambda count_vector: isinstance(count_vector, list), lambda count_vector: isinstance(count_vector[0], int), lambda region: isinstance(region, str), lambda tail: isinstance(tail, int), lambda tail: tail >= 0) def count_vector_to_df(count_vector, region, tail=0): """ Takes a vector of counts, indexes them with distances from the specified region. 
Returns a dataframe with indexes as genomic coordinates from start/stop codons, and values as counts at each coordinates. For the following REAL coordinates (R), D0, D1 and D2 will be converted to: Relative to START: R: 0, 1, ... S0, Sm, S1, ... L - 1 D: -T, -T+1, ... 0, 1, 2, ... L - T - 1 Relative to TERM: R: 0, 1, ... S0, Sm, S1, ... L - 1 D: -(L-T-3), -(L-T-2), ... 0, 1, 2, ... T + 2 Legend: R Real coordinates S0 First nucleotides of START or TERM codon S1 Last nucleotides of START or TERM codon T Tail L Vector length :param count_vector: [int]: a vector of summed position-wise counts for a (meta)-transcript :param region: str: the region respective to which the distance is calculated :param tail: int: :return: pandas.Series: a series with indices as genomic coordinates* and values as meta counts. *-corresponding to positions' distance from nucleotide 0 of START/TERM codons """ if region == FivePSeqCounts.START: d = np.arange(-tail, len(count_vector) - tail) elif region == FivePSeqCounts.TERM: d = np.arange(-(len(count_vector) - tail - 3), tail + 3) else: error_message = "Invalid region %s specified: should be either %s or %s" \ % (region, FivePSeqCounts.START, FivePSeqCounts.TERM) logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise Exception(error_message) counts_df = pd.DataFrame({'D': d, 'C': count_vector}) return counts_df @staticmethod @preconditions(lambda region: isinstance(region, str), lambda span_size: isinstance(span_size, int)) def extract_count_sums_per_frame_per_transcript(count_vector_list, span_size, region): """ Returns a data frame with rows representing transcripts and columns (F0, F1, F2) representing the sum of 5P read mapping counts at each frame. The transcripts are aligned at the start or the end, depending on the region specified. 
:param span_size: :param count_vector_list: the list of per-transcript count vectors :param region: str: the region to align the transcripts to :return: a dataframe with frame-based count-sums for each transcript """ logging.getLogger(config.FIVEPSEQ_LOGGER).debug( "Retrieving count-sums per frame relative to %s ..." % region) # Create an empty dataframe n = len(count_vector_list) frame_counts_df = pd.DataFrame({'F0': [0] * n, 'F1': [0] * n, 'F2': [0] * n}) for t_ind in range(0, n): # Print status update to console if t_ind % 10000 == 0: logging.getLogger(config.FIVEPSEQ_LOGGER).info("\r>>Transcript count: %d (%d%s)\t" % ( t_ind, np.floor(100 * (t_ind - 1) / n), '%')) # extract frame count vectors from count vectors count_vector = count_vector_list[t_ind] if sum(count_vector) == 0: for f in range(0, 3): frame_counts_df.iloc[t_ind, f] = 0 else: frame_counts = CountManager.extract_frame_count_vectors(count_vector, span_size, region) # sum-up counts in each frame and add to the dataframe for f in range(0, 3): frame_counts_df.iloc[t_ind, f] = sum(frame_counts[f]) return frame_counts_df @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_index_as_list(file_path): """ Reads a new line separated list of integers from a file to a list of indices. :param file_path: str: full path to the file :return: [int]: list of indices :exception: raise IOError if file does not exist """ if not os.path.exists(file_path): error_message = "Problem reading counts: the file %s does not exist" % file_path logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading count file %s" % file_path) indices = list(pd.read_csv(file_path, header=None).iloc[:, 0]) return indices @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_counts_as_list(file_path): """ Reads and returns a list of count vectors, each corresponding to a transcript. 
:param file_path: str: full path to the file :return: [[int]]: list of count vectors (a count vector is a list of int counts) :exception: raises IOError if file does not exist """ if not os.path.exists(file_path): error_message = "Problem reading counts: the file %s does not exist" % file_path logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading count file %s" % file_path) df = pd.read_csv(file_path, header=None, sep="|") count_vector_list = [[]] * len(df) for i in range(0, len(df)): count_vector_list[i] = list(map(int, df.iloc[i, 0].split("\t"))) return count_vector_list @staticmethod @preconditions(lambda file: isinstance(file, str)) def read_meta_counts(file): """ Reads the meta count file as pandas DataFrame. These files are saved with tab separator. They have two columns, but no column names. This function assigns names to read DataFrame: D (distance from START/TERM) C (meta counts) The number of rows corresponds to 2*span_size :param file: str: full path to the file :return: pandas DataFrame: a dataframe with D and C columns :exception: raises IOError if file does not exist """ if not os.path.exists(file): error_message = "Problem reading meta counts: the file %s does not exist" % file logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading meta count file %s" % file) meta_count = pd.read_csv(file, sep="\t", header=None, names=["D", "C"]) return meta_count @staticmethod @preconditions(lambda file: isinstance(file, str)) def read_frame_counts(file): """ Reads the frame count file as pandas DataFrame. The file has a header with four columns: (no name: transcript number), F0, F1, F2 A four-column dataFrame is created and returned accordingly. The number of rows corresponds to the number of transcripts. 
:param file: str: full path to the file :return: pandas DataFrame: a dataframe with transcript number and F0, F1, F2 frame counts :exception: raises IOError if file doesn't exist """ if not os.path.exists(file): error_message = "Problem reading frame counts: the file %s does not exist" % file logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading frame counts file %s" % file) frame_counts = pd.read_csv(file, sep="\t") return frame_counts @staticmethod @preconditions(lambda file: isinstance(file, str)) def read_amino_acid_df(file): """ Reads a pandas DataFrame from amino acid pauses file. The file is stored with a header indicating distance from amino acids. The file has row names indicating names of amino acids. The dataFrame is read with indicated columns and row names. :param file: str: full path to file :return: pandas DataFrame: index is amino acids, columns - distance :exception: raises IOError if file does not exist """ if not os.path.exists(file): error_message = "Problem reading amino acid pauses: the file %s does not exist" % file logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading amino acids pauses file %s" % file) amino_acid_df = pd.read_csv(file, sep="\t", header=0, index_col=0) return amino_acid_df @staticmethod def top_populated_count_vector_indices(count_vector_list, num=1000): """ Returns indices of top populated count_vectors (transcripts). A populated count_vector (transcript) is defined as the one with most length-relative number of positions with non-zero counts. 
:param count_vector_list: [[int]]: a list of count vectors :param num: int: number of indices to return :return: [int]: a list of count_vector indices in the count_vector_list """ populated = [0] * len(count_vector_list) for i in range(len(count_vector_list)): count_vector = count_vector_list[i] populated[i] = float(np.count_nonzero(count_vector)) / len(count_vector) populated_indices = sorted(range(len(populated)), key=lambda k: populated[k], reverse=True) return populated_indices[0:num] @staticmethod def canonical_transcript_indices(count_dir): """ Reads and returns canonical transcript indices from the canonical transcript indices file, if such a file exists. :return: [int] indices of transcript with canonical start and stop codons or None if no such file exists. """ canonical_index_file = os.path.join(count_dir, FivePSeqOut.CANONICAL_TRANSCRIPT_INDEX_FILE) if os.path.exists(canonical_index_file): transcript_index = list(pd.read_csv(canonical_index_file, header=None).iloc[:, 0]) return transcript_index else: logging.getLogger(config.FIVEPSEQ_LOGGER).debug( "Problem retrieving canonical transcript indices. No file %s exists. " "The filter will return None." % canonical_index_file) return None @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_count_vector(file_path): """ Reads and returns a list of counts from a new-line separated file of counts. 
:param file_path: str: full path to the file :return: [int]: a list of counts :exception: raises IOError if file does not exist """ if not os.path.exists(file_path): error_message = "Problem reading counts: the file %s does not exist" % file_path logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading count file %s" % file_path) if os.stat(file_path).st_size == 0: counts = [] else: logging.getLogger("Reading in count distribution (this may last a few minutes for large libraries)") counts = pd.read_csv(file_path, header=None) counts = list(map(int, counts.iloc[:, 0])) return counts @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_outlier_lower(file_path): """ Reads and returns the lower value of read numbers to be considered as outliers for downsampling. :param file_path: a file containing a single float number :return: float """ outlier_lower = float(pd.read_csv(file_path, header=None)) return outlier_lower @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_count_dict(file_path): """ Reads a tab-delimited file and returns a dictionary of count frequencies. 
:param file_path: a file containing a dictionary of count frequencies :return: float """ count_freq_dict = {} dict_mat = pd.read_csv(file_path, header=None, delimiter="\t", index_col=0) for i in range(len(dict_mat)): count_freq_dict[dict_mat.index[i]] = dict_mat.iloc[i, 0] return collections.OrderedDict(sorted(count_freq_dict.items())) @staticmethod def filter_fivepseqCountsContainer(fivepseqcountsContainer, transcript_indices, span_size=100): """ Gets a fivepseq_counts instance with non-empty count items and filters each by provided transcript indices count_vector_list_start = None count_vector_list_term = None count_vector_list_full_length = None meta_count_series_start = None meta_count_series_term = None frame_counts_df_start = None frame_counts_df_term = None :param fivepseqcountsContainer: :param transcript_ind: :return: """ if fivepseqcountsContainer.count_vector_list_full_length is not None: count_vector_list_full_length = [fivepseqcountsContainer.count_vector_list_full_length[i] for i in transcript_indices] else: count_vector_list_full_length = None count_vector_list_term = [fivepseqcountsContainer.count_vector_list_term[i] for i in transcript_indices] count_vector_list_start = [fivepseqcountsContainer.count_vector_list_start[i] for i in transcript_indices] meta_count_series_term = CountManager.count_vector_to_df( CountManager.compute_meta_counts(count_vector_list_term), FivePSeqCounts.TERM, tail=span_size) meta_count_series_start = CountManager.count_vector_to_df( CountManager.compute_meta_counts(count_vector_list_start), FivePSeqCounts.START, tail=span_size) frame_counts_df_term = fivepseqcountsContainer.frame_counts_df_term.iloc[transcript_indices,] frame_counts_df_start = fivepseqcountsContainer.frame_counts_df_start.iloc[ transcript_indices,] fivepseq_counts_filtered = FivePSeqCountsContainer(count_vector_list_start, count_vector_list_term, count_vector_list_full_length, meta_count_series_start, meta_count_series_term, frame_counts_df_start, 
frame_counts_df_term) return fivepseq_counts_filtered @staticmethod def combine_count_series(count_series_dict, lib_size_dict=None, scale=False): """ Combines counts in the series dictionary and returns a single count series. If lib_size_dict is not None, than the counts are first weighted based on the library size and then combined. Weighting is done in a way to give higher weight to samples with larger library sizes. :param count_series_dict: :param lib_size_dict: :return: """ count_series_combined = None start = True for key in count_series_dict.keys(): count_series = count_series_dict[key].copy() if lib_size_dict is not None: if scale: count_series.C /= (float(lib_size_dict[key]) / (10 ** 6)) * len(lib_size_dict) else: count_series.C *= float(lib_size_dict[key]) / sum(lib_size_dict.values()) if start: count_series_combined = count_series.copy() start = False else: count_series_combined.C += count_series.C return count_series_combined @staticmethod def combine_frame_counts(frame_count_dict, lib_size_dict=None): """ Combines counts in the dataframe dictionary and returns a single dataframe. If lib_size_dict is not None, than the counts are first weighted based on the library size and then combined. Weighting is done in a way to give higher weight to samples with larger library sizes. :param count_series_dict: :param lib_size_dict: :return: """ frame_count_combined = None start = True for key in frame_count_dict.keys(): count_df = frame_count_dict[key] if lib_size_dict is not None: count_df.loc[:, ('F0', 'F1', 'F2')] *= float(lib_size_dict[key]) / sum(lib_size_dict.values()) if start: frame_count_combined = count_df.copy() start = False else: frame_count_combined.loc[:, ('F0', 'F1', 'F2')] += count_df.loc[:, ('F0', 'F1', 'F2')] return frame_count_combined @staticmethod def combine_amino_acid_dfs(amino_acid_df_dict, lib_size_dict=None): """ Combines counts in the dataframe dictionary and returns a single dataframe. 
If lib_size_dict is not None, than the counts are first weighted based on the library size and then combined. Weighting is done in a way to give higher weight to samples with larger library sizes. :param count_series_dict: :param lib_size_dict: :return: """ amino_acid_df_combined = None start = True for key in amino_acid_df_dict.keys(): count_df = amino_acid_df_dict[key].copy(deep=True) if lib_size_dict is not None: count_df *= float(lib_size_dict[key]) / sum(lib_size_dict.values()) if start: amino_acid_df_combined = count_df.copy() start = False else: amino_acid_df_combined += count_df return amino_acid_df_combined @staticmethod def fpi_stats_from_frame_counts(frame_counts): """ Takes as input a vector named [F0, F1, F2] and returns: (fpi, fmax, f_perc) fpi = frame protection index of the maximum frame fmax = the maximum frame f_perc = the fraction of counts in the maximum frame :param frame_counts: :return: """ f_counts = (frame_counts['F0'], frame_counts['F1'], frame_counts['F2']) fmax = np.argmax(f_counts) nom = f_counts[fmax] if nom == 0: fpi = None f_perc = None else: denom = (sum(f_counts) - nom) / 2. if denom == 0: fpi = np.log2(float(nom) / 0.5) else: fpi = np.log2(float(nom / denom)) f_perc = 100 * (float(f_counts[fmax]) / sum(f_counts)) return fpi, fmax, f_perc
45.111648
152
0.610319
import collections import logging import os from math import floor import numpy as np import pandas as pd import plastid from preconditions import preconditions from scipy import stats from fivepseq import config from fivepseq.logic.structures import codons from fivepseq.logic.structures.codons import Codons from fivepseq.util.writers import FivePSeqOut class FivePSeqCounts: START = "START" TERM = "STOP" FULL_LENGTH = "full_length" ALL = "all" START_CODON = "start" STOP_CODON = "stop" TRANSCRIPT_LENGTH = "len" TRANSCRIPT_3NT = "3nt" NUMBER_READS = "NumOfReads" NUMBER_READS_DOWNSAMPLED = "NumOfReadsDownsampled" NUMBER_POSITIONS = "NumOfMapPositions" COUNT_THRESHOLD = 100 logger = logging.getLogger(config.FIVEPSEQ_LOGGER) count_distribution_dict = None outlier_lower = None downsample_constant = None outlier_probability = None config = None alignment = None annotation = None genome = None count_vector_list_start = None count_vector_list_term = None count_vector_list_full_length = None meta_count_series_start = None meta_count_series_term = None frame_counts_df_start = None frame_counts_df_term = None codon_genome_usage_df = None codon_count_df = None amino_acid_count_df = None dicodon_count_df = None dipeptide_count_df = None tricodon_count_df = None tripeptide_count_df = None codon_stats_df = None amino_acid_stats_df = None codon_genome_usage_df = None amino_acid_genome_usage_df = None start_codon_dict = None stop_codon_dict = None canonical_transcript_index = None transcript_descriptors = None outliers = None is_geneset = False loci_overlaps = None READ_LOCATIONS_ALL = "_ALL" READ_LOCATIONS_3UTR = "_3UTR" READ_LOCATIONS_5UTR = "_5UTR" READ_LOCATIONS_CDS = "_CDS" MASK_DIST = 20 TRIPEPTIDE_POS = -11 DIPEPTIDE_POS = -14 missing_chroms = [] def __init__(self, alignment, annotation, genome, config, downsample_constant, is_geneset=False, transcript_filter=None): self.alignment = alignment self.annotation = annotation self.genome = genome self.transcript_filter = 
transcript_filter self.config = config self.outlier_probability = config.args.op self.outlier_lower = downsample_constant self.outliers = [] self.start_codon_dict = {} self.stop_codon_dict = {} self.canonical_transcript_index = [] self.is_geneset = is_geneset self.logger.info("Initiated a FivePSeqCounts object with" "\n\talignment from file %s" "\n\tannotation from file %s " "\n\tgenome from file %s" % (alignment.alignment_file.filename, annotation.file_path, genome.fasta_file)) def get_transcript_descriptors(self): if self.transcript_descriptors is None: self.generate_transcript_descriptors() return self.transcript_descriptors def get_start_codon_dict(self): if self.start_codon_dict is None: self.generate_transcript_descriptors() return self.start_codon_dict def get_stop_codon_dict(self): if self.stop_codon_dict is None: self.generate_transcript_descriptors() return self.stop_codon_dict def generate_transcript_descriptors(self): self.logger.info("Generating transcript descriptors") transcript_assembly = self.annotation.get_transcript_assembly(span_size=0) transcript_count = len(transcript_assembly) self.transcript_descriptors = pd.DataFrame(data=None, index=range(transcript_count), columns=[self.START_CODON, self.STOP_CODON, self.TRANSCRIPT_LENGTH, self.TRANSCRIPT_3NT, self.NUMBER_READS, self.NUMBER_READS_DOWNSAMPLED, self.NUMBER_POSITIONS]) count_distribution_dict = {} for transcript_ind in range(transcript_count): transcript = transcript_assembly[transcript_ind] cds_sequence = self.get_cds_sequence_safe(transcript, 0) count_vector = self.get_count_vector_safe(transcript, 0) for c in count_vector: if c > 0: if c in count_distribution_dict: count_distribution_dict[c] += 1 else: count_distribution_dict[c] = 1 start_codon = cds_sequence[0:3] stop_codon = cds_sequence[len(cds_sequence) - 3:len(cds_sequence)] if (start_codon == codons.Codons.START_CODON) & (stop_codon in codons.Codons.stop_codons): self.canonical_transcript_index.append(transcript_ind) 
self.transcript_descriptors.at[transcript_ind, self.START_CODON] = start_codon self.transcript_descriptors.at[transcript_ind, self.STOP_CODON] = stop_codon self.transcript_descriptors.at[transcript_ind, self.TRANSCRIPT_3NT] = str(len(cds_sequence) % 3 == 0) self.transcript_descriptors.at[transcript_ind, self.TRANSCRIPT_LENGTH] = len(cds_sequence) self.transcript_descriptors.at[transcript_ind, self.NUMBER_READS] = int(np.sum(count_vector)) self.transcript_descriptors.at[transcript_ind, self.NUMBER_POSITIONS] = np.count_nonzero(count_vector) if start_codon in self.start_codon_dict.keys(): self.start_codon_dict[start_codon] += 1 else: self.start_codon_dict.update({start_codon: 1}) if stop_codon in self.stop_codon_dict.keys(): self.stop_codon_dict[stop_codon] += 1 else: self.stop_codon_dict.update({stop_codon: 1}) self.count_distribution_dict = collections.OrderedDict(sorted(count_distribution_dict.items())) self.outlier_lower = self.get_outlier_lower() self.logger.info("The lower bound for outliers set as %f " % self.outlier_lower) for transcript_ind in range(transcript_count): transcript = transcript_assembly[transcript_ind] count_vector_downsampled = self.get_count_vector(transcript, span_size=0, region=self.FULL_LENGTH, downsample=True) self.transcript_descriptors.at[transcript_ind, self.NUMBER_READS_DOWNSAMPLED] = int( np.sum(count_vector_downsampled)) self.logger.info("Done generating transcript descriptors") def get_count_distribution_dict(self): return self.count_distribution_dict def get_count_distribution(self): if self.count_distribution_dict is None: self.generate_transcript_descriptors() count_distribution = [] for c, f in self.count_distribution_dict.items(): for i in range(f): count_distribution.append(c) return count_distribution def set_count_distribution_dict(self, count_distribution_dict): if len(count_distribution_dict) == 0: self.count_distribution_dict = None else: self.count_distribution_dict = count_distribution_dict def get_outlier_lower(self): 
if self.outlier_lower is not None: return self.outlier_lower count_distribution = self.get_count_distribution() if len(count_distribution) == 0: self.outlier_lower = 0 return 0 scd = sorted(count_distribution) lam = np.mean(scd) ps = [1 - stats.poisson.cdf(x, lam) for x in scd] ind = np.where(np.asarray(ps) <= self.outlier_probability)[0].tolist() if len(ind) > 0: outlier_lower = scd[min(ind) - 1] else: outlier_lower = max(scd) + 1 self.outlier_lower = outlier_lower return outlier_lower def set_outlier_lower(self, outlier_lower): self.outlier_lower = outlier_lower def generate_count_vector_lists(self): logging.getLogger(config.FIVEPSEQ_LOGGER).info("Generating count vectors") if self.count_vector_list_full_length is not None: if self.count_vector_list_term is not None: if self.count_vector_list_start is not None: logging.getLogger(config.FIVEPSEQ_LOGGER).warning("All count vectors are already generated") self.logger.info("Retrieving counts (span size :%d)..." % self.annotation.span_size) transcript_count = len(self.annotation.get_transcript_assembly()) self.count_vector_list_full_length = [None] * transcript_count self.count_vector_list_term = [None] * transcript_count self.count_vector_list_start = [None] * transcript_count counter = 1 ta = self.annotation.get_transcript_assembly() for i in range(transcript_count): transcript = ta[i] if counter % 10000 == 0: self.logger.info("\r>>Transcript count: %d (%d%s)\t" % ( counter, floor(100 * (counter - 1) / self.annotation.transcript_count), '%'), ) try: count_vector = self.get_count_vector(transcript, self.annotation.span_size, self.FULL_LENGTH) self.count_vector_list_full_length[counter - 1] = count_vector self.count_vector_list_start[counter - 1] = count_vector[:2 * self.annotation.span_size] self.count_vector_list_term[counter - 1] = count_vector[-(2 * self.annotation.span_size):] except Exception as e: error_message = "Problem retrieving counts for transcript %s. 
Reason: %s" \ % (transcript.get_name(), e.message) self.logger.error(error_message) raise Exception(error_message) counter += 1 self.check_for_codons = False self.logger.info("Finished retrieving count vectors") @preconditions(lambda region: isinstance(region, str)) def get_count_vector_list(self, region): if self.count_vector_list_full_length is None: self.generate_count_vector_lists() if region == self.FULL_LENGTH: return self.count_vector_list_full_length elif region == self.START: return self.count_vector_list_start elif region == self.TERM: return self.count_vector_list_term else: error_message = "Cannot retrieve the counts. " \ "Invalid region \"%s\" specified: should be one of (%s, %s, %s)." \ % (region, self.FULL_LENGTH, self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) @preconditions(lambda span_size: isinstance(span_size, int), lambda region: isinstance(region, str)) def get_count_vector(self, transcript, span_size, region, downsample=True): try: count_vector = self.get_count_vector_safe(transcript, span_size) if downsample and any(x > self.outlier_lower for x in count_vector): count_vector_ds = [0] * len(count_vector) for i in range(len(count_vector_ds)): if count_vector[i] > self.outlier_lower: count_vector_ds[i] = self.outlier_lower outlier_params = [FivePSeqOut.get_transcript_attr(transcript, "ID"), FivePSeqOut.get_transcript_attr(transcript, "Name"), i - span_size, len(count_vector) - i - span_size, count_vector[i], count_vector_ds[i]] if outlier_params not in self.outliers: self.outliers.append(outlier_params) else: count_vector_ds[i] = count_vector[i] count_vector = count_vector_ds count_vector = count_vector[transcript.cds_start: transcript.cds_end + 2 * span_size] if region == self.FULL_LENGTH: pass elif region == self.START: count_vector = count_vector[:2 * span_size] elif region == self.TERM: count_vector = count_vector[-(2 * span_size):] else: error_message = "Cannot retrieve a count vector for the 
transcript %s. " \ "Invalid region \"%s\" specified: should be one of (%s, %s, %s)." \ % (transcript.get_name(), region, self.FULL_LENGTH, self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) except Exception as e: error_message = "Problem retrieving the count vector for the transcript %s. Reason:%s" % ( transcript.get_name(), e.message) self.logger.error(error_message) raise Exception(error_message) if not isinstance(count_vector, list): count_vector = count_vector.tolist() count_vector = list(map(int, count_vector)) return count_vector def get_count_vector_safe(self, transcript, span_size): try: count_vector = transcript.get_counts(self.alignment.bam_array) except Exception as e: if transcript.spanning_segment.start < 0: diff = -1 * transcript.spanning_segment.start t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) subchain_counts = list(t_subchain.get_counts(self.alignment.bam_array)) count_vector = [0] * diff + subchain_counts logging.getLogger(config.FIVEPSEQ_LOGGER). \ debug("Transcript %s at the beginning of the genome padded with %d zeros" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) else: t_len = transcript.spanning_segment.end - transcript.spanning_segment.start diff = transcript.spanning_segment.end - len(self.genome.genome_dict[transcript.chrom].seq) if diff > span_size: count_vector = [0] * t_len logging.getLogger(config.FIVEPSEQ_LOGGER). \ debug("Transcript %s exceeds genome dimensions by %d bases" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) else: t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) subchain_counts = list(t_subchain.get_counts(self.alignment.bam_array)) count_vector = subchain_counts + [0] * diff logging.getLogger(config.FIVEPSEQ_LOGGER). 
\ debug("Transcript %s at the end of the genome padded with %d zeros" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) return count_vector def get_sequence(self, transcript, transcript_span_size, desired_span_size): if desired_span_size > transcript_span_size: raise ValueError("Desired span size %d bigger than the transcript span size %d" % (desired_span_size, transcript_span_size)) try: sequence = transcript.get_sequence(self.genome.genome_dict) desired_seq = sequence[transcript.cds_start + transcript_span_size - desired_span_size: transcript.cds_end + transcript_span_size + desired_span_size] except: t_len = transcript.cds_end - transcript.cds_start desired_seq = ''.join(['N'] * t_len + 2 * desired_span_size) return desired_seq def get_cds_sequence_safe(self, transcript, span_size): try: sequence = transcript.get_sequence(self.genome.genome_dict) cds_sequence = sequence[transcript.cds_start + span_size: transcript.cds_end + span_size] except: if transcript.chrom not in self.genome.genome_dict.keys(): if transcript.chrom not in self.missing_chroms: self.missing_chroms.append(transcript.chrom) logging.getLogger(config.FIVEPSEQ_LOGGER).warn( "No chromosome named %s found in the genome sequence" % transcript.chrom) t_len = transcript.spanning_segment.end - transcript.spanning_segment.start cds_sequence = ''.join(['N'] * t_len) elif transcript.spanning_segment.start < 0: diff = -1 * transcript.spanning_segment.start t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) sequence = t_subchain.get_sequence(self.genome.genome_dict) if span_size < diff: cds_sequence = sequence[transcript.cds_start + span_size - diff: transcript.cds_end + span_size] else: cds_sequence = sequence[transcript.cds_start + span_size - diff: transcript.cds_end + span_size] logging.getLogger(config.FIVEPSEQ_LOGGER). 
\ debug("Transcript %s at the beginning of the genome padded with %d N's" % (FivePSeqOut.get_transcript_attr(transcript, "Name"), diff)) else: t_len = transcript.spanning_segment.end - transcript.spanning_segment.start diff = transcript.spanning_segment.end - len(self.genome.genome_dict[transcript.chrom].seq) if diff > span_size: cds_sequence = ''.join(['N'] * t_len) else: t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False) sequence = t_subchain.get_sequence(self.genome.genome_dict) cds_sequence = sequence[transcript.cds_start + span_size: transcript.cds_end + span_size - diff] return cds_sequence def get_outliers_df(self): colnames = ["ID", "Name", "position_from_start", "position_from_term", "actual_count", "downsampled_count"] outliers_df = pd.DataFrame(self.outliers, index=None, columns=colnames) return outliers_df @preconditions(lambda region: isinstance(region, str)) def get_frame_counts_df(self, region): if region == self.START: if self.frame_counts_df_start is None: self.frame_counts_df_start = CountManager.extract_count_sums_per_frame_per_transcript( self.get_count_vector_list(FivePSeqCounts.FULL_LENGTH), self.annotation.span_size, FivePSeqCounts.START) return self.frame_counts_df_start elif region == self.TERM: if self.frame_counts_df_term is None: self.frame_counts_df_term = CountManager.extract_count_sums_per_frame_per_transcript( self.get_count_vector_list(FivePSeqCounts.FULL_LENGTH), self.annotation.span_size, FivePSeqCounts.TERM) return self.frame_counts_df_term else: err_msg = ("Wrong region %s provided: should be either %s or %s" % (region, self.START, self.TERM)) self.logger.error(err_msg) raise Exception(err_msg) @preconditions(lambda region: isinstance(region, str)) def get_meta_count_series(self, region): if region == self.FULL_LENGTH: error_message = "Cannot compute meta counts for full length transcript counts: the counts should be of " \ "the same length. 
" \ "Regions can be specified from choices (%s, %s)" % (self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) elif region == self.START: if self.meta_count_series_start is not None: return self.meta_count_series_start elif region == self.TERM: if self.meta_count_series_term is not None: return self.meta_count_series_term else: error_message = "Problem retrieving meta_counts. " \ "Invalid region \"%s\" specified: should be one of (%s, %s)." \ % (region, self.START, self.TERM) self.logger.error(error_message) raise ValueError(error_message) try: count_vector_list = self.get_count_vector_list(region) except Exception as e: raise e meta_count_series = CountManager.count_vector_to_series( CountManager.compute_meta_counts(count_vector_list), region, tail=self.annotation.span_size) self.set_meta_count_series(meta_count_series, region) return meta_count_series @preconditions(lambda count_vector_list: isinstance(count_vector_list, list), lambda count_vector_list: isinstance(count_vector_list[0], list), lambda count_vector_list: isinstance(count_vector_list[0][0], int), lambda region: isinstance(region, str)) def set_count_vector_list(self, count_vector_list, region): if region == self.START: self.count_vector_list_start = count_vector_list elif region == self.TERM: self.count_vector_list_term = count_vector_list elif region == self.FULL_LENGTH: self.count_vector_list_full_length = count_vector_list else: error_message = "Cannot set counts: wrong region %s supplied: should be either of (%s, %s, %s)" \ % (region, self.START, self.TERM, self.FULL_LENGTH) self.logger.error(error_message) raise ValueError(error_message) @preconditions(lambda meta_count_series: isinstance(meta_count_series, pd.Series), lambda region: isinstance(region, str)) def set_meta_count_series(self, meta_count_series, region): if region == self.START: self.meta_count_series_start = meta_count_series elif region == self.TERM: self.meta_count_series_term = meta_count_series 
@preconditions(lambda region: isinstance(region, str), lambda span_before: isinstance(span_before, int), lambda span_before: span_before >= 0, lambda span_after: isinstance(span_after, int), lambda span_after: span_after >= 0) def get_unique_sequences(self, region, span_before, span_after): sequences = {} i = 0 for transcript in self.annotation.get_transcript_assembly(max(span_before, span_after)): sequence = transcript.get_sequence(self.genome.genome_dict) if region == self.TERM: endpoint = len(transcript.spanning_segment) - max(span_before, span_after) span_sequence = sequence[endpoint - span_before: endpoint + span_after] elif region == self.START: startpoint = max(span_before, span_after) span_sequence = sequence[startpoint - span_before: startpoint + span_after] else: raise Exception if span_sequence in sequences.keys(): sequences[span_sequence] += 1 else: sequences[span_sequence] = 1 i += 1 return sequences def get_amino_acid_pauses(self): if self.amino_acid_count_df is None: self.compute_codon_pauses() return self.amino_acid_count_df def get_codon_pauses(self): if self.codon_count_df is None: self.compute_codon_pauses() return self.codon_count_df def get_tricodon_pauses(self): if self.tricodon_count_df is None: self.compute_codon_pauses() return self.tricodon_count_df def get_dicodon_pauses(self): if self.dicodon_count_df is None: self.compute_codon_pauses() return self.dicodon_count_df def get_dipeptide_pauses(self): if self.dipeptide_count_df is None: self.compute_codon_pauses() return self.dipeptide_count_df def get_tripeptide_pauses(self): if self.tripeptide_count_df is None: self.compute_codon_pauses() return self.tripeptide_count_df @preconditions(lambda dist_from: isinstance(dist_from, int), lambda dist_from: dist_from < 0, lambda dist_to: isinstance(dist_to, int), lambda dist_to: dist_to >= 0) def compute_codon_pauses(self, dist_from=-30, dist_to=3, downsample=True): self.logger.info( "Counting codon specific pauses within %d to %d nt distance from 
the first nucleotide of each codon" % (dist_from, dist_to)) if self.config.args.no_mask: mask_dist = 0 self.logger.info("Transcript boundaries will not be masked") else: if hasattr(config.args, "codon_mask_size"): mask_dist = config.args.codon_mask_size else: mask_dist = self.MASK_DIST self.logger.info("Transcript boundaries will be masked by %d nucleotides" % mask_dist) codon_count_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(), columns=range(dist_from, dist_to)) dicodon_count_df = pd.DataFrame(data=0, index=Codons.get_dicodon_table().keys(), columns=range(dist_from + 3, dist_to + 3)) dipeptide_count_df = pd.DataFrame(data=0, index=Codons.get_dipeptide_list(), columns=range(dist_from + 3, dist_to + 3)) tricodon_count_df = pd.DataFrame(data=0, index=Codons.get_tricodon_table().keys(), columns=range(dist_from + 6, dist_to + 6)) tripeptide_count_df = pd.DataFrame(data=0, index=Codons.get_tripeptide_list(), columns=range(dist_from + 6, dist_to + 6)) self.codon_genome_usage_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(), columns=['abs', 'fraction']) self.amino_acid_genome_usage_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(), columns=['abs', 'fraction']) counter = 1 transcript_assembly = self.annotation.get_transcript_assembly( span_size=0) transcript_count = len(transcript_assembly) for t in range(transcript_count): transcript = transcript_assembly[t] if np.floor(transcript_count / 1000) > 0 and counter % 1000 == 0: self.logger.info("\r>>Transcript count: %d (%d%s)\t" % ( counter, floor(100 * (counter - 1) / transcript_count), '%',), ) counter += 1 count_vector = self.get_count_vector(transcript, span_size=0, region=FivePSeqCounts.FULL_LENGTH, downsample=downsample) cds_sequence = self.get_cds_sequence_safe(transcript, 0) if sum(count_vector) == 0: continue if len(cds_sequence) != len(count_vector): self.logger.warning("Transcript num %d: cds sequence length %d not equal to count vector length %d" % (counter, len(cds_sequence), 
len(count_vector))) continue if (mask_dist >= 3): # v1.0b3 mask the first and last 20 counts to avoid initiation affecting codon-specific counts count_vector[0:mask_dist] = [0] * mask_dist # v1.0b3 mask the last 20 nucleotides to avoid termination affecting codon-specific counts, but keep STOP codon counts cds_sequence = cds_sequence[0:len(cds_sequence) - mask_dist] + ''.join( 'N' * (mask_dist - 3)) + cds_sequence[len(cds_sequence) - 3:len(cds_sequence)] # v1.0b3 add stretches of 0's to count_vector and N's to cds_sequence to avoid checking vector boundaries count_vector = [0] * (-1 * dist_from) + count_vector + [0] * dist_to cds_sequence = ''.join('N' * (-1 * dist_from)) + cds_sequence + ''.join('N' * dist_to) # store genome usage stats for i in range(0, len(cds_sequence), 3): codon = cds_sequence[i: i + 3].upper() if codon in self.codon_genome_usage_df.index: self.codon_genome_usage_df.at[codon, "abs"] += 1 amino_acid = Codons.CODON_TABLE.get(codon) self.amino_acid_genome_usage_df.at[amino_acid, "abs"] += 1 # identify 3nt bins with non-zero counts ind = np.array(range(0, len(count_vector), 3)) hits = [sum(count_vector[i:i + 3]) > 0 for i in ind] non_empty_ind = ind[hits] # loop through non-empty triplets only for i in non_empty_ind: # loop through all codons dist_from nucleotides downstream and dist_to nucleotides upstream j_range = list(np.arange(i, i - dist_to, -3))[::-1] + list(np.arange(i + 3, i + 3 - dist_from, 3)) for j in j_range: if j < 0: continue if j + 3 > len(cds_sequence): break codonA = cds_sequence[j: j + 3].upper() if j - 3 >= 0: codonP = cds_sequence[j - 3: j].upper() else: codonP = 'NNN' if j - 6 >= 0: codonE = cds_sequence[j - 6: j - 3].upper() else: codonE = 'NNN' if (len(codonA) == 3) & (codonA in Codons.CODON_TABLE.keys()): for p in range(0, 3): d = i - j + p try: codon_count_df.at[codonA, d] += count_vector[i + p] if len(codonP) == 3 and codonP in Codons.CODON_TABLE.keys(): dicodon_count_df.at[codonP + codonA, d + 3] += count_vector[i + p] 
dipeptide = Codons.get_peptide_from_codon_list([codonP, codonA]) dipeptide_count_df.at[dipeptide, d + 3] += count_vector[i + p] if len(codonE) == 3 and codonE in Codons.CODON_TABLE: tricodon_count_df.at[codonE + codonP + codonA, d + 6] += count_vector[i + p] tripeptide = Codons.get_peptide_from_codon_list([codonE, codonP, codonA]) tripeptide_count_df.at[tripeptide, d + 6] += count_vector[i + p] except Exception as e: self.logger.warn("Index out of range: i: %d, j: %d, p: %d, d: %d. %s" % (i, j, p, d, str(e))) self.codon_genome_usage_df.loc[:, "fraction"] = self.codon_genome_usage_df.loc[:, "abs"] / sum( self.codon_genome_usage_df.loc[:, "abs"]) self.amino_acid_genome_usage_df.loc[:, "fraction"] = self.amino_acid_genome_usage_df.loc[:, "abs"] / sum( self.amino_acid_genome_usage_df.loc[:, "abs"]) self.amino_acid_count_df = self.codon_to_amino_acid_count_df(codon_count_df) self.tripeptide_count_df = self.filter_codon_counts(tripeptide_count_df, self.get_tripeptide_pos()) self.dipeptide_count_df = self.filter_codon_counts(dipeptide_count_df, self.get_dipeptide_pos()) # rename codon_count_df indices by adding amino acid names new_index = [Codons.CODON_TABLE.get(codon) + '_' + codon for codon in codon_count_df.index] codon_count_df.index = new_index self.codon_count_df = codon_count_df # rename codon_count_df indices by adding amino acid names self.logger.info("Mapping tricodons to amino acid names") tricodon_count_df.index = Codons.get_tricodon_full_index() self.tricodon_count_df = self.filter_codon_counts(tricodon_count_df, self.get_tripeptide_pos()) # rename codon_count_df indices by adding amino acid names self.logger.info("Mapping dicodons to amino acid names") dicodon_count_df.index = Codons.get_dicodon_full_index() self.dicodon_count_df = self.filter_codon_counts(dicodon_count_df, self.get_dipeptide_pos()) return def codon_to_amino_acid_count_df(self, codon_count_df): amino_acid_count_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(), 
columns=codon_count_df.columns) for codon in codon_count_df.index: aa = Codons.CODON_TABLE.get(codon) amino_acid_count_df.loc[aa, :] += codon_count_df.loc[codon, :] return amino_acid_count_df def get_tripeptide_pos(self): if hasattr(config.args, "tripeptide_pos"): pos = config.args.tripeptide_pos else: pos = self.TRIPEPTIDE_POS return pos def get_dipeptide_pos(self): if hasattr(config.args, "dipeptide_pos"): pos = config.args.dipeptide_pos else: pos = self.DIPEPTIDE_POS return pos def filter_codon_counts(self, codon_count_df, pos, top=50): self.logger.info("Sorting and selecting top %d peptides/codons at position %d from the A site" % (top, pos)) codon_filtered_df = codon_count_df[codon_count_df.sum(1) >= self.COUNT_THRESHOLD] pos_rel_counts = codon_filtered_df[pos] / codon_filtered_df.sum(1) codon_filtered_df = codon_filtered_df.iloc[ sorted(range(len(pos_rel_counts)), reverse=True, key=lambda k: pos_rel_counts[k])[0:top]] return codon_filtered_df def get_amino_acid_stats(self): if self.amino_acid_stats_df is None: self.amino_acid_stats_df = self.compute_codon_stats_amino_acid() return self.amino_acid_stats_df def get_codon_stats(self): if self.codon_stats_df is None: self.codon_stats_df = self.compute_codon_stats_codon() return self.codon_stats_df def compute_codon_genome_usage(self): self.codon_genome_usage_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(), columns=['abs', 'fraction']) self.amino_acid_genome_usage_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(), columns=['abs', 'fraction']) def compute_codon_stats_amino_acid(self): return self.compute_codon_stats(self.get_amino_acid_pauses(), self.amino_acid_genome_usage_df) def compute_codon_stats_codon(self): return self.compute_codon_stats(self.get_codon_pauses(), self.codon_genome_usage_df) def compute_codon_stats(self, codon_counts, codon_genome_usage, until=-3): self.logger.info("Counting codon usage statistics") try: stop_ind = codon_counts.keys().to_list().index(until) codon_counts 
= codon_counts.iloc[:, 0:stop_ind] f2 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 1, -1, -3))]) f1 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 2, -1, -3))]) f0 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 3, -1, -3))]) codon_stats = pd.DataFrame(list(zip(f0, f1, f2)), columns=['F0', 'F1', 'F2']) codon_stats['FPI'] = np.zeros(len(codon_stats)) codon_stats['F'] = np.zeros(len(codon_stats)) codon_stats['F_perc'] = np.zeros(len(codon_stats)) for i in range(len(codon_stats)): fpi, fmax, fperc = CountManager.fpi_stats_from_frame_counts(codon_stats.iloc[i, :]) codon_stats.loc[i, 'FPI'] = fpi codon_stats.loc[i, 'F'] = fmax codon_stats.loc[i, 'F_perc'] = fperc codon_stats['peak_pos'] = [np.argmax(codon_counts.iloc[i, :]) for i in range(len(codon_stats))] codon_stats['peak_scale'] = np.zeros(len(codon_stats)) for i in range(len(codon_stats)): for i in range(len(codon_stats)): counts = list(codon_counts.iloc[i, :]) if sum(counts) > 0: frame = int(codon_stats.loc[i, 'F']) frame_inds = [j for j in reversed(range(len(counts) - 3 + frame, -1, -3))] frame_counts = [counts[j] for j in frame_inds] codon_stats.loc[i, 'peak_scale'] = len(frame_counts) * max(frame_counts) / sum(frame_counts) codon_stats.loc[i, 'peak_pos'] = codon_counts.columns[frame_inds[np.argmax(frame_counts)]] codon_stats['usage'] = list(sum([codon_counts.iloc[:, i] for i in range(0, stop_ind)])) codon_stats['genome_usage_abs'] = list(codon_genome_usage.loc[:, 'abs']) codon_stats['genome_usage_fraction'] = list(codon_genome_usage.loc[:, 'fraction']) usage_norm = codon_stats['usage'] / codon_stats['genome_usage_fraction'] usage_norm /= sum(usage_norm) codon_stats['usage_normalized'] = usage_norm codon_stats.index = codon_counts.index return codon_stats except: self.logger.warning("Could not compute codon stats. Codon counts dataframe did not have column %d." 
% until) return None # exclude the counts downstream from -3 @preconditions(lambda loci_file: str) def get_pauses_from_loci(self, loci_file, read_locations=READ_LOCATIONS_ALL): self.logger.info( "Counting pauses in %s region from loci given in file %s" % (read_locations, loci_file)) loci = pd.read_csv(loci_file, sep="\t", index_col=None) self.loci_overlaps = [] # the results will be kept in a dictionary: # key - distance from any locus # value - number of mapping positions at key distance from any locus loci_pauses_dict = {} span_size = self.annotation.span_size counter = 0 loci_row = 0 done = False move_transcript = True move_locus = False tg = self.annotation.get_transcript_assembly(span_size) transcript = None while True: if counter % 1000 == 0: self.logger.info("\r>>Transcript count: %d (%d%s)\t" % ( counter, floor(100 * (counter - 1) / self.annotation.transcript_count), '%',), ) if move_locus: if loci.shape[0] == loci_row: self.logger.debug("Reached the end of loci file (row %d)" % loci_row) break loci_row += 1 move_locus = False continue if move_transcript: try: transcript = tg[counter] except: self.logger.debug("Reached the end of transcript assembly (counter: %d)" % counter) break counter += 1 move_transcript = False continue # check if the locus at the cursor is within the current transcript if loci_row < loci.shape[0]: if str(transcript.chrom) == str(loci.loc[loci_row, "chr"]): if loci.loc[loci_row, "str"] == "+": locus_pos = loci.loc[loci_row, "start"] else: locus_pos = loci.loc[loci_row, "end"] # locus is upstream of transcript -> move locus if transcript.cds_genome_start - span_size > locus_pos: move_locus = True continue # transcript is upstream of locus -> move transcript elif transcript.cds_genome_end + span_size < locus_pos: move_transcript = True continue elif str(transcript.strand) != str(loci.loc[loci_row, "str"]): move_locus = True continue else: count_vector = self.get_count_vector(transcript, span_size, FivePSeqCounts.FULL_LENGTH, 
downsample=True) transcript_genome_start = transcript.cds_genome_start - span_size transcript_genome_end = transcript.cds_genome_end + span_size if len(count_vector) != transcript_genome_end - transcript_genome_start: move_transcript = True continue if transcript.strand == "+": locus_ind = locus_pos - transcript_genome_start else: locus_ind = transcript_genome_end - locus_pos if read_locations == self.READ_LOCATIONS_ALL: ind = np.array(range(len(count_vector) - 2 * span_size, len(count_vector))) elif read_locations == self.READ_LOCATIONS_5UTR: ind = np.array(range(0, span_size)) elif read_locations == self.READ_LOCATIONS_3UTR: ind = np.array(range(len(count_vector) - span_size, len(count_vector))) elif read_locations == self.READ_LOCATIONS_CDS: ind = np.array(range(len(count_vector) - 2 * span_size, len(count_vector) - span_size)) else: ind = np.array(range(0, len(count_vector))) hits = [count_vector[i] > 0 for i in ind] non_empty_ind = ind[hits] for i in non_empty_ind: distance = i - locus_ind if distance < 2 * span_size and distance >= -2 * span_size: if distance in loci_pauses_dict.keys(): loci_pauses_dict[distance] += count_vector[i] else: loci_pauses_dict.update({distance: count_vector[i]}) overlap = [FivePSeqOut.get_transcript_attr(transcript, "ID"), FivePSeqOut.get_transcript_attr(transcript, "Name"), transcript.chrom, transcript.strand, transcript.cds_genome_start, transcript.cds_genome_end, loci.loc[loci_row, "symbol"], loci.loc[loci_row, "chr"], loci.loc[loci_row, "str"], loci.loc[loci_row, "start"], loci.loc[loci_row, "end"], i, distance, count_vector[i]] self.loci_overlaps.append(overlap) move_locus = True elif str(transcript.chrom) > str(loci.loc[loci_row, "chr"]): move_locus = True continue else: move_transcript = True continue else: break # turn the dictionary into a metacount vector, with indices from -1*maxdistance to maxdistance self.logger.debug("Merging the dictionary into metacounts") maxdist = 2 * span_size metacount_vector = [0] * 2 * maxdist 
for i in range(-1 * maxdist, maxdist): if i in loci_pauses_dict.keys(): metacount_vector[maxdist + i] = loci_pauses_dict[i] metacount_series = pd.Series(data=metacount_vector, index=np.arange(-1 * maxdist, maxdist)) return metacount_series def get_loci_overlaps_df(self): colnames = ["ID", "Name", "chr", "str", "genome_start", "genome_end", "RBP", "loc_chr", "loc_str", "loc_start", "loc_end", "i", "dist", "count"] outliers_df = pd.DataFrame(self.loci_overlaps, index=None, columns=colnames) return outliers_df @preconditions(lambda num: isinstance(num, int)) def top_populated_transcript_indices(self, num=1000): populated = [0] * self.annotation.transcript_count for i in range(self.annotation.transcript_count): transcript = self.annotation.transcript_assembly[i] count_vector = self.get_count_vector(transcript, 0, FivePSeqCounts.FULL_LENGTH, downsample=False) populated[i] = sum(count_vector > 0) / len(count_vector) populated_indices = sorted(range(len(populated)), key=lambda k: populated[k]) return populated_indices class FivePSeqCountsContainer: count_vector_list_start = None count_vector_list_term = None count_vector_list_full_length = None meta_count_series_start = None meta_count_series_term = None frame_counts_df_start = None frame_counts_df_term = None def __init__(self, count_vector_list_start, count_vector_list_term, count_vector_list_full_length, meta_count_series_start, meta_count_series_term, frame_counts_df_start, frame_counts_df_term): self.count_vector_list_start = count_vector_list_start self.count_vector_list_term = count_vector_list_term self.count_vector_list_full_length = count_vector_list_full_length self.meta_count_series_term = meta_count_series_term self.meta_count_series_start = meta_count_series_start self.frame_counts_df_start = frame_counts_df_start self.frame_counts_df_term = frame_counts_df_term class CountManager: def __init__(self): pass @staticmethod @preconditions(lambda count_vector_list: isinstance(count_vector_list, list), lambda 
count_vector_list: isinstance(count_vector_list[0], list), lambda count_vector_list: isinstance(count_vector_list[0][0], int)) def compute_meta_counts(count_vector_list): # TODO check that the count vectors have the same length max_len = 0 for i in range(len(count_vector_list)): if len(count_vector_list) > max_len: max_len = len(count_vector_list[i]) for i in range(len(count_vector_list)): if len(count_vector_list[i]) < max_len: short_vec = count_vector_list[i] long_vec = [0] * max_len long_vec[0:len(short_vec)] = short_vec count_vector_list[i] = long_vec # sum the position-wise counts meta_count_vector = np.vstack(count_vector_list).sum(axis=0).tolist() return meta_count_vector @staticmethod @preconditions(lambda count_vector: isinstance(count_vector, list), lambda count_vector: isinstance(count_vector[0], int), lambda span_size: isinstance(span_size, int), lambda region: isinstance(region, str), lambda include_span: isinstance(include_span, bool)) def extract_frame_count_vectors(count_vector, span_size, region=FivePSeqCounts.START, include_span=False): # determine the tail size to be subtracted from the count_vector if include_span: tail = 0 else: tail = span_size # for START, start the Frame0 from tail to the length of the vector minus the tail if region == FivePSeqCounts.START: frame0_array = count_vector[0 + tail: len(count_vector) - tail: 3] frame1_array = count_vector[1 + tail: len(count_vector) - tail: 3] frame2_array = count_vector[2 + tail: len(count_vector) - tail: 3] elif region == FivePSeqCounts.TERM: # NOTE the frames relative to START and TERM should be aligned in the future # NOTE (if cds length is not a multiple of 3) frame0_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 3 - tail, -1 + tail, -3)))] frame1_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 2 - tail, -1 + tail, -3)))] frame2_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 1 - tail, -1 + tail, -3)))] else: 
error_message = "Invalid region %s specified: should be either %s or %s" \ % (region, FivePSeqCounts.START, FivePSeqCounts.TERM) logger = logging.getLogger(config.FIVEPSEQ_LOGGER) logger.error(error_message) raise Exception(error_message) return frame0_array, frame1_array, frame2_array @staticmethod @preconditions(lambda count_vector: isinstance(count_vector, list), lambda count_vector: isinstance(count_vector[0], int), lambda region: isinstance(region, str), lambda tail: isinstance(tail, int), lambda tail: tail >= 0) def count_vector_to_series(count_vector, region, tail=0): if region == FivePSeqCounts.START: d = np.arange(-tail, len(count_vector) - tail) elif region == FivePSeqCounts.TERM: d = np.arange(-(len(count_vector) - tail - 3), tail + 3) else: error_message = "Invalid region %s specified: should be either %s or %s" \ % (region, FivePSeqCounts.START, FivePSeqCounts.TERM) logger = logging.getLogger(config.FIVEPSEQ_LOGGER) logger.error(error_message) raise Exception(error_message) counts_series = pd.Series(data=count_vector, index=d) return counts_series @staticmethod @preconditions(lambda count_vector: isinstance(count_vector, list), lambda count_vector: isinstance(count_vector[0], int), lambda region: isinstance(region, str), lambda tail: isinstance(tail, int), lambda tail: tail >= 0) def count_vector_to_df(count_vector, region, tail=0): if region == FivePSeqCounts.START: d = np.arange(-tail, len(count_vector) - tail) elif region == FivePSeqCounts.TERM: d = np.arange(-(len(count_vector) - tail - 3), tail + 3) else: error_message = "Invalid region %s specified: should be either %s or %s" \ % (region, FivePSeqCounts.START, FivePSeqCounts.TERM) logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise Exception(error_message) counts_df = pd.DataFrame({'D': d, 'C': count_vector}) return counts_df @staticmethod @preconditions(lambda region: isinstance(region, str), lambda span_size: isinstance(span_size, int)) def 
extract_count_sums_per_frame_per_transcript(count_vector_list, span_size, region): logging.getLogger(config.FIVEPSEQ_LOGGER).debug( "Retrieving count-sums per frame relative to %s ..." % region) # Create an empty dataframe n = len(count_vector_list) frame_counts_df = pd.DataFrame({'F0': [0] * n, 'F1': [0] * n, 'F2': [0] * n}) for t_ind in range(0, n): # Print status update to console if t_ind % 10000 == 0: logging.getLogger(config.FIVEPSEQ_LOGGER).info("\r>>Transcript count: %d (%d%s)\t" % ( t_ind, np.floor(100 * (t_ind - 1) / n), '%')) # extract frame count vectors from count vectors count_vector = count_vector_list[t_ind] if sum(count_vector) == 0: for f in range(0, 3): frame_counts_df.iloc[t_ind, f] = 0 else: frame_counts = CountManager.extract_frame_count_vectors(count_vector, span_size, region) # sum-up counts in each frame and add to the dataframe for f in range(0, 3): frame_counts_df.iloc[t_ind, f] = sum(frame_counts[f]) return frame_counts_df @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_index_as_list(file_path): if not os.path.exists(file_path): error_message = "Problem reading counts: the file %s does not exist" % file_path logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading count file %s" % file_path) indices = list(pd.read_csv(file_path, header=None).iloc[:, 0]) return indices @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_counts_as_list(file_path): if not os.path.exists(file_path): error_message = "Problem reading counts: the file %s does not exist" % file_path logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading count file %s" % file_path) df = pd.read_csv(file_path, header=None, sep="|") count_vector_list = [[]] * len(df) for i in range(0, len(df)): count_vector_list[i] = list(map(int, 
df.iloc[i, 0].split("\t"))) return count_vector_list @staticmethod @preconditions(lambda file: isinstance(file, str)) def read_meta_counts(file): if not os.path.exists(file): error_message = "Problem reading meta counts: the file %s does not exist" % file logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading meta count file %s" % file) meta_count = pd.read_csv(file, sep="\t", header=None, names=["D", "C"]) return meta_count @staticmethod @preconditions(lambda file: isinstance(file, str)) def read_frame_counts(file): if not os.path.exists(file): error_message = "Problem reading frame counts: the file %s does not exist" % file logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading frame counts file %s" % file) frame_counts = pd.read_csv(file, sep="\t") return frame_counts @staticmethod @preconditions(lambda file: isinstance(file, str)) def read_amino_acid_df(file): if not os.path.exists(file): error_message = "Problem reading amino acid pauses: the file %s does not exist" % file logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading amino acids pauses file %s" % file) amino_acid_df = pd.read_csv(file, sep="\t", header=0, index_col=0) return amino_acid_df @staticmethod def top_populated_count_vector_indices(count_vector_list, num=1000): populated = [0] * len(count_vector_list) for i in range(len(count_vector_list)): count_vector = count_vector_list[i] populated[i] = float(np.count_nonzero(count_vector)) / len(count_vector) populated_indices = sorted(range(len(populated)), key=lambda k: populated[k], reverse=True) return populated_indices[0:num] @staticmethod def canonical_transcript_indices(count_dir): canonical_index_file = os.path.join(count_dir, 
FivePSeqOut.CANONICAL_TRANSCRIPT_INDEX_FILE) if os.path.exists(canonical_index_file): transcript_index = list(pd.read_csv(canonical_index_file, header=None).iloc[:, 0]) return transcript_index else: logging.getLogger(config.FIVEPSEQ_LOGGER).debug( "Problem retrieving canonical transcript indices. No file %s exists. " "The filter will return None." % canonical_index_file) return None @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_count_vector(file_path): if not os.path.exists(file_path): error_message = "Problem reading counts: the file %s does not exist" % file_path logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message) raise IOError(error_message) logging.getLogger(config.FIVEPSEQ_LOGGER).debug("Reading count file %s" % file_path) if os.stat(file_path).st_size == 0: counts = [] else: logging.getLogger("Reading in count distribution (this may last a few minutes for large libraries)") counts = pd.read_csv(file_path, header=None) counts = list(map(int, counts.iloc[:, 0])) return counts @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_outlier_lower(file_path): outlier_lower = float(pd.read_csv(file_path, header=None)) return outlier_lower @staticmethod @preconditions(lambda file_path: isinstance(file_path, str)) def read_count_dict(file_path): count_freq_dict = {} dict_mat = pd.read_csv(file_path, header=None, delimiter="\t", index_col=0) for i in range(len(dict_mat)): count_freq_dict[dict_mat.index[i]] = dict_mat.iloc[i, 0] return collections.OrderedDict(sorted(count_freq_dict.items())) @staticmethod def filter_fivepseqCountsContainer(fivepseqcountsContainer, transcript_indices, span_size=100): if fivepseqcountsContainer.count_vector_list_full_length is not None: count_vector_list_full_length = [fivepseqcountsContainer.count_vector_list_full_length[i] for i in transcript_indices] else: count_vector_list_full_length = None count_vector_list_term = 
[fivepseqcountsContainer.count_vector_list_term[i] for i in transcript_indices] count_vector_list_start = [fivepseqcountsContainer.count_vector_list_start[i] for i in transcript_indices] meta_count_series_term = CountManager.count_vector_to_df( CountManager.compute_meta_counts(count_vector_list_term), FivePSeqCounts.TERM, tail=span_size) meta_count_series_start = CountManager.count_vector_to_df( CountManager.compute_meta_counts(count_vector_list_start), FivePSeqCounts.START, tail=span_size) frame_counts_df_term = fivepseqcountsContainer.frame_counts_df_term.iloc[transcript_indices,] frame_counts_df_start = fivepseqcountsContainer.frame_counts_df_start.iloc[ transcript_indices,] fivepseq_counts_filtered = FivePSeqCountsContainer(count_vector_list_start, count_vector_list_term, count_vector_list_full_length, meta_count_series_start, meta_count_series_term, frame_counts_df_start, frame_counts_df_term) return fivepseq_counts_filtered @staticmethod def combine_count_series(count_series_dict, lib_size_dict=None, scale=False): count_series_combined = None start = True for key in count_series_dict.keys(): count_series = count_series_dict[key].copy() if lib_size_dict is not None: if scale: count_series.C /= (float(lib_size_dict[key]) / (10 ** 6)) * len(lib_size_dict) else: count_series.C *= float(lib_size_dict[key]) / sum(lib_size_dict.values()) if start: count_series_combined = count_series.copy() start = False else: count_series_combined.C += count_series.C return count_series_combined @staticmethod def combine_frame_counts(frame_count_dict, lib_size_dict=None): frame_count_combined = None start = True for key in frame_count_dict.keys(): count_df = frame_count_dict[key] if lib_size_dict is not None: count_df.loc[:, ('F0', 'F1', 'F2')] *= float(lib_size_dict[key]) / sum(lib_size_dict.values()) if start: frame_count_combined = count_df.copy() start = False else: frame_count_combined.loc[:, ('F0', 'F1', 'F2')] += count_df.loc[:, ('F0', 'F1', 'F2')] return 
frame_count_combined @staticmethod def combine_amino_acid_dfs(amino_acid_df_dict, lib_size_dict=None): amino_acid_df_combined = None start = True for key in amino_acid_df_dict.keys(): count_df = amino_acid_df_dict[key].copy(deep=True) if lib_size_dict is not None: count_df *= float(lib_size_dict[key]) / sum(lib_size_dict.values()) if start: amino_acid_df_combined = count_df.copy() start = False else: amino_acid_df_combined += count_df return amino_acid_df_combined @staticmethod def fpi_stats_from_frame_counts(frame_counts): f_counts = (frame_counts['F0'], frame_counts['F1'], frame_counts['F2']) fmax = np.argmax(f_counts) nom = f_counts[fmax] if nom == 0: fpi = None f_perc = None else: denom = (sum(f_counts) - nom) / 2. if denom == 0: fpi = np.log2(float(nom) / 0.5) else: fpi = np.log2(float(nom / denom)) f_perc = 100 * (float(f_counts[fmax]) / sum(f_counts)) return fpi, fmax, f_perc
true
true
f7f67bb47b2fe1d76e46539ae517f28fe34660a1
252
py
Python
codesignal/simpleSort.py
andraantariksa/code-exercise-answer
69b7dbdc081cdb094cb110a72bc0c9242d3d344d
[ "MIT" ]
1
2019-11-06T15:17:48.000Z
2019-11-06T15:17:48.000Z
codesignal/simpleSort.py
andraantariksa/code-exercise-answer
69b7dbdc081cdb094cb110a72bc0c9242d3d344d
[ "MIT" ]
null
null
null
codesignal/simpleSort.py
andraantariksa/code-exercise-answer
69b7dbdc081cdb094cb110a72bc0c9242d3d344d
[ "MIT" ]
1
2018-11-13T08:43:26.000Z
2018-11-13T08:43:26.000Z
def simpleSort(arr):
    """Sort `arr` in place using bubble sort and return the same list.

    Repeatedly sweeps the list, swapping adjacent out-of-order pairs.
    After pass i the last i elements are in their final positions, so
    each sweep shrinks by one. Stops early when a full sweep performs
    no swaps (list already sorted): O(n) best case, O(n^2) worst case.
    """
    n = len(arr)
    for i in range(n):
        swapped = False
        # Elements beyond index n - i - 1 were settled by earlier passes.
        for j in range(n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            # No inversions left anywhere -- the list is sorted.
            break
    return arr
19.384615
55
0.373016
def simpleSort(arr): n = len(arr) for i in range(n): j = 0 stop = n - i while j < stop - 1: if arr[j] > arr[j + 1]: arr[j], arr[j + 1] = arr[j + 1], arr[j] j += 1 return arr
true
true
f7f67ce159f562ec968a106b9cbbc27b8adb5c39
2,369
py
Python
analyzer/checks/metrics/__init__.py
cqr-cryeye-forks/gdpr-scanner
5d5bd517a97befa84b8294504f8e4aaf3eb7f5f8
[ "MIT" ]
18
2020-02-14T12:51:48.000Z
2022-01-05T08:57:44.000Z
analyzer/checks/metrics/__init__.py
cqr-cryeye-forks/gdpr-scanner
5d5bd517a97befa84b8294504f8e4aaf3eb7f5f8
[ "MIT" ]
null
null
null
analyzer/checks/metrics/__init__.py
cqr-cryeye-forks/gdpr-scanner
5d5bd517a97befa84b8294504f8e4aaf3eb7f5f8
[ "MIT" ]
2
2021-06-15T13:04:09.000Z
2021-07-13T07:04:20.000Z
import logging import os import re from abc import ABC, abstractmethod from typing import Dict, List from bs4 import BeautifulSoup from analyzer.checks.check_result import CheckResult from analyzer.checks.severity import Severity from analyzer.types_definitions import CrawlerDomainMetaData logger = logging.getLogger(__name__) class MetricCheck(ABC): def __init__(self, domain: str, page_types: CrawlerDomainMetaData, meta_data_filepath: str, *args, **kwargs): self.domain = domain self.page_types = page_types self.meta_data_filepath = meta_data_filepath def _get_check_result(self, passed: CheckResult.PassType, description: str = '') -> CheckResult: return CheckResult( domain=self.domain, identifier=self.IDENTIFIER, passed=passed, severity=self.SEVERITY, description=description ) def get_html_strings_of(self, page_type: str) -> List[str]: html_strings: List[str] = list() for page in self.page_types.get(page_type, []): html_abspath = os.path.join(os.path.dirname(self.meta_data_filepath), page['htmlFilePath']) with open(html_abspath, 'rb') as f: # don't fail on encoding issues, but replace the faulty characters html = f.read().decode('utf-8', errors='replace') html_strings.append(html) return html_strings def phrase_in_html_body(self, phrase: str, html: str) -> bool: # ToDo: Don't instantiate bs for every call, share it across checks based on the html content / hash soup = BeautifulSoup(html, 'html.parser') # We're compiling a regex here, otherwise bs4 would only return for *exact* matches. return soup.find(text=re.compile(phrase, re.IGNORECASE)) is not None def phrase_in_page_title(selfself, phrase: str, html: str) -> bool: soup = BeautifulSoup(html, 'html.parser') # We're compiling a regex here, otherwise bs4 would only return for *exact* matches. 
return soup.find('title', text=re.compile(phrase, re.IGNORECASE)) is not None @property @abstractmethod def SEVERITY(self) -> Severity: pass @property @abstractmethod def IDENTIFIER(self) -> str: pass @abstractmethod def check(self) -> CheckResult: pass
35.893939
113
0.672436
import logging import os import re from abc import ABC, abstractmethod from typing import Dict, List from bs4 import BeautifulSoup from analyzer.checks.check_result import CheckResult from analyzer.checks.severity import Severity from analyzer.types_definitions import CrawlerDomainMetaData logger = logging.getLogger(__name__) class MetricCheck(ABC): def __init__(self, domain: str, page_types: CrawlerDomainMetaData, meta_data_filepath: str, *args, **kwargs): self.domain = domain self.page_types = page_types self.meta_data_filepath = meta_data_filepath def _get_check_result(self, passed: CheckResult.PassType, description: str = '') -> CheckResult: return CheckResult( domain=self.domain, identifier=self.IDENTIFIER, passed=passed, severity=self.SEVERITY, description=description ) def get_html_strings_of(self, page_type: str) -> List[str]: html_strings: List[str] = list() for page in self.page_types.get(page_type, []): html_abspath = os.path.join(os.path.dirname(self.meta_data_filepath), page['htmlFilePath']) with open(html_abspath, 'rb') as f: html = f.read().decode('utf-8', errors='replace') html_strings.append(html) return html_strings def phrase_in_html_body(self, phrase: str, html: str) -> bool: # ToDo: Don't instantiate bs for every call, share it across checks based on the html content / hash soup = BeautifulSoup(html, 'html.parser') return soup.find(text=re.compile(phrase, re.IGNORECASE)) is not None def phrase_in_page_title(selfself, phrase: str, html: str) -> bool: soup = BeautifulSoup(html, 'html.parser') # We're compiling a regex here, otherwise bs4 would only return for *exact* matches. return soup.find('title', text=re.compile(phrase, re.IGNORECASE)) is not None @property @abstractmethod def SEVERITY(self) -> Severity: pass @property @abstractmethod def IDENTIFIER(self) -> str: pass @abstractmethod def check(self) -> CheckResult: pass
true
true
f7f67d228e5c3ebe6dc3287545d743a5638c5a60
11,946
py
Python
podqueue/main.py
Earthnuker/podqueue
7d9ff4142b672c758493f5a906e3bde63de78fea
[ "MIT" ]
null
null
null
podqueue/main.py
Earthnuker/podqueue
7d9ff4142b672c758493f5a906e3bde63de78fea
[ "MIT" ]
null
null
null
podqueue/main.py
Earthnuker/podqueue
7d9ff4142b672c758493f5a906e3bde63de78fea
[ "MIT" ]
null
null
null
#!/bin/env python3 import feedparser import argparse import os from os import path, getcwd import xml.etree.ElementTree as ET from io import IOBase import json import requests import time from configparser import ConfigParser import re import logging # ----- ----- ----- ----- ----- class podqueue(): def __init__(self): # Initialise to defaults before checking config file / CLI args self.verbose = False self.opml = None self.dest = os.path.join(os.getcwd(), 'output') self.time_format = '%Y-%m-%d' self.log_file = 'podqueue.log' self.feeds = [] self.FEED_FIELDS = ['title', 'link', 'description', 'published', 'image', 'categories',] self.EPISODE_FIELDS = ['title', 'link', 'description', 'published_parsed', 'links',] # If a config file exists, ingest it self.check_config() # Overwrite any config file defaults with CLI params self.cli_args() self.config_logging() # Check an OPML was provided try: assert self.opml is not None except Exception as e: logging.error('OPML file or destination dir was not provided') exit() def config_logging(self): # Always log to file; only stdout if -v handlers = [logging.FileHandler(self.log_file)] if (self.verbose): handlers.append(logging.StreamHandler()) # Config settings level = logging.INFO if (self.verbose) else logging.WARNING logging.basicConfig(level=level, datefmt='%Y-%m-%d %H:%M:%S', handlers=handlers, format='%(asctime)s [%(levelname)s] %(message)s') # Add header for append-mode file logging logging.info('\n----- ----- ----- ----- -----\nInitialising\n----- ----- ----- ----- -----') def ascii_normalise(self, input_str, ): try: # Replace non-simple chars with dunders input_str = re.sub(r'[^a-zA-Z0-9\-\_\/\\\.]', '_', input_str) # Replace any strings of 2+ puncts with a single underscore input_str = re.sub(r'_+', r'_', input_str) input_str = re.sub(r'([^a-zA-Z0-9]{2,})', r'_', input_str) # Remove any trailing puncts input_str = re.sub(r'(_|\.)$', r'', input_str) except Exception as e: logging.error(f'\t\tError normalising file 
name: {e}') exit() return input_str def check_config(self): # get the path to podqueue.conf config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'podqueue.conf') # Check if the file has been created if not os.path.exists(config_path): logging.info(f'Config file does not exist: {config_path}') return None conf = ConfigParser() conf.read(config_path) for key in ['opml', 'dest', 'time_format', 'verbose', 'log_file']: if conf['podqueue'].get(key, None): setattr(self, key, conf['podqueue'].get(key, None)) # If we just changed verbose to str, make sure it's back to a bool if self.verbose: self.verbose = bool(self.verbose) return def cli_args(self): parser = argparse.ArgumentParser(add_help=True) parser.add_argument('-o', '--opml', dest='opml', default=None, type=argparse.FileType('r'), help='Pass an OPML file that contains a podcast subscription list.') parser.add_argument('-d', '--dest', dest='dest', type=self.args_path, help='The destination folder for downloads. Will be created if required, including sub-directories for each separate podcast.') parser.add_argument('-t', '--time_format', dest='time_format', help='Specify a time format string for JSON files. Defaults to 2022-06-31 if not specified.') parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Prints additional debug information. If excluded, only errors are printed (for automation).') parser.add_argument('-l', '--log_file', dest='log_file', help='Specify a path to the log file. 
Defaults to ./podqueue.log') # Save the CLI args to class vars - self.XXX # vars() converts into a native dict result = vars(parser.parse_args()) for key, value in result.items(): # Don't overwrite if it's not provided in CLI if value is not None: setattr(self, key, value) def args_path(self, directory): # Create the directory, if required if not os.path.isdir(directory): os.makedirs(directory) return directory def parse_opml(self, opml): logging.info(f'Parsing OPML file: {opml.name}') # Check if we have an actual file handle (CLI arg), # Or a string path (config file), and we need to get our own handle with (opml if isinstance(opml, IOBase) else open(opml, 'r')) as opml_f: xml_root = ET.parse(opml_f).getroot() # Get all RSS feeds with a 'xmlUrl' attribute for feed in [x.attrib for x in xml_root.findall(".//outline[@type='rss']")]: feed_url = feed.get('xmlUrl', None) if feed_url: self.feeds.append(feed_url) def get_feeds(self, feeds): logging.info(f'Fetching feeds:') for feed in feeds: try: content = feedparser.parse(feed) # The remote RSS server can close the HTTP connection # except http.client.RemoteDisconnected: except: logging.warning(f'Feed server unexpectedly closed connection: {feed}') continue # If feedparser library reports bad XML, warn and skip # Test str: 'http://feedparser.org/tests/illformed/rss/aaa_illformed.xml' if content.get('bozo', False): logging.warning(f'Feed is misformatted: {feed}') continue title = content.feed.get('title', 'Unknown Title') logging.info(f'\tProcessing feed: {title}') # Normalise the podcast name with no spaces or non-simple ascii feed_dir_name = '_'.join([x for x in title.split(' ')]) feed_dir_name = self.ascii_normalise(feed_dir_name) # Create the directory we need (no spaces) if it doesn't exist directory = os.path.join(self.dest, feed_dir_name) if not os.path.isdir(directory): os.makedirs(directory) # Also create the <<PODCAST>>/episodes subdirectory if not os.path.isdir(os.path.join(directory, 'episodes')): 
os.makedirs(os.path.join(directory, 'episodes')) # Get content.feed metadata - podcast title, icon, description, etc. # And write it to disk as <<PODCAST>>/<<PODCAST>>.json feed_metadata = self.process_feed_metadata(content, directory) # Also fetch the podcast logo, if available if feed_metadata.get('image', None): self.get_feed_image(feed_metadata['image'], directory) # Then, process the episodes each and write to disk for episode in content.entries: episode_data = self.process_feed_episode(episode, directory) return None def process_feed_metadata(self, content, directory): logging.info(f'\t\tProcessing feed metadata') feed_metadata = {} for field in self.FEED_FIELDS: # .image is a dict structure where we only want href, # the rest are strs, so special case if (field == 'image') and (content.feed.get('image', None)): value = content.feed.image.href else: value = content.feed.get(field, None) feed_metadata[field] = value # Additional calculated metadata based on structure: feed_metadata['episode_count'] = len(content.entries) metadata_filename = os.path.join(directory, f'{os.path.split(directory)[1]}.json') with open(metadata_filename, 'w') as meta_f: meta_f.write(json.dumps(feed_metadata)) return feed_metadata def get_feed_image(self, image_url, directory): try: img = requests.get(image_url) img.raise_for_status() except Exception as e: logging.warning(f'\t\tImage could not be found: {image_url}, for reason: {e}') return image_filename_ext = os.path.splitext(image_url)[1] image_filename = os.path.join(directory, f'{os.path.split(directory)[1]}{image_filename_ext}') with open(image_filename, 'wb') as img_f: for chunk in img.iter_content(chunk_size=128): img_f.write(chunk) logging.info(f'\t\tAdded image to disk: {os.path.split(image_filename)[1]}') return def process_feed_episode(self, episode, directory): episode_metadata = {} for field in self.EPISODE_FIELDS: episode_metadata[field] = episode.get(field, None) # Change the time_struct tuple to a human string if 
episode_metadata.get('published_parsed', None): episode_metadata['published_parsed'] = time.strftime(self.time_format, \ episode_metadata['published_parsed']) # Change the links{} into a single audio URL if episode_metadata.get('links', None): for link in episode_metadata['links']: if link.get('type', None): if 'audio' in link.get('type', None): episode_metadata['link'] = link.get('href', None) break # Remove the old complicated links{} episode_metadata.pop('links', None) # Get a unique episode filename(s) episode_title = f'{episode_metadata["published_parsed"]}_{episode_metadata["title"]}' # Special case - the final file name (not path) can't have a slash in it # Also replace colons as they are invalid in filenames on Windows (used for Alterante Data Streams on NTFS) episode_title = re.sub(r'(\/|\\|:|\?|")', r'_', episode_title) # Check the title isn't going to overshoot 255 bytes # This is the limit in ZFS, BTRFS, ext*, NTFS, APFS, XFS, etc ... # Otherwise, file.write will raise OSError 36 - "File name too long" # I'm looking at you, Memory Palace 73. I mean really, 55 words and 316 characters long? 
# https://thememorypalace.us/notes-on-an-imagined-plaque/ if len(episode_title) >= 250: episode_title = f'{episode_title[0:245]}_' episode_meta_filename = os.path.join(os.path.join(directory, 'episodes'), \ f'{episode_title}.json') episode_audio_filename = os.path.join(os.path.join(directory, 'episodes'), \ f'{episode_title}.mp3') # episode_meta_filename = self.ascii_normalise(episode_meta_filename) # episode_audio_filename = self.ascii_normalise(episode_audio_filename) # Check if the file already exists on disk (if so, skip) if os.path.exists(episode_meta_filename) and os.path.exists(episode_audio_filename): logging.info(f'\t\tEpisode already saved, skipping: {episode_title}') return # Write metadata to disk with open(episode_meta_filename, 'w') as ep_meta_f: ep_meta_f.write(json.dumps(episode_metadata)) logging.info(f'\t\t\tAdded episode metadata to disk: {episode_title}') # Download the audio file if episode_metadata.get('link', None): try: audio = requests.get(episode_metadata['link']) audio.raise_for_status() except Exception as e: logging.warning(f'\t\t\tAudio could not be found: {episode_metadata["link"]}') return # Write audio to disk with open(episode_audio_filename, 'wb') as audio_f: for chunk in audio.iter_content(chunk_size=1024*8): audio_f.write(chunk) logging.info(f'\t\t\tAdded episode audio to disk: {episode_title}') return # ----- ----- ----- ----- ----- def entry(): # Initialise the config - from file, or CLI args pq = podqueue() # Parse all feed URLs out of the OPML XML into pq.feeds=[] pq.parse_opml(pq.opml) # Download the metadata, images, and any missing episodes pq.get_feeds(pq.feeds) if __name__ == '__main__': entry()
35.766467
134
0.639879
import feedparser import argparse import os from os import path, getcwd import xml.etree.ElementTree as ET from io import IOBase import json import requests import time from configparser import ConfigParser import re import logging class podqueue(): def __init__(self): self.verbose = False self.opml = None self.dest = os.path.join(os.getcwd(), 'output') self.time_format = '%Y-%m-%d' self.log_file = 'podqueue.log' self.feeds = [] self.FEED_FIELDS = ['title', 'link', 'description', 'published', 'image', 'categories',] self.EPISODE_FIELDS = ['title', 'link', 'description', 'published_parsed', 'links',] self.check_config() self.cli_args() self.config_logging() try: assert self.opml is not None except Exception as e: logging.error('OPML file or destination dir was not provided') exit() def config_logging(self): handlers = [logging.FileHandler(self.log_file)] if (self.verbose): handlers.append(logging.StreamHandler()) level = logging.INFO if (self.verbose) else logging.WARNING logging.basicConfig(level=level, datefmt='%Y-%m-%d %H:%M:%S', handlers=handlers, format='%(asctime)s [%(levelname)s] %(message)s') logging.info('\n----- ----- ----- ----- -----\nInitialising\n----- ----- ----- ----- -----') def ascii_normalise(self, input_str, ): try: input_str = re.sub(r'[^a-zA-Z0-9\-\_\/\\\.]', '_', input_str) input_str = re.sub(r'_+', r'_', input_str) input_str = re.sub(r'([^a-zA-Z0-9]{2,})', r'_', input_str) input_str = re.sub(r'(_|\.)$', r'', input_str) except Exception as e: logging.error(f'\t\tError normalising file name: {e}') exit() return input_str def check_config(self): config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'podqueue.conf') if not os.path.exists(config_path): logging.info(f'Config file does not exist: {config_path}') return None conf = ConfigParser() conf.read(config_path) for key in ['opml', 'dest', 'time_format', 'verbose', 'log_file']: if conf['podqueue'].get(key, None): setattr(self, key, conf['podqueue'].get(key, None)) if 
self.verbose: self.verbose = bool(self.verbose) return def cli_args(self): parser = argparse.ArgumentParser(add_help=True) parser.add_argument('-o', '--opml', dest='opml', default=None, type=argparse.FileType('r'), help='Pass an OPML file that contains a podcast subscription list.') parser.add_argument('-d', '--dest', dest='dest', type=self.args_path, help='The destination folder for downloads. Will be created if required, including sub-directories for each separate podcast.') parser.add_argument('-t', '--time_format', dest='time_format', help='Specify a time format string for JSON files. Defaults to 2022-06-31 if not specified.') parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Prints additional debug information. If excluded, only errors are printed (for automation).') parser.add_argument('-l', '--log_file', dest='log_file', help='Specify a path to the log file. Defaults to ./podqueue.log') # Save the CLI args to class vars - self.XXX # vars() converts into a native dict result = vars(parser.parse_args()) for key, value in result.items(): # Don't overwrite if it's not provided in CLI if value is not None: setattr(self, key, value) def args_path(self, directory): # Create the directory, if required if not os.path.isdir(directory): os.makedirs(directory) return directory def parse_opml(self, opml): logging.info(f'Parsing OPML file: {opml.name}') # Check if we have an actual file handle (CLI arg), # Or a string path (config file), and we need to get our own handle with (opml if isinstance(opml, IOBase) else open(opml, 'r')) as opml_f: xml_root = ET.parse(opml_f).getroot() # Get all RSS feeds with a 'xmlUrl' attribute for feed in [x.attrib for x in xml_root.findall(".//outline[@type='rss']")]: feed_url = feed.get('xmlUrl', None) if feed_url: self.feeds.append(feed_url) def get_feeds(self, feeds): logging.info(f'Fetching feeds:') for feed in feeds: try: content = feedparser.parse(feed) # The remote RSS server can close the HTTP 
connection # except http.client.RemoteDisconnected: except: logging.warning(f'Feed server unexpectedly closed connection: {feed}') continue # If feedparser library reports bad XML, warn and skip # Test str: 'http://feedparser.org/tests/illformed/rss/aaa_illformed.xml' if content.get('bozo', False): logging.warning(f'Feed is misformatted: {feed}') continue title = content.feed.get('title', 'Unknown Title') logging.info(f'\tProcessing feed: {title}') # Normalise the podcast name with no spaces or non-simple ascii feed_dir_name = '_'.join([x for x in title.split(' ')]) feed_dir_name = self.ascii_normalise(feed_dir_name) # Create the directory we need (no spaces) if it doesn't exist directory = os.path.join(self.dest, feed_dir_name) if not os.path.isdir(directory): os.makedirs(directory) if not os.path.isdir(os.path.join(directory, 'episodes')): os.makedirs(os.path.join(directory, 'episodes')) feed_metadata = self.process_feed_metadata(content, directory) if feed_metadata.get('image', None): self.get_feed_image(feed_metadata['image'], directory) for episode in content.entries: episode_data = self.process_feed_episode(episode, directory) return None def process_feed_metadata(self, content, directory): logging.info(f'\t\tProcessing feed metadata') feed_metadata = {} for field in self.FEED_FIELDS: if (field == 'image') and (content.feed.get('image', None)): value = content.feed.image.href else: value = content.feed.get(field, None) feed_metadata[field] = value feed_metadata['episode_count'] = len(content.entries) metadata_filename = os.path.join(directory, f'{os.path.split(directory)[1]}.json') with open(metadata_filename, 'w') as meta_f: meta_f.write(json.dumps(feed_metadata)) return feed_metadata def get_feed_image(self, image_url, directory): try: img = requests.get(image_url) img.raise_for_status() except Exception as e: logging.warning(f'\t\tImage could not be found: {image_url}, for reason: {e}') return image_filename_ext = os.path.splitext(image_url)[1] 
image_filename = os.path.join(directory, f'{os.path.split(directory)[1]}{image_filename_ext}') with open(image_filename, 'wb') as img_f: for chunk in img.iter_content(chunk_size=128): img_f.write(chunk) logging.info(f'\t\tAdded image to disk: {os.path.split(image_filename)[1]}') return def process_feed_episode(self, episode, directory): episode_metadata = {} for field in self.EPISODE_FIELDS: episode_metadata[field] = episode.get(field, None) if episode_metadata.get('published_parsed', None): episode_metadata['published_parsed'] = time.strftime(self.time_format, \ episode_metadata['published_parsed']) if episode_metadata.get('links', None): for link in episode_metadata['links']: if link.get('type', None): if 'audio' in link.get('type', None): episode_metadata['link'] = link.get('href', None) break episode_metadata.pop('links', None) episode_title = f'{episode_metadata["published_parsed"]}_{episode_metadata["title"]}' # Also replace colons as they are invalid in filenames on Windows (used for Alterante Data Streams on NTFS) episode_title = re.sub(r'(\/|\\|:|\?|")', r'_', episode_title) # Check the title isn't going to overshoot 255 bytes # This is the limit in ZFS, BTRFS, ext*, NTFS, APFS, XFS, etc ... # Otherwise, file.write will raise OSError 36 - "File name too long" # I'm looking at you, Memory Palace 73. I mean really, 55 words and 316 characters long? 
# https://thememorypalace.us/notes-on-an-imagined-plaque/ if len(episode_title) >= 250: episode_title = f'{episode_title[0:245]}_' episode_meta_filename = os.path.join(os.path.join(directory, 'episodes'), \ f'{episode_title}.json') episode_audio_filename = os.path.join(os.path.join(directory, 'episodes'), \ f'{episode_title}.mp3') # episode_meta_filename = self.ascii_normalise(episode_meta_filename) # episode_audio_filename = self.ascii_normalise(episode_audio_filename) # Check if the file already exists on disk (if so, skip) if os.path.exists(episode_meta_filename) and os.path.exists(episode_audio_filename): logging.info(f'\t\tEpisode already saved, skipping: {episode_title}') return # Write metadata to disk with open(episode_meta_filename, 'w') as ep_meta_f: ep_meta_f.write(json.dumps(episode_metadata)) logging.info(f'\t\t\tAdded episode metadata to disk: {episode_title}') # Download the audio file if episode_metadata.get('link', None): try: audio = requests.get(episode_metadata['link']) audio.raise_for_status() except Exception as e: logging.warning(f'\t\t\tAudio could not be found: {episode_metadata["link"]}') return # Write audio to disk with open(episode_audio_filename, 'wb') as audio_f: for chunk in audio.iter_content(chunk_size=1024*8): audio_f.write(chunk) logging.info(f'\t\t\tAdded episode audio to disk: {episode_title}') return # ----- ----- ----- ----- ----- def entry(): # Initialise the config - from file, or CLI args pq = podqueue() # Parse all feed URLs out of the OPML XML into pq.feeds=[] pq.parse_opml(pq.opml) # Download the metadata, images, and any missing episodes pq.get_feeds(pq.feeds) if __name__ == '__main__': entry()
true
true
f7f67dcdf0d2b82d616fa3157980732a991a33b3
535
py
Python
eval_ricord1a_timm-regnetx_002_GridDropout.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
eval_ricord1a_timm-regnetx_002_GridDropout.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
eval_ricord1a_timm-regnetx_002_GridDropout.py
BrunoKrinski/segtool
cb604b5f38104c43a76450136e37c3d1c4b6d275
[ "MIT" ]
null
null
null
import os ls=["python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_0_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_1_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_2_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_3_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_4_GridDropout.yml", ] for l in ls: os.system(l)
48.636364
101
0.84486
import os ls=["python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_0_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_1_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_2_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_3_GridDropout.yml", "python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_4_GridDropout.yml", ] for l in ls: os.system(l)
true
true
f7f67df1d072564d52b3c391226e8fa9a72729ae
22,004
py
Python
baselines/deepq/build_graph.py
junkilee/simple_baselines
cc5cc4b8d83119bf144abb08900762b76b1a33ac
[ "MIT" ]
null
null
null
baselines/deepq/build_graph.py
junkilee/simple_baselines
cc5cc4b8d83119bf144abb08900762b76b1a33ac
[ "MIT" ]
null
null
null
baselines/deepq/build_graph.py
junkilee/simple_baselines
cc5cc4b8d83119bf144abb08900762b76b1a33ac
[ "MIT" ]
null
null
null
"""Deep Q learning graph The functions in this file can are used to create the following functions: ======= act ======== Function to chose an action given an observation Parameters ---------- observation: object Observation that can be feed into the output of make_obs_ph stochastic: bool if set to False all the actions are always deterministic (default False) update_eps_ph: float update epsilon a new value, if negative not update happens (default: no update) Returns ------- Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= act (in case of parameter noise) ======== Function to chose an action given an observation Parameters ---------- observation: object Observation that can be feed into the output of make_obs_ph stochastic: bool if set to False all the actions are always deterministic (default False) update_eps_ph: float update epsilon a new value, if negative not update happens (default: no update) reset_ph: bool reset the perturbed policy by sampling a new perturbation update_param_noise_threshold_ph: float the desired threshold for the difference between non-perturbed and perturbed policy update_param_noise_scale_ph: bool whether or not to update the scale of the noise for the next time it is re-perturbed Returns ------- Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= train ======= Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error: td_error = Q(s,a) - (r + gamma * max_a' Q(s', a')) loss = huber_loss[td_error] Parameters ---------- obs_t: object a batch of observations action: np.array actions that were selected upon seeing obs_t. 
dtype must be int32 and shape must be (batch_size,) reward: np.array immediate reward attained after executing those actions dtype must be float32 and shape must be (batch_size,) obs_tp1: object observations that followed obs_t done: np.array 1 if obs_t was the last observation in the episode and 0 otherwise obs_tp1 gets ignored, but must be of the valid shape. dtype must be float32 and shape must be (batch_size,) weight: np.array imporance weights for every element of the batch (gradient is multiplied by the importance weight) dtype must be float32 and shape must be (batch_size,) Returns ------- td_error: np.array a list of differences between Q(s,a) and the target in Bellman's equation. dtype is float32 and shape is (batch_size,) ======= update_target ======== copy the parameters from optimized Q function to the target Q function. In Q learning we actually optimize the following error: Q(s,a) - (r + gamma * max_a' Q'(s', a')) Where Q' is lagging behind Q to stablize the learning. For example for Atari Q' is set to Q once every 10000 updates training steps. """ import tensorflow as tf import baselines.common.tf_util as U def default_param_noise_filter(var): if var not in tf.trainable_variables(): # We never perturb non-trainable vars. return False if "fully_connected" in var.name: # We perturb fully-connected layers. return True # The remaining layers are likely conv or layer norm layers, which we do not wish to # perturb (in the former case because they only extract features, in the latter case because # we use them for normalization purposes). If you change your network, you will likely want # to re-consider which layers to perturb and which to keep untouched. 
return False def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None): """Creates the act function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. """ with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) act = U.function(inputs=[observations_ph, stochastic_ph, 
update_eps_ph], outputs=[output_actions, update_eps_expr, eps], givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) return act def build_test_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, test_epsilon=0.0): """Creates the act function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. 
""" with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.0)) q_func_results = q_func(observations_ph.get(), num_actions, scope="q_func") q_values = q_func_results['q'] s_value = q_func_results['s'] a_values = q_func_results['a'] deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=[output_actions, q_values, s_value, a_values, update_eps_expr], givens={update_eps_ph: test_epsilon, stochastic_ph: False}, updates=[update_eps_expr]) return act def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None): """Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905): Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, 
num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise_filter_func: tf.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable function to select and action given observation. ` See the top of the file for details. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False) param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False) # Unmodified Q. q_values = q_func(observations_ph.get(), num_actions, scope="q_func") # Perturbable Q used for the actual rollout. q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func") # We have to wrap this code into a function due to the way tf.cond() works. See # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for # a more detailed discussion. 
def perturb_vars(original_scope, perturbed_scope): all_vars = U.scope_vars(U.absolute_scope_name("q_func")) all_perturbed_vars = U.scope_vars(U.absolute_scope_name("perturbed_q_func")) assert len(all_vars) == len(all_perturbed_vars) perturb_ops = [] for var, perturbed_var in zip(all_vars, all_perturbed_vars): if param_noise_filter_func(perturbed_var): # Perturb this variable. op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale)) else: # Do not perturb, just assign. op = tf.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return tf.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = tf.reduce_mean(kl) def update_scale(): with tf.control_dependencies([perturb_for_adaption]): update_scale_expr = tf.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. 
deterministic_actions = tf.argmax(q_values_perturbed, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])), tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)), update_param_noise_threshold_expr, ] act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates) return act def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None): """Creates the train function: Parameters ---------- make_obs_ph: str -> tf.placeholder or TfInput a function that takes a name and creates a placeholder of input with that name q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. 
num_actions: int number of actions reuse: bool whether or not to reuse the graph variables optimizer: tf.train.Optimizer optimizer to use for the Q-learning objective. grad_norm_clipping: float or None clip gradient norms to this value. If None no clipping is performed. gamma: float discount rate. double_q: bool if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a good idea to keep it enabled. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) param_noise_filter_func: tf.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. ` See the top of the file for details. update_target: () -> () copy the parameters from optimized Q function to the target Q function. ` See the top of the file for details. debug: {str: function} a bunch of functions to print debug data like q_values. 
""" if param_noise: act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse, param_noise_filter_func=param_noise_filter_func) else: act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse) with tf.variable_scope(scope, reuse=reuse): # set up placeholders obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t")) act_t_ph = tf.placeholder(tf.int32, [None], name="action") rew_t_ph = tf.placeholder(tf.float32, [None], name="reward") obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1")) done_mask_ph = tf.placeholder(tf.float32, [None], name="done") importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight") # q network evaluation q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act q_func_vars = U.scope_vars(U.absolute_scope_name("q_func")) # target q network evalution q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func") target_q_func_vars = U.scope_vars(U.absolute_scope_name("target_q_func")) # q scores for actions which we know were selected in the given state. 
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True) q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1) q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1) else: q_tp1_best = tf.reduce_max(q_tp1, 1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best # compute RHS of bellman equation q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked # compute the error (potentially clipped) td_error = q_t_selected - tf.stop_gradient(q_t_selected_target) errors = U.huber_loss(td_error) weighted_error = tf.reduce_mean(importance_weights_ph * errors) # compute optimization op (potentially with gradient clipping) if grad_norm_clipping is not None: optimize_expr = U.minimize_and_clip(optimizer, weighted_error, var_list=q_func_vars, clip_val=grad_norm_clipping) else: optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars) # update_target_fn will be called periodically to copy Q network to target Q network update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = tf.group(*update_target_expr) # Create callable functions train = U.function( inputs=[ obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, importance_weights_ph ], outputs=td_error, updates=[optimize_expr] ) update_target = U.function([], [], updates=[update_target_expr]) q_values = U.function([obs_t_input], q_t) return act_f, train, update_target, {'q_values': q_values}
46.916844
168
0.664334
import tensorflow as tf import baselines.common.tf_util as U def default_param_noise_filter(var): if var not in tf.trainable_variables(): return False if "fully_connected" in var.name: return True return False def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None): with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=[output_actions, update_eps_expr, eps], givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) return act def build_test_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, test_epsilon=0.0): with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.0)) q_func_results = q_func(observations_ph.get(), num_actions, scope="q_func") q_values = q_func_results['q'] s_value = 
q_func_results['s'] a_values = q_func_results['a'] deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=[output_actions, q_values, s_value, a_values, update_eps_expr], givens={update_eps_ph: test_epsilon, stochastic_ph: False}, updates=[update_eps_expr]) return act def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None): if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False) param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") q_values_perturbed = 
q_func(observations_ph.get(), num_actions, scope="perturbed_q_func") def perturb_vars(original_scope, perturbed_scope): all_vars = U.scope_vars(U.absolute_scope_name("q_func")) all_perturbed_vars = U.scope_vars(U.absolute_scope_name("perturbed_q_func")) assert len(all_vars) == len(all_perturbed_vars) perturb_ops = [] for var, perturbed_var in zip(all_vars, all_perturbed_vars): if param_noise_filter_func(perturbed_var): op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale)) else: op = tf.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return tf.group(*perturb_ops) q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = tf.reduce_mean(kl) def update_scale(): with tf.control_dependencies([perturb_for_adaption]): update_scale_expr = tf.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) deterministic_actions = tf.argmax(q_values_perturbed, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = 
eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])), tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)), update_param_noise_threshold_expr, ] act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates) return act def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None): if param_noise: act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse, param_noise_filter_func=param_noise_filter_func) else: act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse) with tf.variable_scope(scope, reuse=reuse): obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t")) act_t_ph = tf.placeholder(tf.int32, [None], name="action") rew_t_ph = tf.placeholder(tf.float32, [None], name="reward") obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1")) done_mask_ph = tf.placeholder(tf.float32, [None], name="done") importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight") q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) q_func_vars = U.scope_vars(U.absolute_scope_name("q_func")) q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func") target_q_func_vars = U.scope_vars(U.absolute_scope_name("target_q_func")) q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1) if double_q: q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, 
scope="q_func", reuse=True) q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1) q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1) else: q_tp1_best = tf.reduce_max(q_tp1, 1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked td_error = q_t_selected - tf.stop_gradient(q_t_selected_target) errors = U.huber_loss(td_error) weighted_error = tf.reduce_mean(importance_weights_ph * errors) if grad_norm_clipping is not None: optimize_expr = U.minimize_and_clip(optimizer, weighted_error, var_list=q_func_vars, clip_val=grad_norm_clipping) else: optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars) update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = tf.group(*update_target_expr) train = U.function( inputs=[ obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, importance_weights_ph ], outputs=td_error, updates=[optimize_expr] ) update_target = U.function([], [], updates=[update_target_expr]) q_values = U.function([obs_t_input], q_t) return act_f, train, update_target, {'q_values': q_values}
true
true
f7f67e3263e2a9954edd3be81afc24e56dc7632c
2,657
py
Python
test/integration/012_deprecation_tests/test_deprecations.py
tjengel/dbt
f985902a002fba36f6f709c6aacf9ae20778e58c
[ "Apache-2.0" ]
1
2021-09-01T20:50:52.000Z
2021-09-01T20:50:52.000Z
test/integration/012_deprecation_tests/test_deprecations.py
tjengel/dbt
f985902a002fba36f6f709c6aacf9ae20778e58c
[ "Apache-2.0" ]
1
2019-10-28T15:33:04.000Z
2019-10-28T15:33:04.000Z
test/integration/012_deprecation_tests/test_deprecations.py
tjengel/dbt
f985902a002fba36f6f709c6aacf9ae20778e58c
[ "Apache-2.0" ]
2
2019-05-10T21:23:08.000Z
2021-06-09T01:28:37.000Z
from test.integration.base import DBTIntegrationTest, use_profile from dbt import deprecations import dbt.exceptions class BaseTestDeprecations(DBTIntegrationTest): def setUp(self): super().setUp() deprecations.reset_deprecations() @property def schema(self): return "deprecation_test_012" @staticmethod def dir(path): return path.lstrip("/") @property def models(self): return self.dir("models") class TestDeprecations(BaseTestDeprecations): @use_profile('postgres') def test_postgres_deprecations_fail(self): self.run_dbt(strict=True, expect_pass=False) @use_profile('postgres') def test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) self.run_dbt(strict=False) expected = {'adapter:already_exists'} self.assertEqual(expected, deprecations.active_deprecations) class TestMacroDeprecations(BaseTestDeprecations): @property def models(self): return self.dir('boring-models') @property def project_config(self): return { 'macro-paths': [self.dir('deprecated-macros')], } @use_profile('postgres') def test_postgres_deprecations_fail(self): with self.assertRaises(dbt.exceptions.CompilationException): self.run_dbt(strict=True) @use_profile('postgres') def test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) self.run_dbt(strict=False) expected = {'generate-schema-name-single-arg'} self.assertEqual(expected, deprecations.active_deprecations) class TestMaterializationReturnDeprecation(BaseTestDeprecations): def setUp(self): super().setUp() deprecations.reset_deprecations() @property def schema(self): return "deprecation_test_012" @staticmethod def dir(path): return path.lstrip("/") @property def models(self): return self.dir('custom-models') @property def project_config(self): return { 'macro-paths': [self.dir('custom-materialization-macros')], } @use_profile('postgres') def test_postgres_deprecations_fail(self): # this should fail at runtime self.run_dbt(strict=True, expect_pass=False) 
@use_profile('postgres') def test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) self.run_dbt(strict=False) expected = {'materialization-return'} self.assertEqual(expected, deprecations.active_deprecations)
27.677083
71
0.681972
from test.integration.base import DBTIntegrationTest, use_profile from dbt import deprecations import dbt.exceptions class BaseTestDeprecations(DBTIntegrationTest): def setUp(self): super().setUp() deprecations.reset_deprecations() @property def schema(self): return "deprecation_test_012" @staticmethod def dir(path): return path.lstrip("/") @property def models(self): return self.dir("models") class TestDeprecations(BaseTestDeprecations): @use_profile('postgres') def test_postgres_deprecations_fail(self): self.run_dbt(strict=True, expect_pass=False) @use_profile('postgres') def test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) self.run_dbt(strict=False) expected = {'adapter:already_exists'} self.assertEqual(expected, deprecations.active_deprecations) class TestMacroDeprecations(BaseTestDeprecations): @property def models(self): return self.dir('boring-models') @property def project_config(self): return { 'macro-paths': [self.dir('deprecated-macros')], } @use_profile('postgres') def test_postgres_deprecations_fail(self): with self.assertRaises(dbt.exceptions.CompilationException): self.run_dbt(strict=True) @use_profile('postgres') def test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) self.run_dbt(strict=False) expected = {'generate-schema-name-single-arg'} self.assertEqual(expected, deprecations.active_deprecations) class TestMaterializationReturnDeprecation(BaseTestDeprecations): def setUp(self): super().setUp() deprecations.reset_deprecations() @property def schema(self): return "deprecation_test_012" @staticmethod def dir(path): return path.lstrip("/") @property def models(self): return self.dir('custom-models') @property def project_config(self): return { 'macro-paths': [self.dir('custom-materialization-macros')], } @use_profile('postgres') def test_postgres_deprecations_fail(self): self.run_dbt(strict=True, expect_pass=False) @use_profile('postgres') def 
test_postgres_deprecations(self): self.assertEqual(deprecations.active_deprecations, set()) self.run_dbt(strict=False) expected = {'materialization-return'} self.assertEqual(expected, deprecations.active_deprecations)
true
true
f7f67e93db29ff1b40013de76266bd63a79f6b72
1,604
py
Python
stentseg/apps/sliceviewer.py
almarklein/stentseg
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
[ "BSD-3-Clause" ]
1
2020-08-28T16:34:10.000Z
2020-08-28T16:34:10.000Z
stentseg/apps/sliceviewer.py
almarklein/stentseg
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
[ "BSD-3-Clause" ]
null
null
null
stentseg/apps/sliceviewer.py
almarklein/stentseg
48255fffdc2394d1dc4ce2208c9a91e1d4c35a46
[ "BSD-3-Clause" ]
1
2021-04-25T06:59:36.000Z
2021-04-25T06:59:36.000Z
import visvis as vv class VolViewer: """ VolViewer. View (CT) volume while scrolling through slices (z) """ def __init__(self, vol): # Store vol and init self.vol = vol self.z = 0 # Prepare figure and axex self.f = vv.figure(1001) self.f.Clear() self.a = vv.gca() # Create slice in 2D texture self.t = vv.imshow(vol[self.z,:,:]) # Bind self.f.eventScroll.Bind(self.on_scroll) self.a.eventScroll.Bind(self.on_scroll) def on_scroll(self, event): self.z += int(event.verticalSteps) self.z = max(0, self.z) self.z = min(self.vol.shape[0], self.z) self.show() return True def show(self): self.t.SetData(self.vol[self.z]) # #todo: incorporate option to draw line profile to get intensities and option to display intensity under mouse # from skimage.measure import profile_line # img = vol[0,:,:] # vv.imshow(img) # plt.imshow(img) # # im = vv.imread('lena.png') # new_viewer = skimage.viewer.ImageViewer(img) # from skimage.viewer.plugins import lineprofile # new_viewer += lineprofile.LineProfile() # new_viewer.show() # # # import numpy as np # import matplotlib.pyplot as plt # # class Formatter(object): # def __init__(self, im): # self.im = im # def __call__(self, x, y): # z = self.im.get_array()[int(y), int(x)] # return 'x={:.01f}, y={:.01f}, z={:.01f}'.format(x, y, z) # # fig, ax = plt.subplots() # im = ax.imshow(img) # ax.format_coord = Formatter(im) # plt.show()
27.186441
111
0.593516
import visvis as vv class VolViewer: def __init__(self, vol): self.vol = vol self.z = 0 self.f = vv.figure(1001) self.f.Clear() self.a = vv.gca() self.t = vv.imshow(vol[self.z,:,:]) self.f.eventScroll.Bind(self.on_scroll) self.a.eventScroll.Bind(self.on_scroll) def on_scroll(self, event): self.z += int(event.verticalSteps) self.z = max(0, self.z) self.z = min(self.vol.shape[0], self.z) self.show() return True def show(self): self.t.SetData(self.vol[self.z])
true
true
f7f67f2bcbc49693ddf71edbadec225650e745b7
942
py
Python
tests/testapp/models/costume.py
Bilonan/django-binder
d2d9b504a92029a0afc616be81a08f0deddd5b64
[ "MIT" ]
14
2016-08-15T13:08:55.000Z
2021-11-17T11:43:20.000Z
tests/testapp/models/costume.py
Bilonan/django-binder
d2d9b504a92029a0afc616be81a08f0deddd5b64
[ "MIT" ]
141
2016-08-14T15:36:35.000Z
2022-02-17T08:53:52.000Z
tests/testapp/models/costume.py
Bilonan/django-binder
d2d9b504a92029a0afc616be81a08f0deddd5b64
[ "MIT" ]
18
2016-10-01T21:30:22.000Z
2022-03-28T10:51:41.000Z
from django.db import models from binder.models import BinderModel from binder.websocket import trigger from django.db.models.signals import post_save # Some of our fictitious animals actually wear clothes/costumes... # Each costume is unique to an animal (one to one mapping) class Costume(BinderModel): class Meta(BinderModel.Meta): ordering = ['animal_id'] nickname = models.TextField(blank=True) description = models.TextField(blank=True, null=True) animal = models.OneToOneField('Animal', on_delete=models.CASCADE, related_name='costume', primary_key=True) def __str__(self): return 'costume %d: %s (for %s)' % (self.pk, self.description, self.animal) def list_rooms(self): return [{ 'costume': self.animal.id, }] def trigger_websocket(instance, created, **kwargs): if created: costume = instance trigger({'id': costume.animal.id}, costume.list_rooms()) post_save.connect(trigger_websocket, sender=Costume)
29.4375
108
0.753715
from django.db import models from binder.models import BinderModel from binder.websocket import trigger from django.db.models.signals import post_save class Costume(BinderModel): class Meta(BinderModel.Meta): ordering = ['animal_id'] nickname = models.TextField(blank=True) description = models.TextField(blank=True, null=True) animal = models.OneToOneField('Animal', on_delete=models.CASCADE, related_name='costume', primary_key=True) def __str__(self): return 'costume %d: %s (for %s)' % (self.pk, self.description, self.animal) def list_rooms(self): return [{ 'costume': self.animal.id, }] def trigger_websocket(instance, created, **kwargs): if created: costume = instance trigger({'id': costume.animal.id}, costume.list_rooms()) post_save.connect(trigger_websocket, sender=Costume)
true
true
f7f67fc4f82f57e691ea958c42b57e7e0b1ca62b
3,912
py
Python
data/aligned3_tm_max_dataset.py
tkuri/pytorch-CycleGAN-and-pix2pix
b00b3f0bcebfb12d3f026c2a61c98ff63175a583
[ "BSD-3-Clause" ]
null
null
null
data/aligned3_tm_max_dataset.py
tkuri/pytorch-CycleGAN-and-pix2pix
b00b3f0bcebfb12d3f026c2a61c98ff63175a583
[ "BSD-3-Clause" ]
null
null
null
data/aligned3_tm_max_dataset.py
tkuri/pytorch-CycleGAN-and-pix2pix
b00b3f0bcebfb12d3f026c2a61c98ff63175a583
[ "BSD-3-Clause" ]
null
null
null
import os.path from data.base_dataset import BaseDataset, get_params, get_transform from data.image_folder import make_dataset from PIL import Image, ImageOps import torch class Aligned3TmMaxDataset(BaseDataset): """A dataset class for paired image dataset. It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}. During test time, you need to prepare a directory '/path/to/data/test'. """ def __init__(self, opt): """Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions """ BaseDataset.__init__(self, opt) self.dir_ABC = os.path.join(opt.dataroot, opt.phase) # get the image directory self.ABC_paths = sorted(make_dataset(self.dir_ABC, opt.max_dataset_size)) # get image paths assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc self.input2_nc = self.opt.input2_nc def __getitem__(self, index): """Return a data point and its metadata information. 
Parameters: index - - a random integer for data indexing Returns a dictionary that contains A, B, A_paths and B_paths A (tensor) - - an image in the input domain B (tensor) - - its corresponding image in the target domain C (tensor) - - an alternative image in the input domain A_paths (str) - - image paths B_paths (str) - - image paths (same as A_paths) C_paths (str) - - image paths (same as A_paths) """ # read a image given a random integer index ABC_path = self.ABC_paths[index] ABC = Image.open(ABC_path).convert('RGB') # split AB image into A and B w, h = ABC.size h25 = int(h / 25) w3 = int(w / 3) A = [] B = [] C = [] for i in range(25): A.append(ABC.crop((0, h25*i, w3, h25*(i+1)))) B.append(ABC.crop((w3, h25*i, w3*2, h25*(i+1)))) Ctmp = ImageOps.flip(ABC.crop((w3*2, h25*i, w, h25*(i+1)))) Ctmp = Ctmp.convert("L") _, vmax = Ctmp.getextrema() Ctmp = Ctmp.point(lambda x: 0 if x < vmax else 255) C.append(Ctmp) # apply the same transform to both A and B transform_params = get_params(self.opt, A[0].size) A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1)) B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1)) C_transform = get_transform(self.opt, transform_params, grayscale=(self.input2_nc == 1), convert=False) for i in range(25): A[i] = A_transform(A[i]) B[i] = B_transform(B[i]) C[i] = C_transform(C[i]) Acat = torch.unsqueeze(A[0], 0) Bcat = torch.unsqueeze(B[0], 0) Ccat = torch.unsqueeze(C[0], 0) for i in range(1,25): Acat = torch.cat([Acat, torch.unsqueeze(A[i], 0)], dim=0) Bcat = torch.cat([Bcat, torch.unsqueeze(B[i], 0)], dim=0) Ccat = torch.cat([Ccat, torch.unsqueeze(C[i], 0)], dim=0) # print('Acat size:', Acat.size()) # print('A_trans:', A.max(), A.min()) # print('B_trans:', B.max(), B.min()) # print('C_trans:', C.max(), C.min()) return {'A': Acat, 'B': Bcat, 'C': Ccat, 'A_paths': ABC_path, 'B_paths': ABC_path, 'C_paths': ABC_path} def __len__(self): """Return the total number of images in 
the dataset.""" return len(self.ABC_paths)
42.989011
118
0.594325
import os.path from data.base_dataset import BaseDataset, get_params, get_transform from data.image_folder import make_dataset from PIL import Image, ImageOps import torch class Aligned3TmMaxDataset(BaseDataset): def __init__(self, opt): BaseDataset.__init__(self, opt) self.dir_ABC = os.path.join(opt.dataroot, opt.phase) self.ABC_paths = sorted(make_dataset(self.dir_ABC, opt.max_dataset_size)) assert(self.opt.load_size >= self.opt.crop_size) self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc self.input2_nc = self.opt.input2_nc def __getitem__(self, index): ABC_path = self.ABC_paths[index] ABC = Image.open(ABC_path).convert('RGB') w, h = ABC.size h25 = int(h / 25) w3 = int(w / 3) A = [] B = [] C = [] for i in range(25): A.append(ABC.crop((0, h25*i, w3, h25*(i+1)))) B.append(ABC.crop((w3, h25*i, w3*2, h25*(i+1)))) Ctmp = ImageOps.flip(ABC.crop((w3*2, h25*i, w, h25*(i+1)))) Ctmp = Ctmp.convert("L") _, vmax = Ctmp.getextrema() Ctmp = Ctmp.point(lambda x: 0 if x < vmax else 255) C.append(Ctmp) transform_params = get_params(self.opt, A[0].size) A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1)) B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1)) C_transform = get_transform(self.opt, transform_params, grayscale=(self.input2_nc == 1), convert=False) for i in range(25): A[i] = A_transform(A[i]) B[i] = B_transform(B[i]) C[i] = C_transform(C[i]) Acat = torch.unsqueeze(A[0], 0) Bcat = torch.unsqueeze(B[0], 0) Ccat = torch.unsqueeze(C[0], 0) for i in range(1,25): Acat = torch.cat([Acat, torch.unsqueeze(A[i], 0)], dim=0) Bcat = torch.cat([Bcat, torch.unsqueeze(B[i], 0)], dim=0) Ccat = torch.cat([Ccat, torch.unsqueeze(C[i], 0)], dim=0) return {'A': Acat, 'B': Bcat, 'C': Ccat, 'A_paths': ABC_path, 'B_paths': ABC_path, 'C_paths': ABC_path} def __len__(self): return 
len(self.ABC_paths)
true
true
f7f67ffad82d122822794a35f28a9b7086905d6d
564
py
Python
ics/structures/rad_moon_duo_converter_settings.py
intrepidcs/python_ics
7bfa8c2f893763608f9255f9536a2019cfae0c23
[ "Unlicense" ]
45
2017-10-17T08:42:08.000Z
2022-02-21T16:26:48.000Z
ics/structures/rad_moon_duo_converter_settings.py
intrepidcs/python_ics
7bfa8c2f893763608f9255f9536a2019cfae0c23
[ "Unlicense" ]
106
2017-03-07T21:10:39.000Z
2022-03-29T15:32:46.000Z
ics/structures/rad_moon_duo_converter_settings.py
intrepidcs/python_ics
7bfa8c2f893763608f9255f9536a2019cfae0c23
[ "Unlicense" ]
17
2017-04-04T12:30:22.000Z
2022-01-28T05:30:25.000Z
# This file was auto generated; Do not modify, if you value your sanity! import ctypes import enum class rad_moon_duo_converter_settings(ctypes.Structure): _pack_ = 2 _fields_ = [ ('linkMode0', ctypes.c_uint8), ('linkMode1', ctypes.c_uint8), ('converter1Mode', ctypes.c_uint8), ('ipAddress', ctypes.c_uint32), ('ipMask', ctypes.c_uint32), ('ipGateway', ctypes.c_uint32), ] _RadMoonDuoConverterSettings = rad_moon_duo_converter_settings RadMoonDuoConverterSettings = rad_moon_duo_converter_settings
25.636364
72
0.703901
import ctypes import enum class rad_moon_duo_converter_settings(ctypes.Structure): _pack_ = 2 _fields_ = [ ('linkMode0', ctypes.c_uint8), ('linkMode1', ctypes.c_uint8), ('converter1Mode', ctypes.c_uint8), ('ipAddress', ctypes.c_uint32), ('ipMask', ctypes.c_uint32), ('ipGateway', ctypes.c_uint32), ] _RadMoonDuoConverterSettings = rad_moon_duo_converter_settings RadMoonDuoConverterSettings = rad_moon_duo_converter_settings
true
true
f7f680d798ba23b7633125e1e293cf403f0d37e2
27,427
py
Python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/aio/operations/_local_network_gateways_operations.py
praveenkuttappan/azure-sdk-for-python
4b79413667b7539750a6c7dde15737013a3d4bd5
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/aio/operations/_local_network_gateways_operations.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/aio/operations/_local_network_gateways_operations.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class LocalNetworkGatewaysOperations: """LocalNetworkGatewaysOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2018_06_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _create_or_update_initial( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.LocalNetworkGateway", **kwargs: Any ) -> "_models.LocalNetworkGateway": cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'LocalNetworkGateway') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = 
pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.LocalNetworkGateway", **kwargs: Any ) -> AsyncLROPoller["_models.LocalNetworkGateway"]: """Creates or updates a local network gateway in the specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. :type local_network_gateway_name: str :param parameters: Parameters supplied to the create or update local network gateway operation. :type parameters: ~azure.mgmt.network.v2018_06_01.models.LocalNetworkGateway :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.LocalNetworkGateway] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def get( self, resource_group_name: str, local_network_gateway_name: str, **kwargs: Any ) -> "_models.LocalNetworkGateway": """Gets the specified local network gateway in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. :type local_network_gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: LocalNetworkGateway, or the result of cls(response) :rtype: ~azure.mgmt.network.v2018_06_01.models.LocalNetworkGateway :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, local_network_gateway_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def begin_delete( self, resource_group_name: str, local_network_gateway_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes the specified local network gateway. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. :type local_network_gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def _update_tags_initial( self, resource_group_name: str, 
local_network_gateway_name: str, parameters: "_models.TagsObject", **kwargs: Any ) -> "_models.LocalNetworkGateway": cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._update_tags_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) 
if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def begin_update_tags( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.TagsObject", **kwargs: Any ) -> AsyncLROPoller["_models.LocalNetworkGateway"]: """Updates a local network gateway tags. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. :type local_network_gateway_name: str :param parameters: Parameters supplied to update local network gateway tags. :type parameters: ~azure.mgmt.network.v2018_06_01.models.TagsObject :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.LocalNetworkGateway] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._update_tags_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update_tags.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore def list( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.LocalNetworkGatewayListResult"]: """Gets all the local network gateways in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.LocalNetworkGatewayListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGatewayListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) 
return request async def extract_data(pipeline_response): deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
51.651601
209
0.681919
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class LocalNetworkGatewaysOperations: models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _create_or_update_initial( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.LocalNetworkGateway", **kwargs: Any ) -> "_models.LocalNetworkGateway": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" url = self._create_or_update_initial.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} 
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} body_content = self._serialize.body(parameters, 'LocalNetworkGateway') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} async def begin_create_or_update( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.LocalNetworkGateway", **kwargs: Any ) -> AsyncLROPoller["_models.LocalNetworkGateway"]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) 
kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} async def get( self, resource_group_name: str, local_network_gateway_name: str, **kwargs: Any ) -> "_models.LocalNetworkGateway": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" url = self.get.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", 
self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} async def _delete_initial( self, resource_group_name: str, local_network_gateway_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" url = self._delete_initial.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} request = self._client.delete(url, query_parameters, 
header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} async def begin_delete( self, resource_group_name: str, local_network_gateway_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, 
client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} async def _update_tags_initial( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.TagsObject", **kwargs: Any ) -> "_models.LocalNetworkGateway": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" url = self._update_tags_initial.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} body_content = self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} async def begin_update_tags( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.TagsObject", **kwargs: Any ) -> AsyncLROPoller["_models.LocalNetworkGateway"]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._update_tags_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( 
polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} def list( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.LocalNetworkGatewayListResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: url = self.list.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = 
pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'}
true
true
f7f68190aae1fda7d3e9ea1f61659cf10ddedf4f
4,478
py
Python
tests/conftest.py
bullocke/yatsm_nrt
b0ded56032bf9f9dcdf6b7b749f6554ade56de1e
[ "MIT" ]
2
2018-04-25T02:10:30.000Z
2021-07-30T03:57:49.000Z
tests/conftest.py
bullocke/yatsm_nrt
b0ded56032bf9f9dcdf6b7b749f6554ade56de1e
[ "MIT" ]
null
null
null
tests/conftest.py
bullocke/yatsm_nrt
b0ded56032bf9f9dcdf6b7b749f6554ade56de1e
[ "MIT" ]
1
2017-04-01T16:11:52.000Z
2017-04-01T16:11:52.000Z
import fnmatch from functools import partial import os import shutil import tarfile from tempfile import mkdtemp try: from os import walk except ImportError: from scandir import walk if os.environ.get('TRAVIS'): # use agg backend on TRAVIS for testing so DISPLAY isn't required import matplotlib as mpl mpl.use('agg') import numpy as np # noqa import pandas as pd # noqa import pytest # noqa import yaml # noqa here = os.path.dirname(__file__) example_cachedir = os.path.join(here, 'data', 'cache') example_cachefile = os.path.join(example_cachedir, 'yatsm_r0_n447_b8.npy.npz') example_training = os.path.join(here, 'data', 'results', 'training_image_1995-06-01.gtif') yaml_config = os.path.join(here, 'data', 'p035r032_config.yaml') example_classify_config = 'RandomForest.yaml' example_classify_pickle = 'train_rf.pkl' # EXAMPLE DATASETS @pytest.fixture(scope='session') def example_timeseries(request): """ Extract example timeseries returning a dictionary of dataset attributes """ path = mkdtemp('_yatsm') tgz = os.path.join(here, 'data', 'p035r032_testdata.tar.gz') with tarfile.open(tgz) as tgz: tgz.extractall(path) request.addfinalizer(partial(shutil.rmtree, path)) # Find data subset_path = os.path.join(path, 'p035r032', 'images') stack_images, stack_image_IDs = [], [] for root, dnames, fnames in walk(subset_path): for fname in fnmatch.filter(fnames, 'L*stack.gtif'): stack_images.append(os.path.join(root, fname)) stack_image_IDs.append(os.path.basename(root)) stack_images = np.asarray(stack_images) stack_image_IDs = np.asarray(stack_image_IDs) # Formulate "images.csv" input_file input_file = os.path.join(path, 'images.csv') dates = np.array([_d[9:16]for _d in stack_image_IDs]) # YYYYDOY sensors = np.array([_id[0:3] for _id in stack_image_IDs]) # Landsat IDs df = pd.DataFrame({ 'date': dates, 'sensor': sensors, 'filename': stack_images }) # Sort by date pd_ver = pd.__version__.split('.') if pd_ver[0] == '0' and int(pd_ver[1]) < 17: df = df.sort(columns='date') else: df = 
df.sort_values(by='date') df.to_csv(input_file, index=False) # Copy configuration file dest_config = os.path.join(path, os.path.basename(yaml_config)) config = yaml.load(open(yaml_config)) config['dataset']['input_file'] = input_file config['dataset']['output'] = os.path.join(path, 'YATSM') config['dataset']['cache_line_dir'] = os.path.join(path, 'cache') config['classification']['training_image'] = example_training yaml.dump(config, open(dest_config, 'w')) return { 'path': subset_path, 'images': stack_images, 'image_IDs': stack_image_IDs, 'input_file': input_file, 'images.csv': df, 'config': dest_config, } @pytest.fixture(scope='function') def example_results(request, tmpdir): dst = os.path.join(tmpdir.mkdir('data').strpath, 'results') shutil.copytree(os.path.join(here, 'data', 'results'), dst) results = { 'root': dst, 'results_dir': os.path.join(dst, 'YATSM'), 'results_dir_classified': os.path.join(dst, 'YATSM_classified'), 'example_img': os.path.join(dst, 'example_image.gtif'), 'classify_config': os.path.join(dst, example_classify_config), 'example_classify_pickle': os.path.join(dst, example_classify_pickle) } return results @pytest.fixture(scope='session') def example_cache(request): return np.load(example_cachefile) # EXAMPLE CACHE DATA @pytest.fixture(scope='function') def cachedir(request): return example_cachedir @pytest.fixture(scope='function') def cachefile(request): return example_cachefile # MISC @pytest.fixture(scope='function') def mkdir_permissions(request): """ Fixture for creating dir with specific read/write permissions """ def make_mkdir(read=False, write=False): if read and write: mode = 0755 elif read and not write: mode = 0555 elif not read and write: mode = 0333 elif not read and not write: mode = 0000 path = mkdtemp() os.chmod(path, mode) def fin(): os.chmod(path, 0755) os.removedirs(path) request.addfinalizer(fin) return path return make_mkdir
30.256757
79
0.659
import fnmatch from functools import partial import os import shutil import tarfile from tempfile import mkdtemp try: from os import walk except ImportError: from scandir import walk if os.environ.get('TRAVIS'): import matplotlib as mpl mpl.use('agg') import numpy as np # noqa import pandas as pd # noqa import pytest # noqa import yaml # noqa here = os.path.dirname(__file__) example_cachedir = os.path.join(here, 'data', 'cache') example_cachefile = os.path.join(example_cachedir, 'yatsm_r0_n447_b8.npy.npz') example_training = os.path.join(here, 'data', 'results', 'training_image_1995-06-01.gtif') yaml_config = os.path.join(here, 'data', 'p035r032_config.yaml') example_classify_config = 'RandomForest.yaml' example_classify_pickle = 'train_rf.pkl' # EXAMPLE DATASETS @pytest.fixture(scope='session') def example_timeseries(request): """ Extract example timeseries returning a dictionary of dataset attributes """ path = mkdtemp('_yatsm') tgz = os.path.join(here, 'data', 'p035r032_testdata.tar.gz') with tarfile.open(tgz) as tgz: tgz.extractall(path) request.addfinalizer(partial(shutil.rmtree, path)) # Find data subset_path = os.path.join(path, 'p035r032', 'images') stack_images, stack_image_IDs = [], [] for root, dnames, fnames in walk(subset_path): for fname in fnmatch.filter(fnames, 'L*stack.gtif'): stack_images.append(os.path.join(root, fname)) stack_image_IDs.append(os.path.basename(root)) stack_images = np.asarray(stack_images) stack_image_IDs = np.asarray(stack_image_IDs) # Formulate "images.csv" input_file input_file = os.path.join(path, 'images.csv') dates = np.array([_d[9:16]for _d in stack_image_IDs]) # YYYYDOY sensors = np.array([_id[0:3] for _id in stack_image_IDs]) # Landsat IDs df = pd.DataFrame({ 'date': dates, 'sensor': sensors, 'filename': stack_images }) # Sort by date pd_ver = pd.__version__.split('.') if pd_ver[0] == '0' and int(pd_ver[1]) < 17: df = df.sort(columns='date') else: df = df.sort_values(by='date') df.to_csv(input_file, index=False) # Copy 
configuration file dest_config = os.path.join(path, os.path.basename(yaml_config)) config = yaml.load(open(yaml_config)) config['dataset']['input_file'] = input_file config['dataset']['output'] = os.path.join(path, 'YATSM') config['dataset']['cache_line_dir'] = os.path.join(path, 'cache') config['classification']['training_image'] = example_training yaml.dump(config, open(dest_config, 'w')) return { 'path': subset_path, 'images': stack_images, 'image_IDs': stack_image_IDs, 'input_file': input_file, 'images.csv': df, 'config': dest_config, } @pytest.fixture(scope='function') def example_results(request, tmpdir): dst = os.path.join(tmpdir.mkdir('data').strpath, 'results') shutil.copytree(os.path.join(here, 'data', 'results'), dst) results = { 'root': dst, 'results_dir': os.path.join(dst, 'YATSM'), 'results_dir_classified': os.path.join(dst, 'YATSM_classified'), 'example_img': os.path.join(dst, 'example_image.gtif'), 'classify_config': os.path.join(dst, example_classify_config), 'example_classify_pickle': os.path.join(dst, example_classify_pickle) } return results @pytest.fixture(scope='session') def example_cache(request): return np.load(example_cachefile) # EXAMPLE CACHE DATA @pytest.fixture(scope='function') def cachedir(request): return example_cachedir @pytest.fixture(scope='function') def cachefile(request): return example_cachefile # MISC @pytest.fixture(scope='function') def mkdir_permissions(request): """ Fixture for creating dir with specific read/write permissions """ def make_mkdir(read=False, write=False): if read and write: mode = 0755 elif read and not write: mode = 0555 elif not read and write: mode = 0333 elif not read and not write: mode = 0000 path = mkdtemp() os.chmod(path, mode) def fin(): os.chmod(path, 0755) os.removedirs(path) request.addfinalizer(fin) return path return make_mkdir
false
true
f7f68375b9f644ed7779c5df8b9b4a1f86fb148f
15,788
bzl
Python
deps.bzl
anthonylusardi-da/daml
ca297690049f0c425a386d693f72c5e385f19ba5
[ "Apache-2.0" ]
null
null
null
deps.bzl
anthonylusardi-da/daml
ca297690049f0c425a386d693f72c5e385f19ba5
[ "Apache-2.0" ]
null
null
null
deps.bzl
anthonylusardi-da/daml
ca297690049f0c425a386d693f72c5e385f19ba5
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # The dependencies of the daml workspace. # This allows using the daml workspace externally # from another bazel workspace. # # For example, another Bazel project can depend on # targets in the daml repository by doing: # --- # local_repository( # name = "com_github_digital_asset_daml", # path = "/path/to/daml" # ) # load("@com_github_digital_asset_daml//:deps.bzl", "daml_deps") # daml_deps() # --- # # A 3rd-party consumer would also need to register relevant # toolchains and repositories in order to build targets. # That is, copy some setup calls from WORKSPACE into the # other WORKSPACE. # # Make sure to reference repository local files with the full # prefix: @com_github_digital_asset_daml//..., as these won't # be resolvable from external workspaces otherwise. load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") rules_scala_version = "6c16cff213b76a4126bdc850956046da5db1daaa" rules_haskell_version = "ac87721a4dbc0f7dbe731df928d322f02ed93330" rules_haskell_sha256 = "684f91defad36e9d6ce3ac4213864b89e8f6fe813508ae93bfe80996447a1516" rules_nixpkgs_version = "d3c7bc94fed4001d5375632a936d743dc085c9a1" rules_nixpkgs_sha256 = "903c6b98aa6a298bf45a6b931e77a3313c40a0cb1b44fa00d9792f9e8aedbb35" buildifier_version = "0.26.0" buildifier_sha256 = "86592d703ecbe0c5cbb5139333a63268cf58d7efd2c459c8be8e69e77d135e29" zlib_version = "cacf7f1d4e3d44d871b605da3b647f07d718623f" zlib_sha256 = "6d4d6640ca3121620995ee255945161821218752b551a1a180f4215f7d124d45" rules_nodejs_version = "1.6.0" rules_nodejs_sha256 = "f9e7b9f42ae202cc2d2ce6d698ccb49a9f7f7ea572a78fd451696d03ef2ee116" # Recent davl. 
davl_version = "f2d7480d118f32626533d6a150a8ee7552cc0222" # 2020-03-23, "Deploy upgrade to DAML SDK 0.13.56-snapshot.20200318",https://github.com/digital-asset/davl/pull/233/commits. davl_sha256 = "3e8ae2a05724093e33b7f0363381e81a7e8e9655ccb3aa47ad540ea87e814321" # Pinned davl relied on by damlc packaging tests. davl_v3_version = "51d3977be2ab22f7f4434fd4692ca2e17a7cce23" davl_v3_sha256 = "e8e76e21b50fb3adab36df26045b1e8c3ee12814abc60f137d39b864d2eae166" # daml cheat sheet daml_cheat_sheet_version = "32bc69d42c49be5844650ddf81d3ac37e5f7fc8b" # 2020-05-19 daml_cheat_sheet_sha256 = "f21626f0eb258ad578d7a73afa2256d976fcf0680be2d5eeefbac392a9b01496" def daml_deps(): if "rules_haskell" not in native.existing_rules(): http_archive( name = "rules_haskell", strip_prefix = "rules_haskell-%s" % rules_haskell_version, urls = ["https://github.com/tweag/rules_haskell/archive/%s.tar.gz" % rules_haskell_version], patches = [ # Update and remove this patch once this is upstreamed. # See https://github.com/tweag/rules_haskell/pull/1281 "@com_github_digital_asset_daml//bazel_tools:haskell-strict-source-names.patch", # The fake libs issue should be fixed in upstream rules_haskell # or GHC. Remove this patch once that's available. "@com_github_digital_asset_daml//bazel_tools:haskell-windows-remove-fake-libs.patch", # This is a daml specific patch and not upstreamable. "@com_github_digital_asset_daml//bazel_tools:haskell-windows-extra-libraries.patch", # This fixes a ghc-lib specific build issue and is not upstreamable. # This might also be fixed by using `stack_snapshot` in the future. "@com_github_digital_asset_daml//bazel_tools:haskell-no-isystem.patch", # This should be made configurable in rules_haskell. # Remove this patch once that's available. "@com_github_digital_asset_daml//bazel_tools:haskell-opt.patch", # This can be upstreamed. 
"@com_github_digital_asset_daml//bazel_tools:haskell-pgmc.patch", ], patch_args = ["-p1"], sha256 = rules_haskell_sha256, ) if "io_tweag_rules_nixpkgs" not in native.existing_rules(): http_archive( name = "io_tweag_rules_nixpkgs", strip_prefix = "rules_nixpkgs-%s" % rules_nixpkgs_version, urls = ["https://github.com/tweag/rules_nixpkgs/archive/%s.tar.gz" % rules_nixpkgs_version], sha256 = rules_nixpkgs_sha256, patches = [ # Remove once https://github.com/tweag/rules_nixpkgs/pull/128 # has been merged "@com_github_digital_asset_daml//bazel_tools:nixpkgs-hermetic-cc-toolchain.patch", # On CI and locally we observe occasional segmantation faults # of nix. A known issue since Nix 2.2.2 is that HTTP2 support # can cause such segmentation faults. Since Nix 2.3.2 it is # possible to disable HTTP2 via a command-line flag, which # reportedly solves the issue. See # https://github.com/NixOS/nix/issues/2733#issuecomment-518324335 "@com_github_digital_asset_daml//bazel_tools:nixpkgs-disable-http2.patch", ], patch_args = ["-p1"], ) if "com_github_madler_zlib" not in native.existing_rules(): http_archive( name = "com_github_madler_zlib", build_file = "@com_github_digital_asset_daml//3rdparty/c:zlib.BUILD", strip_prefix = "zlib-{}".format(zlib_version), urls = ["https://github.com/madler/zlib/archive/{}.tar.gz".format(zlib_version)], sha256 = zlib_sha256, ) if "io_bazel_rules_go" not in native.existing_rules(): http_archive( name = "io_bazel_rules_go", urls = [ "https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.2/rules_go-v0.20.2.tar.gz", "https://github.com/bazelbuild/rules_go/releases/download/v0.20.2/rules_go-v0.20.2.tar.gz", ], sha256 = "b9aa86ec08a292b97ec4591cf578e020b35f98e12173bbd4a921f84f583aebd9", ) if "rules_jvm_external" not in native.existing_rules(): http_archive( name = "rules_jvm_external", strip_prefix = "rules_jvm_external-2.8", sha256 = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad", url = 
"https://github.com/bazelbuild/rules_jvm_external/archive/2.8.zip", ) if "io_bazel_rules_scala" not in native.existing_rules(): http_archive( name = "io_bazel_rules_scala", url = "https://github.com/bazelbuild/rules_scala/archive/%s.zip" % rules_scala_version, type = "zip", strip_prefix = "rules_scala-%s" % rules_scala_version, sha256 = "132cf8eeaab67f3142cec17152b8415901e7fa8396dd585d6334eec21bf7419d", patches = [ "@com_github_digital_asset_daml//bazel_tools:scala-escape-jvmflags.patch", "@com_github_digital_asset_daml//bazel_tools:scala-fail-jmh-build-on-error.patch", ], patch_args = ["-p1"], ) if "io_bazel_rules_docker" not in native.existing_rules(): http_archive( name = "io_bazel_rules_docker", url = "https://github.com/bazelbuild/rules_docker/releases/download/v0.12.1/rules_docker-v0.12.1.tar.gz", strip_prefix = "rules_docker-0.12.1", sha256 = "14ac30773fdb393ddec90e158c9ec7ebb3f8a4fd533ec2abbfd8789ad81a284b", ) if "com_google_protobuf" not in native.existing_rules(): http_archive( name = "com_google_protobuf", sha256 = "1e622ce4b84b88b6d2cdf1db38d1a634fe2392d74f0b7b74ff98f3a51838ee53", strip_prefix = "protobuf-3.8.0", urls = ["https://github.com/google/protobuf/archive/v3.8.0.zip"], patches = [ "@com_github_digital_asset_daml//bazel_tools:proto-zlib-url.patch", ], patch_args = ["-p1"], ) if "bazel_gazelle" not in native.existing_rules(): http_archive( name = "bazel_gazelle", urls = [ "https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz", "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz", ], sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f", ) if "io_bazel_rules_sass" not in native.existing_rules(): http_archive( name = "io_bazel_rules_sass", sha256 = "7b9c9a88099d00dbb16be359c3b1946309d99673220c6b39c7e8bda8ecc692f8", strip_prefix = "rules_sass-1.24.4", urls = [ 
"https://github.com/bazelbuild/rules_sass/archive/1.24.4.zip", "https://mirror.bazel.build/github.com/bazelbuild/rules_sass/archive/1.24.4.zip", ], ) # Fetch rules_nodejs so we can install our npm dependencies if "build_bazel_rules_nodejs" not in native.existing_rules(): http_archive( name = "build_bazel_rules_nodejs", urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/{}/rules_nodejs-{}.tar.gz".format(rules_nodejs_version, rules_nodejs_version)], sha256 = rules_nodejs_sha256, patches = [ # Work around for https://github.com/bazelbuild/rules_nodejs/issues/1565 "@com_github_digital_asset_daml//bazel_tools:rules_nodejs_npm_cli_path.patch", "@com_github_digital_asset_daml//bazel_tools:rules_nodejs_node_dependency.patch", ], patch_args = ["-p1"], ) if "com_github_grpc_grpc" not in native.existing_rules(): # This should be kept in sync with the grpc version we get from Nix. http_archive( name = "com_github_grpc_grpc", strip_prefix = "grpc-1.23.1", urls = ["https://github.com/grpc/grpc/archive/v1.23.1.tar.gz"], sha256 = "dd7da002b15641e4841f20a1f3eb1e359edb69d5ccf8ac64c362823b05f523d9", patches = [ "@com_github_digital_asset_daml//bazel_tools:grpc-bazel-mingw.patch", ], patch_args = ["-p1"], ) if "io_grpc_grpc_java" not in native.existing_rules(): http_archive( name = "io_grpc_grpc_java", strip_prefix = "grpc-java-1.21.0", urls = ["https://github.com/grpc/grpc-java/archive/v1.21.0.tar.gz"], sha256 = "9bc289e861c6118623fcb931044d843183c31d0e4d53fc43c4a32b56d6bb87fa", patches = [ "@com_github_digital_asset_daml//bazel_tools:grpc-java-plugin-visibility.patch", ], patch_args = ["-p1"], ) if "com_github_johnynek_bazel_jar_jar" not in native.existing_rules(): http_archive( name = "com_github_johnynek_bazel_jar_jar", sha256 = "841ae424eec3f322d411eb49d949622cc84787cb4189a30698fa9adadb98deac", strip_prefix = "bazel_jar_jar-20dbf71f09b1c1c2a8575a42005a968b38805519", urls = 
["https://github.com/johnynek/bazel_jar_jar/archive/20dbf71f09b1c1c2a8575a42005a968b38805519.zip"], # Latest commit SHA as at 2019/02/13 ) if "com_github_googleapis_googleapis" not in native.existing_rules(): http_archive( name = "com_github_googleapis_googleapis", strip_prefix = "googleapis-6c48ab5aef47dc14e02e2dc718d232a28067129d", urls = ["https://github.com/googleapis/googleapis/archive/6c48ab5aef47dc14e02e2dc718d232a28067129d.tar.gz"], sha256 = "70d7be6ad49b4424313aad118c8622aab1c5fdd5a529d4215d3884ff89264a71", ) # Buildifier. # It is written in Go and hence needs rules_go to be available. if "com_github_bazelbuild_buildtools" not in native.existing_rules(): http_archive( name = "com_github_bazelbuild_buildtools", sha256 = buildifier_sha256, strip_prefix = "buildtools-{}".format(buildifier_version), url = "https://github.com/bazelbuild/buildtools/archive/{}.tar.gz".format(buildifier_version), ) native.bind( name = "guava", actual = "@com_google_guava_guava//jar", ) native.bind( name = "gson", actual = "@com_google_code_gson_gson//jar", ) if "com_github_google_bazel_common" not in native.existing_rules(): http_archive( name = "com_github_google_bazel_common", sha256 = "48a209fed9575c9d108eaf11fb77f7fe6178a90135e4d60cac6f70c2603aa53a", strip_prefix = "bazel-common-9e3880428c1837db9fb13335ed390b7e33e346a7", urls = ["https://github.com/google/bazel-common/archive/9e3880428c1837db9fb13335ed390b7e33e346a7.zip"], ) if "com_github_grpc_ecosystem_grpc_health_probe_binary" not in native.existing_rules(): http_file( name = "com_github_grpc_ecosystem_grpc_health_probe_binary", sha256 = "bfbe82e34645e91cdf3bacbb0d2dc7786f3c3cc4da6b64a446e5fdfb7bb0429f", downloaded_file_path = "grpc-health-probe", urls = [ "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.3.1/grpc_health_probe-linux-amd64", ], executable = True, ) if "davl-v3" not in native.existing_rules(): http_archive( name = "davl-v3", strip_prefix = "davl-{}".format(davl_v3_version), urls = 
["https://github.com/digital-asset/davl/archive/{}.tar.gz".format(davl_v3_version)], sha256 = davl_v3_sha256, build_file_content = """ package(default_visibility = ["//visibility:public"]) exports_files(["released/davl-v3.dar"]) """, ) if "davl" not in native.existing_rules(): http_archive( name = "davl", strip_prefix = "davl-{}".format(davl_version), urls = ["https://github.com/digital-asset/davl/archive/{}.tar.gz".format(davl_version)], sha256 = davl_sha256, build_file_content = """ package(default_visibility = ["//visibility:public"]) exports_files(["released/davl-v4.dar", "released/davl-v5.dar", "released/davl-upgrade-v3-v4.dar", "released/davl-upgrade-v4-v5.dar"]) """, ) if "daml-cheat-sheet" not in native.existing_rules(): http_archive( name = "daml-cheat-sheet", strip_prefix = "daml-cheat-sheet-{}".format(daml_cheat_sheet_version), urls = ["https://github.com/digital-asset/daml-cheat-sheet/archive/{}.tar.gz".format(daml_cheat_sheet_version)], sha256 = daml_cheat_sheet_sha256, build_file_content = """ package(default_visibility = ["//visibility:public"]) genrule( name = "site", srcs = ["_config.yml"] + glob(["**/*"], exclude = ["_config.yml", "LICENSE", "WORKSPACE", "BUILD.bazel", "README.md"]), outs = ["cheat-sheet.tar.gz"], tools = ["@jekyll_nix//:bin/jekyll"], cmd = ''' DIR=$$(dirname $(execpath _config.yml)) $(execpath @jekyll_nix//:bin/jekyll) build -s $$DIR tar hc _site \ --owner=1000 \ --group=1000 \ --mtime=2000-01-01\ 00:00Z \ --no-acls \ --no-xattrs \ --no-selinux \ --sort=name \ | gzip -n > $(OUTS) ''', ) """, )
47.128358
183
0.655941
# be resolvable from external workspaces otherwise. load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") rules_scala_version = "6c16cff213b76a4126bdc850956046da5db1daaa" rules_haskell_version = "ac87721a4dbc0f7dbe731df928d322f02ed93330" rules_haskell_sha256 = "684f91defad36e9d6ce3ac4213864b89e8f6fe813508ae93bfe80996447a1516" rules_nixpkgs_version = "d3c7bc94fed4001d5375632a936d743dc085c9a1" rules_nixpkgs_sha256 = "903c6b98aa6a298bf45a6b931e77a3313c40a0cb1b44fa00d9792f9e8aedbb35" buildifier_version = "0.26.0" buildifier_sha256 = "86592d703ecbe0c5cbb5139333a63268cf58d7efd2c459c8be8e69e77d135e29" zlib_version = "cacf7f1d4e3d44d871b605da3b647f07d718623f" zlib_sha256 = "6d4d6640ca3121620995ee255945161821218752b551a1a180f4215f7d124d45" rules_nodejs_version = "1.6.0" rules_nodejs_sha256 = "f9e7b9f42ae202cc2d2ce6d698ccb49a9f7f7ea572a78fd451696d03ef2ee116" # Recent davl. davl_version = "f2d7480d118f32626533d6a150a8ee7552cc0222" # 2020-03-23, "Deploy upgrade to DAML SDK 0.13.56-snapshot.20200318",https://github.com/digital-asset/davl/pull/233/commits. davl_sha256 = "3e8ae2a05724093e33b7f0363381e81a7e8e9655ccb3aa47ad540ea87e814321" # Pinned davl relied on by damlc packaging tests. davl_v3_version = "51d3977be2ab22f7f4434fd4692ca2e17a7cce23" davl_v3_sha256 = "e8e76e21b50fb3adab36df26045b1e8c3ee12814abc60f137d39b864d2eae166" # daml cheat sheet daml_cheat_sheet_version = "32bc69d42c49be5844650ddf81d3ac37e5f7fc8b" # 2020-05-19 daml_cheat_sheet_sha256 = "f21626f0eb258ad578d7a73afa2256d976fcf0680be2d5eeefbac392a9b01496" def daml_deps(): if "rules_haskell" not in native.existing_rules(): http_archive( name = "rules_haskell", strip_prefix = "rules_haskell-%s" % rules_haskell_version, urls = ["https://github.com/tweag/rules_haskell/archive/%s.tar.gz" % rules_haskell_version], patches = [ # Update and remove this patch once this is upstreamed. 
# See https://github.com/tweag/rules_haskell/pull/1281 "@com_github_digital_asset_daml//bazel_tools:haskell-strict-source-names.patch", # The fake libs issue should be fixed in upstream rules_haskell # or GHC. Remove this patch once that's available. "@com_github_digital_asset_daml//bazel_tools:haskell-windows-remove-fake-libs.patch", "@com_github_digital_asset_daml//bazel_tools:haskell-windows-extra-libraries.patch", "@com_github_digital_asset_daml//bazel_tools:haskell-no-isystem.patch", "@com_github_digital_asset_daml//bazel_tools:haskell-opt.patch", # This can be upstreamed. "@com_github_digital_asset_daml//bazel_tools:haskell-pgmc.patch", ], patch_args = ["-p1"], sha256 = rules_haskell_sha256, ) if "io_tweag_rules_nixpkgs" not in native.existing_rules(): http_archive( name = "io_tweag_rules_nixpkgs", strip_prefix = "rules_nixpkgs-%s" % rules_nixpkgs_version, urls = ["https://github.com/tweag/rules_nixpkgs/archive/%s.tar.gz" % rules_nixpkgs_version], sha256 = rules_nixpkgs_sha256, patches = [ # Remove once https://github.com/tweag/rules_nixpkgs/pull/128 # has been merged "@com_github_digital_asset_daml//bazel_tools:nixpkgs-hermetic-cc-toolchain.patch", # On CI and locally we observe occasional segmantation faults # of nix. A known issue since Nix 2.2.2 is that HTTP2 support # can cause such segmentation faults. Since Nix 2.3.2 it is # possible to disable HTTP2 via a command-line flag, which # reportedly solves the issue. 
See # https://github.com/NixOS/nix/issues/2733#issuecomment-518324335 "@com_github_digital_asset_daml//bazel_tools:nixpkgs-disable-http2.patch", ], patch_args = ["-p1"], ) if "com_github_madler_zlib" not in native.existing_rules(): http_archive( name = "com_github_madler_zlib", build_file = "@com_github_digital_asset_daml//3rdparty/c:zlib.BUILD", strip_prefix = "zlib-{}".format(zlib_version), urls = ["https://github.com/madler/zlib/archive/{}.tar.gz".format(zlib_version)], sha256 = zlib_sha256, ) if "io_bazel_rules_go" not in native.existing_rules(): http_archive( name = "io_bazel_rules_go", urls = [ "https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.2/rules_go-v0.20.2.tar.gz", "https://github.com/bazelbuild/rules_go/releases/download/v0.20.2/rules_go-v0.20.2.tar.gz", ], sha256 = "b9aa86ec08a292b97ec4591cf578e020b35f98e12173bbd4a921f84f583aebd9", ) if "rules_jvm_external" not in native.existing_rules(): http_archive( name = "rules_jvm_external", strip_prefix = "rules_jvm_external-2.8", sha256 = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad", url = "https://github.com/bazelbuild/rules_jvm_external/archive/2.8.zip", ) if "io_bazel_rules_scala" not in native.existing_rules(): http_archive( name = "io_bazel_rules_scala", url = "https://github.com/bazelbuild/rules_scala/archive/%s.zip" % rules_scala_version, type = "zip", strip_prefix = "rules_scala-%s" % rules_scala_version, sha256 = "132cf8eeaab67f3142cec17152b8415901e7fa8396dd585d6334eec21bf7419d", patches = [ "@com_github_digital_asset_daml//bazel_tools:scala-escape-jvmflags.patch", "@com_github_digital_asset_daml//bazel_tools:scala-fail-jmh-build-on-error.patch", ], patch_args = ["-p1"], ) if "io_bazel_rules_docker" not in native.existing_rules(): http_archive( name = "io_bazel_rules_docker", url = "https://github.com/bazelbuild/rules_docker/releases/download/v0.12.1/rules_docker-v0.12.1.tar.gz", strip_prefix = "rules_docker-0.12.1", sha256 = 
"14ac30773fdb393ddec90e158c9ec7ebb3f8a4fd533ec2abbfd8789ad81a284b", ) if "com_google_protobuf" not in native.existing_rules(): http_archive( name = "com_google_protobuf", sha256 = "1e622ce4b84b88b6d2cdf1db38d1a634fe2392d74f0b7b74ff98f3a51838ee53", strip_prefix = "protobuf-3.8.0", urls = ["https://github.com/google/protobuf/archive/v3.8.0.zip"], patches = [ "@com_github_digital_asset_daml//bazel_tools:proto-zlib-url.patch", ], patch_args = ["-p1"], ) if "bazel_gazelle" not in native.existing_rules(): http_archive( name = "bazel_gazelle", urls = [ "https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz", "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz", ], sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f", ) if "io_bazel_rules_sass" not in native.existing_rules(): http_archive( name = "io_bazel_rules_sass", sha256 = "7b9c9a88099d00dbb16be359c3b1946309d99673220c6b39c7e8bda8ecc692f8", strip_prefix = "rules_sass-1.24.4", urls = [ "https://github.com/bazelbuild/rules_sass/archive/1.24.4.zip", "https://mirror.bazel.build/github.com/bazelbuild/rules_sass/archive/1.24.4.zip", ], ) # Fetch rules_nodejs so we can install our npm dependencies if "build_bazel_rules_nodejs" not in native.existing_rules(): http_archive( name = "build_bazel_rules_nodejs", urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/{}/rules_nodejs-{}.tar.gz".format(rules_nodejs_version, rules_nodejs_version)], sha256 = rules_nodejs_sha256, patches = [ # Work around for https://github.com/bazelbuild/rules_nodejs/issues/1565 "@com_github_digital_asset_daml//bazel_tools:rules_nodejs_npm_cli_path.patch", "@com_github_digital_asset_daml//bazel_tools:rules_nodejs_node_dependency.patch", ], patch_args = ["-p1"], ) if "com_github_grpc_grpc" not in native.existing_rules(): # This should be kept in sync with the grpc version we get from Nix. 
http_archive( name = "com_github_grpc_grpc", strip_prefix = "grpc-1.23.1", urls = ["https://github.com/grpc/grpc/archive/v1.23.1.tar.gz"], sha256 = "dd7da002b15641e4841f20a1f3eb1e359edb69d5ccf8ac64c362823b05f523d9", patches = [ "@com_github_digital_asset_daml//bazel_tools:grpc-bazel-mingw.patch", ], patch_args = ["-p1"], ) if "io_grpc_grpc_java" not in native.existing_rules(): http_archive( name = "io_grpc_grpc_java", strip_prefix = "grpc-java-1.21.0", urls = ["https://github.com/grpc/grpc-java/archive/v1.21.0.tar.gz"], sha256 = "9bc289e861c6118623fcb931044d843183c31d0e4d53fc43c4a32b56d6bb87fa", patches = [ "@com_github_digital_asset_daml//bazel_tools:grpc-java-plugin-visibility.patch", ], patch_args = ["-p1"], ) if "com_github_johnynek_bazel_jar_jar" not in native.existing_rules(): http_archive( name = "com_github_johnynek_bazel_jar_jar", sha256 = "841ae424eec3f322d411eb49d949622cc84787cb4189a30698fa9adadb98deac", strip_prefix = "bazel_jar_jar-20dbf71f09b1c1c2a8575a42005a968b38805519", urls = ["https://github.com/johnynek/bazel_jar_jar/archive/20dbf71f09b1c1c2a8575a42005a968b38805519.zip"], # Latest commit SHA as at 2019/02/13 ) if "com_github_googleapis_googleapis" not in native.existing_rules(): http_archive( name = "com_github_googleapis_googleapis", strip_prefix = "googleapis-6c48ab5aef47dc14e02e2dc718d232a28067129d", urls = ["https://github.com/googleapis/googleapis/archive/6c48ab5aef47dc14e02e2dc718d232a28067129d.tar.gz"], sha256 = "70d7be6ad49b4424313aad118c8622aab1c5fdd5a529d4215d3884ff89264a71", ) # Buildifier. # It is written in Go and hence needs rules_go to be available. 
if "com_github_bazelbuild_buildtools" not in native.existing_rules(): http_archive( name = "com_github_bazelbuild_buildtools", sha256 = buildifier_sha256, strip_prefix = "buildtools-{}".format(buildifier_version), url = "https://github.com/bazelbuild/buildtools/archive/{}.tar.gz".format(buildifier_version), ) native.bind( name = "guava", actual = "@com_google_guava_guava//jar", ) native.bind( name = "gson", actual = "@com_google_code_gson_gson//jar", ) if "com_github_google_bazel_common" not in native.existing_rules(): http_archive( name = "com_github_google_bazel_common", sha256 = "48a209fed9575c9d108eaf11fb77f7fe6178a90135e4d60cac6f70c2603aa53a", strip_prefix = "bazel-common-9e3880428c1837db9fb13335ed390b7e33e346a7", urls = ["https://github.com/google/bazel-common/archive/9e3880428c1837db9fb13335ed390b7e33e346a7.zip"], ) if "com_github_grpc_ecosystem_grpc_health_probe_binary" not in native.existing_rules(): http_file( name = "com_github_grpc_ecosystem_grpc_health_probe_binary", sha256 = "bfbe82e34645e91cdf3bacbb0d2dc7786f3c3cc4da6b64a446e5fdfb7bb0429f", downloaded_file_path = "grpc-health-probe", urls = [ "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.3.1/grpc_health_probe-linux-amd64", ], executable = True, ) if "davl-v3" not in native.existing_rules(): http_archive( name = "davl-v3", strip_prefix = "davl-{}".format(davl_v3_version), urls = ["https://github.com/digital-asset/davl/archive/{}.tar.gz".format(davl_v3_version)], sha256 = davl_v3_sha256, build_file_content = """ package(default_visibility = ["//visibility:public"]) exports_files(["released/davl-v3.dar"]) """, ) if "davl" not in native.existing_rules(): http_archive( name = "davl", strip_prefix = "davl-{}".format(davl_version), urls = ["https://github.com/digital-asset/davl/archive/{}.tar.gz".format(davl_version)], sha256 = davl_sha256, build_file_content = """ package(default_visibility = ["//visibility:public"]) exports_files(["released/davl-v4.dar", "released/davl-v5.dar", 
"released/davl-upgrade-v3-v4.dar", "released/davl-upgrade-v4-v5.dar"]) """, ) if "daml-cheat-sheet" not in native.existing_rules(): http_archive( name = "daml-cheat-sheet", strip_prefix = "daml-cheat-sheet-{}".format(daml_cheat_sheet_version), urls = ["https://github.com/digital-asset/daml-cheat-sheet/archive/{}.tar.gz".format(daml_cheat_sheet_version)], sha256 = daml_cheat_sheet_sha256, build_file_content = """ package(default_visibility = ["//visibility:public"]) genrule( name = "site", srcs = ["_config.yml"] + glob(["**/*"], exclude = ["_config.yml", "LICENSE", "WORKSPACE", "BUILD.bazel", "README.md"]), outs = ["cheat-sheet.tar.gz"], tools = ["@jekyll_nix//:bin/jekyll"], cmd = ''' DIR=$$(dirname $(execpath _config.yml)) $(execpath @jekyll_nix//:bin/jekyll) build -s $$DIR tar hc _site \ --owner=1000 \ --group=1000 \ --mtime=2000-01-01\ 00:00Z \ --no-acls \ --no-xattrs \ --no-selinux \ --sort=name \ | gzip -n > $(OUTS) ''', ) """, )
true
true
f7f683deb119d3b142f6459541138afa3fa02625
275
py
Python
input_output.py
aaskov/convnet-est-loss
4fdb13dbd858700f3eaa92e6f7309787984beb8d
[ "MIT" ]
null
null
null
input_output.py
aaskov/convnet-est-loss
4fdb13dbd858700f3eaa92e6f7309787984beb8d
[ "MIT" ]
null
null
null
input_output.py
aaskov/convnet-est-loss
4fdb13dbd858700f3eaa92e6f7309787984beb8d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ convnet-est-loss """ import pickle def save_obj(obj, name): with open(name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name): with open(name + '.pkl', 'rb') as f: return pickle.load(f)
19.642857
52
0.589091
import pickle def save_obj(obj, name): with open(name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name): with open(name + '.pkl', 'rb') as f: return pickle.load(f)
true
true
f7f6840f6edecab2a351e699c31f3291bb25c113
630
py
Python
fibonacciDatasetCreator.py
ReetNayan/PythonMiscellaneousProgramsForFun
9fd51432726dbfb4b06e5e13fd143171a4319ab7
[ "MIT" ]
1
2021-08-04T01:46:19.000Z
2021-08-04T01:46:19.000Z
fibonacciDatasetCreator.py
ReetNayan/PythonMiscellaneousProgramsForFun
9fd51432726dbfb4b06e5e13fd143171a4319ab7
[ "MIT" ]
null
null
null
fibonacciDatasetCreator.py
ReetNayan/PythonMiscellaneousProgramsForFun
9fd51432726dbfb4b06e5e13fd143171a4319ab7
[ "MIT" ]
null
null
null
fileWrite = open("fibonacciDataset.csv", 'w+') num = int(input("Enter the number of data: ")) print("Writing new data in the file...") done = 0 a = 0 b = 1 string = "" pair = 0 while True: if pair < 2: if pair == 1: string = string + ',' + str(a) else: string = string + str(a) pair += 1 if pair == 2: fileWrite.write(string + "\n") done += 1 string = "" pair = 0 c = a a = b b = c+b if done == num: break print("Data written successfully!") fileWrite.close() print("File closed.")
19.090909
47
0.473016
fileWrite = open("fibonacciDataset.csv", 'w+') num = int(input("Enter the number of data: ")) print("Writing new data in the file...") done = 0 a = 0 b = 1 string = "" pair = 0 while True: if pair < 2: if pair == 1: string = string + ',' + str(a) else: string = string + str(a) pair += 1 if pair == 2: fileWrite.write(string + "\n") done += 1 string = "" pair = 0 c = a a = b b = c+b if done == num: break print("Data written successfully!") fileWrite.close() print("File closed.")
true
true
f7f684da60763f542bea5df9d86ec164816e18d3
5,026
py
Python
pydev_source/_pydevd_bundle/pydevd_command_line_handling.py
mangopipeline/3dsMax_PyDev_Debug
e02bf5d383f1b674f273473fe9aa5837c9559550
[ "MIT" ]
null
null
null
pydev_source/_pydevd_bundle/pydevd_command_line_handling.py
mangopipeline/3dsMax_PyDev_Debug
e02bf5d383f1b674f273473fe9aa5837c9559550
[ "MIT" ]
null
null
null
pydev_source/_pydevd_bundle/pydevd_command_line_handling.py
mangopipeline/3dsMax_PyDev_Debug
e02bf5d383f1b674f273473fe9aa5837c9559550
[ "MIT" ]
null
null
null
class ArgHandlerWithParam: ''' Handler for some arguments which needs a value ''' def __init__(self, arg_name, convert_val=None, default_val=None): self.arg_name = arg_name self.arg_v_rep = '--%s' % (arg_name,) self.convert_val = convert_val self.default_val = default_val def to_argv(self, lst, setup): v = setup.get(self.arg_name) if v is not None and v != self.default_val: lst.append(self.arg_v_rep) lst.append('%s' % (v,)) def handle_argv(self, argv, i, setup): assert argv[i] == self.arg_v_rep del argv[i] val = argv[i] if self.convert_val: val = self.convert_val(val) setup[self.arg_name] = val del argv[i] class ArgHandlerBool: ''' If a given flag is received, mark it as 'True' in setup. ''' def __init__(self, arg_name, default_val=False): self.arg_name = arg_name self.arg_v_rep = '--%s' % (arg_name,) self.default_val = default_val def to_argv(self, lst, setup): v = setup.get(self.arg_name) if v: lst.append(self.arg_v_rep) def handle_argv(self, argv, i, setup): assert argv[i] == self.arg_v_rep del argv[i] setup[self.arg_name] = True ACCEPTED_ARG_HANDLERS = [ ArgHandlerWithParam('port', int, 0), ArgHandlerWithParam('vm_type'), ArgHandlerWithParam('client'), ArgHandlerWithParam('access-token'), ArgHandlerWithParam('ide-access-token'), ArgHandlerBool('server'), ArgHandlerBool('DEBUG_RECORD_SOCKET_READS'), ArgHandlerBool('multiproc'), # Used by PyCharm (reuses connection: ssh tunneling) ArgHandlerBool('multiprocess'), # Used by PyDev (creates new connection to ide) ArgHandlerBool('save-signatures'), ArgHandlerBool('save-threading'), ArgHandlerBool('save-asyncio'), ArgHandlerBool('print-in-debugger-startup'), ArgHandlerBool('cmd-line'), ArgHandlerBool('module'), ArgHandlerBool('json-dap'), # Protocol used by ptvsd to communicate with pydevd (a single json message in each read) ArgHandlerBool('json-dap-http'), # Actual DAP (json messages over http protocol). 
] ARGV_REP_TO_HANDLER = {} for handler in ACCEPTED_ARG_HANDLERS: ARGV_REP_TO_HANDLER[handler.arg_v_rep] = handler def get_pydevd_file(): import pydevd f = pydevd.__file__ if f.endswith('.pyc'): f = f[:-1] elif f.endswith('$py.class'): f = f[:-len('$py.class')] + '.py' return f def setup_to_argv(setup): ''' :param dict setup: A dict previously gotten from process_command_line. :note: does not handle --file nor --DEBUG. ''' ret = [get_pydevd_file()] for handler in ACCEPTED_ARG_HANDLERS: if handler.arg_name in setup: handler.to_argv(ret, setup) return ret def process_command_line(argv): """ parses the arguments. removes our arguments from the command line """ setup = {} for handler in ACCEPTED_ARG_HANDLERS: setup[handler.arg_name] = handler.default_val setup['file'] = '' setup['qt-support'] = '' i = 0 del argv[0] while i < len(argv): handler = ARGV_REP_TO_HANDLER.get(argv[i]) if handler is not None: handler.handle_argv(argv, i, setup) elif argv[i].startswith('--qt-support'): # The --qt-support is special because we want to keep backward compatibility: # Previously, just passing '--qt-support' meant that we should use the auto-discovery mode # whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where # mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside'. if argv[i] == '--qt-support': setup['qt-support'] = 'auto' elif argv[i].startswith('--qt-support='): qt_support = argv[i][len('--qt-support='):] valid_modes = ('none', 'auto', 'pyqt5', 'pyqt4', 'pyside') if qt_support not in valid_modes: raise ValueError("qt-support mode invalid: " + qt_support) if qt_support == 'none': # On none, actually set an empty string to evaluate to False. setup['qt-support'] = '' else: setup['qt-support'] = qt_support else: raise ValueError("Unexpected definition for qt-support flag: " + argv[i]) del argv[i] elif argv[i] == '--file': # --file is special because it's the last one (so, no handler for it). 
del argv[i] setup['file'] = argv[i] i = len(argv) # pop out, file is our last argument elif argv[i] == '--DEBUG': from pydevd import set_debug del argv[i] set_debug(setup) else: raise ValueError("Unexpected option: " + argv[i]) return setup
32.425806
121
0.593514
class ArgHandlerWithParam: def __init__(self, arg_name, convert_val=None, default_val=None): self.arg_name = arg_name self.arg_v_rep = '--%s' % (arg_name,) self.convert_val = convert_val self.default_val = default_val def to_argv(self, lst, setup): v = setup.get(self.arg_name) if v is not None and v != self.default_val: lst.append(self.arg_v_rep) lst.append('%s' % (v,)) def handle_argv(self, argv, i, setup): assert argv[i] == self.arg_v_rep del argv[i] val = argv[i] if self.convert_val: val = self.convert_val(val) setup[self.arg_name] = val del argv[i] class ArgHandlerBool: def __init__(self, arg_name, default_val=False): self.arg_name = arg_name self.arg_v_rep = '--%s' % (arg_name,) self.default_val = default_val def to_argv(self, lst, setup): v = setup.get(self.arg_name) if v: lst.append(self.arg_v_rep) def handle_argv(self, argv, i, setup): assert argv[i] == self.arg_v_rep del argv[i] setup[self.arg_name] = True ACCEPTED_ARG_HANDLERS = [ ArgHandlerWithParam('port', int, 0), ArgHandlerWithParam('vm_type'), ArgHandlerWithParam('client'), ArgHandlerWithParam('access-token'), ArgHandlerWithParam('ide-access-token'), ArgHandlerBool('server'), ArgHandlerBool('DEBUG_RECORD_SOCKET_READS'), ArgHandlerBool('multiproc'), ArgHandlerBool('multiprocess'), ArgHandlerBool('save-signatures'), ArgHandlerBool('save-threading'), ArgHandlerBool('save-asyncio'), ArgHandlerBool('print-in-debugger-startup'), ArgHandlerBool('cmd-line'), ArgHandlerBool('module'), ArgHandlerBool('json-dap'), ArgHandlerBool('json-dap-http'), ] ARGV_REP_TO_HANDLER = {} for handler in ACCEPTED_ARG_HANDLERS: ARGV_REP_TO_HANDLER[handler.arg_v_rep] = handler def get_pydevd_file(): import pydevd f = pydevd.__file__ if f.endswith('.pyc'): f = f[:-1] elif f.endswith('$py.class'): f = f[:-len('$py.class')] + '.py' return f def setup_to_argv(setup): ret = [get_pydevd_file()] for handler in ACCEPTED_ARG_HANDLERS: if handler.arg_name in setup: handler.to_argv(ret, setup) return ret def process_command_line(argv): 
setup = {} for handler in ACCEPTED_ARG_HANDLERS: setup[handler.arg_name] = handler.default_val setup['file'] = '' setup['qt-support'] = '' i = 0 del argv[0] while i < len(argv): handler = ARGV_REP_TO_HANDLER.get(argv[i]) if handler is not None: handler.handle_argv(argv, i, setup) elif argv[i].startswith('--qt-support'): if argv[i] == '--qt-support': setup['qt-support'] = 'auto' elif argv[i].startswith('--qt-support='): qt_support = argv[i][len('--qt-support='):] valid_modes = ('none', 'auto', 'pyqt5', 'pyqt4', 'pyside') if qt_support not in valid_modes: raise ValueError("qt-support mode invalid: " + qt_support) if qt_support == 'none': setup['qt-support'] = '' else: setup['qt-support'] = qt_support else: raise ValueError("Unexpected definition for qt-support flag: " + argv[i]) del argv[i] elif argv[i] == '--file': del argv[i] setup['file'] = argv[i] i = len(argv) # pop out, file is our last argument elif argv[i] == '--DEBUG': from pydevd import set_debug del argv[i] set_debug(setup) else: raise ValueError("Unexpected option: " + argv[i]) return setup
true
true
f7f6855914c02597c65a8812372c45437caaa84e
23,774
py
Python
pytorch_lightning/strategies/ddp.py
alat-rights/pytorch-lightning
a4f1f3dc28982eb6578df62ca92b93f83a2defcc
[ "Apache-2.0" ]
null
null
null
pytorch_lightning/strategies/ddp.py
alat-rights/pytorch-lightning
a4f1f3dc28982eb6578df62ca92b93f83a2defcc
[ "Apache-2.0" ]
null
null
null
pytorch_lightning/strategies/ddp.py
alat-rights/pytorch-lightning
a4f1f3dc28982eb6578df62ca92b93f83a2defcc
[ "Apache-2.0" ]
null
null
null
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import shutil import signal import subprocess import sys import tempfile import time from pathlib import Path from time import sleep from typing import Any, Callable, Dict, List, Optional, Union import __main__ import numpy as np import torch import torch.distributed from torch.distributed import GradBucket from torch.nn import Module from torch.nn.parallel.distributed import DistributedDataParallel import pytorch_lightning as pl from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.overrides import LightningDistributedModule from pytorch_lightning.overrides.distributed import prepare_for_backward from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO from pytorch_lightning.plugins.precision import PrecisionPlugin from pytorch_lightning.strategies.parallel import ParallelStrategy from pytorch_lightning.strategies.strategy import TBroadcast from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities import ( _FAIRSCALE_AVAILABLE, _HYDRA_AVAILABLE, _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_8, _TORCH_GREATER_EQUAL_1_9, _TORCH_GREATER_EQUAL_1_10, ) from pytorch_lightning.utilities.distributed import _revert_sync_batchnorm, distributed_available from pytorch_lightning.utilities.distributed import group as _group 
from pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available from pytorch_lightning.utilities.enums import _StrategyType from pytorch_lightning.utilities.exceptions import DeadlockDetectedException from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn from pytorch_lightning.utilities.seed import reset_seed from pytorch_lightning.utilities.types import STEP_OUTPUT if _FAIRSCALE_AVAILABLE: from fairscale.optim import OSS if _HYDRA_AVAILABLE: from hydra.core.hydra_config import HydraConfig from hydra.utils import get_original_cwd, to_absolute_path if _TORCH_GREATER_EQUAL_1_8: from pytorch_lightning.utilities.distributed import register_ddp_comm_hook log = logging.getLogger(__name__) class DDPStrategy(ParallelStrategy): """Plugin for multi-process single-device training on one or multiple nodes. The main process in each node spawns N-1 child processes via :func:`subprocess.Popen`, where N is the number of devices (e.g. GPU) per node. It is very similar to how :mod:`torch.distributed.launch` launches processes. 
""" distributed_backend = _StrategyType.DDP def __init__( self, accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None, parallel_devices: Optional[List[torch.device]] = None, cluster_environment: Optional[ClusterEnvironment] = None, checkpoint_io: Optional[CheckpointIO] = None, precision_plugin: Optional[PrecisionPlugin] = None, ddp_comm_state: Optional[object] = None, ddp_comm_hook: Optional[Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]]] = None, ddp_comm_wrapper: Optional[ Callable[ [Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]], Any], torch.futures.Future[torch.Tensor], ] ] = None, model_averaging_period: Optional[int] = None, **kwargs: Union[Any, Dict[str, Any]], ) -> None: super().__init__( accelerator=accelerator, parallel_devices=parallel_devices, cluster_environment=cluster_environment, checkpoint_io=checkpoint_io, precision_plugin=precision_plugin, ) log.detail(f"{self.__class__.__name__}: initializing DDP plugin") self.interactive_ddp_procs: List[subprocess.Popen] = [] self._num_nodes = 1 self.sync_batchnorm = False self._ddp_kwargs = kwargs self._ddp_comm_state = ddp_comm_state self._ddp_comm_hook = ddp_comm_hook self._ddp_comm_wrapper = ddp_comm_wrapper self._model_averaging_period = model_averaging_period self._pids: Optional[List[int]] = None self._sync_dir: Optional[str] = None self._rank_0_has_called_call_children_scripts: bool = False self.set_world_ranks() @property def is_distributed(self) -> bool: return True @property def root_device(self) -> torch.device: return self.parallel_devices[self.local_rank] @property def num_nodes(self) -> int: return self._num_nodes @num_nodes.setter def num_nodes(self, num_nodes: int) -> None: # note that world ranks is related to num_nodes, when resetting it, need to reset world ranks self._num_nodes = num_nodes self.set_world_ranks() @property def num_processes(self) -> int: return len(self.parallel_devices) if self.parallel_devices is not None else 0 @property 
def distributed_sampler_kwargs(self) -> Dict[str, Any]: distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank) return distributed_sampler_kwargs @property def _is_single_process_single_device(self) -> bool: return True def setup_environment(self) -> None: # start the other scripts assert self.cluster_environment is not None if not self.cluster_environment.creates_processes_externally: self._call_children_scripts() self.setup_distributed() super().setup_environment() def setup(self, trainer: "pl.Trainer") -> None: super().setup(trainer) # share ddp pids to all processes self._rank_0_has_called_call_children_scripts = ( self.broadcast(self._rank_0_has_called_call_children_scripts) is True ) if self._should_run_deadlock_detection(): self._share_information_to_prevent_deadlock() # move the model to the correct device self.model_to_device() assert self.model is not None if self.sync_batchnorm: self.model = self.configure_sync_batchnorm(self.model) # skip wrapping the model if we are not fitting as no gradients need to be exchanged assert self.lightining_module is not None trainer_fn = self.lightning_module.trainer.state.fn if trainer_fn == TrainerFn.FITTING: self.configure_ddp() def _setup_model(self, model: Module) -> DistributedDataParallel: """Wraps the model into a :class:`~torch.nn.parallel.distributed.DistributedDataParallel` module.""" device_ids = self.determine_ddp_device_ids() log.detail(f"setting up DDP model with device ids: {device_ids}, kwargs: {self._ddp_kwargs}") return DistributedDataParallel(module=model, device_ids=device_ids, **self._ddp_kwargs) def _call_children_scripts(self) -> None: # bookkeeping of spawned processes self._check_can_spawn_children() assert self.cluster_environment is not None # DDP Environment variables os.environ["MASTER_ADDR"] = self.cluster_environment.main_address os.environ["MASTER_PORT"] = str(self.cluster_environment.main_port) # allow the user to pass the node rank 
os.environ["NODE_RANK"] = str(self.cluster_environment.node_rank()) os.environ["LOCAL_RANK"] = str(self.cluster_environment.local_rank()) # Check if the current calling command looked like `python a/b/c.py` or `python -m a.b.c` # See https://docs.python.org/3/reference/import.html#main-spec if __main__.__spec__ is None: # pragma: no-cover # Script called as `python a/b/c.py` # when user is using hydra find the absolute path path_lib = os.path.abspath if not _HYDRA_AVAILABLE else to_absolute_path # pull out the commands used to run the script and resolve the abs file path command = sys.argv try: full_path = path_lib(command[0]) except Exception: full_path = os.path.abspath(command[0]) command[0] = full_path # use the same python interpreter and actually running command = [sys.executable] + command else: # Script called as `python -m a.b.c` command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:] os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}" self.interactive_ddp_procs = [] for local_rank in range(1, self.num_processes): env_copy = os.environ.copy() env_copy["LOCAL_RANK"] = f"{local_rank}" # remove env var if global seed not set if os.environ.get("PL_GLOBAL_SEED") is None and "PL_GLOBAL_SEED" in env_copy: del env_copy["PL_GLOBAL_SEED"] # start process # if hydra is available and initialized, make sure to set the cwd correctly cwd: Optional[str] = None if _HYDRA_AVAILABLE: if HydraConfig.initialized(): cwd = get_original_cwd() os_cwd = f'"{os.getcwd()}"' command += [f"hydra.run.dir={os_cwd}", f"hydra.job.name=train_ddp_process_{local_rank}"] proc = subprocess.Popen(command, env=env_copy, cwd=cwd) self.interactive_ddp_procs.append(proc) # starting all processes at once can cause issues # with dataloaders delay between 1-10 seconds delay = np.random.uniform(1, 5, 1)[0] sleep(delay) self._rank_0_has_called_call_children_scripts = True def setup_distributed(self) -> None: log.detail(f"{self.__class__.__name__}: setting up 
distributed...") reset_seed() # determine which process we are and world size self.set_world_ranks() # set warning rank rank_zero_only.rank = self.global_rank # set up server using proc 0's ip address # try to init for 20 times at max in case ports are taken # where to store ip_table assert self.cluster_environment is not None init_dist_connection(self.cluster_environment, self.torch_distributed_backend) def _check_can_spawn_children(self) -> None: if self.local_rank != 0: raise RuntimeError( "Lightning attempted to launch new distributed processes with `local_rank > 0`. This should not happen." " Possible reasons: 1) LOCAL_RANK environment variable was incorrectly modified by the user," " 2) `ClusterEnvironment.creates_processes_externally` incorrectly implemented." ) def set_world_ranks(self) -> None: if self.cluster_environment is None: return self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank) self.cluster_environment.set_world_size(self.num_nodes * self.num_processes) rank_zero_only.rank = self.cluster_environment.global_rank() def pre_configure_ddp(self) -> None: # if unset, default `find_unused_parameters` `True` # Many models require setting this parameter to True, as there are corner cases # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True. # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible. self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True) assert self.lightning_module is not None if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get( "find_unused_parameters", False ): # TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization rank_zero_warn( "From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to" " properly work with DDP. 
Using `find_unused_parameters=True`." ) self._ddp_kwargs["find_unused_parameters"] = True def _register_ddp_hooks(self) -> None: log.detail(f"{self.__class__.__name__}: registering ddp hooks") # In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode # Since 1.9, DDP communication hooks can work on all backends. if _TORCH_GREATER_EQUAL_1_9 or ( _TORCH_GREATER_EQUAL_1_8 and self.root_device.type == "cuda" and self._is_single_process_single_device ): register_ddp_comm_hook( model=self.model, ddp_comm_state=self._ddp_comm_state, ddp_comm_hook=self._ddp_comm_hook, ddp_comm_wrapper=self._ddp_comm_wrapper, ) if _TORCH_GREATER_EQUAL_1_10 and self.lightning_module.trainer.state.fn == TrainerFn.FITTING: import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD if isinstance(self._ddp_comm_state, post_localSGD.PostLocalSGDState): self._reinit_optimizers_with_post_localSGD(self._ddp_comm_state.start_localSGD_iter) def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int) -> None: log.detail(f"{self.__class__.__name__}: reinitializing optimizers with post localSGD") optimizers = self.optimizers if self._model_averaging_period is None: raise ValueError( "Post-localSGD algorithm is used, but model averaging period is not provided to DDP strategy." 
) if _TORCH_GREATER_EQUAL_1_10: if not _IS_WINDOWS: from torch.distributed.optim import DistributedOptimizer import torch.distributed.algorithms.model_averaging.averagers as averagers from torch.distributed.optim import PostLocalSGDOptimizer, ZeroRedundancyOptimizer averager = averagers.PeriodicModelAverager(period=self._model_averaging_period, warmup_steps=warmup_steps) for x, optimizer in enumerate(optimizers): if isinstance(optimizer, LightningOptimizer): optimizer = optimizer._optimizer is_distributed_optimizer = isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False if ( is_distributed_optimizer or isinstance(optimizer, ZeroRedundancyOptimizer) or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS)) ): raise ValueError( f"Cannot wrap a distributed optimizer of type {optimizer.__name__} by PostLocalSGDOptimizer." ) if isinstance(optimizer, PostLocalSGDOptimizer): continue optim_class = type(optimizer) post_localSGD_optimizer = PostLocalSGDOptimizer( params=optimizer.param_groups, optimizer_class=optim_class, averager=averager, **optimizer.defaults, ) optimizers[x] = post_localSGD_optimizer del optimizer self.optimizers = optimizers def configure_ddp(self) -> None: log.detail(f"{self.__class__.__name__}: configuring DistributedDataParallel") self.pre_configure_ddp() assert self.model is not None self.model = self._setup_model(LightningDistributedModule(self.model)) self._register_ddp_hooks() def determine_ddp_device_ids(self) -> Optional[List[int]]: if self.root_device.type == "cpu": return None return [self.root_device.index] def barrier(self, *args, **kwargs) -> None: if not distributed_available(): return if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl": torch.distributed.barrier(device_ids=self.determine_ddp_device_ids()) else: torch.distributed.barrier() def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast: obj = [obj] if self.global_rank != src: obj = [None] 
torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD) return obj[0] def pre_backward(self, closure_loss: torch.Tensor) -> None: """Run before precision plugin executes backward.""" assert self.model is not None assert self.lightning_module is not None if not self.lightning_module.automatic_optimization: prepare_for_backward(self.model, closure_loss) def model_to_device(self) -> None: log.detail(f"{self.__class__.__name__}: moving model to device [{self.root_device}]...") if self.model: self.model.to(self.root_device) def reduce( self, tensor: torch.Tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str, None] = "mean" ) -> torch.Tensor: """Reduces a tensor from several distributed processes to one aggregated tensor. Args: tensor: the tensor to sync and reduce group: the process group to gather results from. Defaults to all processes (world) reduce_op: the reduction operation. Defaults to 'mean'/'avg'. Can also be a string 'sum' to calculate the sum during reduction. 
Return: reduced value, except when the input was not a tensor the output remains is unchanged """ if isinstance(tensor, torch.Tensor): tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op) return tensor def training_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.train_step_context(): assert self.model is not None return self.model(*args, **kwargs) def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.val_step_context(): if isinstance(self.model, DistributedDataParallel): # used when calling `trainer.fit` return self.model(*args, **kwargs) else: # used when calling `trainer.validate` assert self.lightning_module is not None return self.lightning_module.validation_step(*args, **kwargs) def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.test_step_context(): assert self.lightning_module is not None return self.lightning_module.test_step(*args, **kwargs) def predict_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.predict_step_context(): assert self.lightning_module is not None return self.lightning_module.predict_step(*args, **kwargs) def post_training_step(self) -> None: assert self.model is not None assert self.lightning_module is not None if not self.lightning_module.automatic_optimization: self.model.require_backward_grad_sync = True @classmethod def register_strategies(cls, strategy_registry: Dict) -> None: strategy_registry.register( "ddp_find_unused_parameters_false", cls, description="DDP Strategy with `find_unused_parameters` as False", find_unused_parameters=False, ) def _should_run_deadlock_detection(self) -> bool: """Determines whether the plugin will perform process reconciliation in case of errors. If the environment variable `PL_RECONCILE_PROCESS` is set, run detection regardless of the cluster environment. By default this is disabled. 
Otherwise, if the cluster environment creates the processes, allow the scheduler / parent process to perform the process termination, external to Lightning. """ return os.getenv("PL_RECONCILE_PROCESS", "0") == "1" or self._rank_0_has_called_call_children_scripts def _share_information_to_prevent_deadlock(self) -> None: self._share_pids() # there should be a unique sync_dir per nodes. if self.local_rank == 0: # create a temporary directory used to synchronize processes on deadlock. self._sync_dir = tempfile.mkdtemp() sync_dirs = [] global_node_rank_zero = 0 for _ in range(self.num_nodes): sync_dirs.append(self.broadcast(self._sync_dir, global_node_rank_zero)) global_node_rank_zero += self.world_size // self.num_nodes self._sync_dir = sync_dirs[self.node_rank] def _share_pids(self) -> None: """Make all DDP processes aware of all processes pids.""" self.barrier() pids = self.all_gather(torch.tensor(os.getpid(), device=self.root_device)) pids = pids.cpu().numpy().tolist() self._pids = pids if isinstance(pids, list) else [pids] def reconciliate_processes(self, trace: str) -> None: if self.world_size < 2: return if not self._should_run_deadlock_detection(): return sync_dir = self._sync_dir if not sync_dir: rank_zero_warn("Error handling mechanism for deadlock detection is uninitialized. Skipping check.") return # The cluster may be configured to periodically purge the `/tmp` # directory, in which case `sync_dir` may not exist anymore at this # point. Idempotently create it to ensure its existence. Path(sync_dir).mkdir(parents=True, exist_ok=True) # save a file locally. torch.save(True, os.path.join(sync_dir, f"{self.global_rank}.pl")) # sleep for a short time time.sleep(3) # return if all processes wrote a file in the `sync_dir`. # todo (tchaton) Add support for non-shared file-system which will fail. 
if len(os.listdir(sync_dir)) == (self.world_size // self.num_nodes): return if not self._pids: return for pid in self._pids: if pid != os.getpid(): os.kill(pid, signal.SIGKILL) shutil.rmtree(sync_dir) raise DeadlockDetectedException(f"DeadLock detected from rank: {self.global_rank} \n {trace}") def teardown(self) -> None: log.detail(f"{self.__class__.__name__}: tearing down DDP plugin") super().teardown() if isinstance(self.model, DistributedDataParallel): self.model = self.lightning_module assert self.model is not None if self.sync_batchnorm: self.model = _revert_sync_batchnorm(self.model) if self.root_device.type == "cuda": # GPU teardown log.detail(f"{self.__class__.__name__}: moving model to CPU") assert self.lightning_module is not None self.lightning_module.cpu() # clean up memory torch.cuda.empty_cache()
43.622018
120
0.678388
import logging import os import shutil import signal import subprocess import sys import tempfile import time from pathlib import Path from time import sleep from typing import Any, Callable, Dict, List, Optional, Union import __main__ import numpy as np import torch import torch.distributed from torch.distributed import GradBucket from torch.nn import Module from torch.nn.parallel.distributed import DistributedDataParallel import pytorch_lightning as pl from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.overrides import LightningDistributedModule from pytorch_lightning.overrides.distributed import prepare_for_backward from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO from pytorch_lightning.plugins.precision import PrecisionPlugin from pytorch_lightning.strategies.parallel import ParallelStrategy from pytorch_lightning.strategies.strategy import TBroadcast from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities import ( _FAIRSCALE_AVAILABLE, _HYDRA_AVAILABLE, _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_8, _TORCH_GREATER_EQUAL_1_9, _TORCH_GREATER_EQUAL_1_10, ) from pytorch_lightning.utilities.distributed import _revert_sync_batchnorm, distributed_available from pytorch_lightning.utilities.distributed import group as _group from pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available from pytorch_lightning.utilities.enums import _StrategyType from pytorch_lightning.utilities.exceptions import DeadlockDetectedException from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn from pytorch_lightning.utilities.seed import reset_seed from pytorch_lightning.utilities.types import STEP_OUTPUT if _FAIRSCALE_AVAILABLE: from fairscale.optim import OSS if _HYDRA_AVAILABLE: from hydra.core.hydra_config import HydraConfig from hydra.utils 
import get_original_cwd, to_absolute_path if _TORCH_GREATER_EQUAL_1_8: from pytorch_lightning.utilities.distributed import register_ddp_comm_hook log = logging.getLogger(__name__) class DDPStrategy(ParallelStrategy): distributed_backend = _StrategyType.DDP def __init__( self, accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None, parallel_devices: Optional[List[torch.device]] = None, cluster_environment: Optional[ClusterEnvironment] = None, checkpoint_io: Optional[CheckpointIO] = None, precision_plugin: Optional[PrecisionPlugin] = None, ddp_comm_state: Optional[object] = None, ddp_comm_hook: Optional[Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]]] = None, ddp_comm_wrapper: Optional[ Callable[ [Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]], Any], torch.futures.Future[torch.Tensor], ] ] = None, model_averaging_period: Optional[int] = None, **kwargs: Union[Any, Dict[str, Any]], ) -> None: super().__init__( accelerator=accelerator, parallel_devices=parallel_devices, cluster_environment=cluster_environment, checkpoint_io=checkpoint_io, precision_plugin=precision_plugin, ) log.detail(f"{self.__class__.__name__}: initializing DDP plugin") self.interactive_ddp_procs: List[subprocess.Popen] = [] self._num_nodes = 1 self.sync_batchnorm = False self._ddp_kwargs = kwargs self._ddp_comm_state = ddp_comm_state self._ddp_comm_hook = ddp_comm_hook self._ddp_comm_wrapper = ddp_comm_wrapper self._model_averaging_period = model_averaging_period self._pids: Optional[List[int]] = None self._sync_dir: Optional[str] = None self._rank_0_has_called_call_children_scripts: bool = False self.set_world_ranks() @property def is_distributed(self) -> bool: return True @property def root_device(self) -> torch.device: return self.parallel_devices[self.local_rank] @property def num_nodes(self) -> int: return self._num_nodes @num_nodes.setter def num_nodes(self, num_nodes: int) -> None: self._num_nodes = num_nodes self.set_world_ranks() @property 
def num_processes(self) -> int: return len(self.parallel_devices) if self.parallel_devices is not None else 0 @property def distributed_sampler_kwargs(self) -> Dict[str, Any]: distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank) return distributed_sampler_kwargs @property def _is_single_process_single_device(self) -> bool: return True def setup_environment(self) -> None: assert self.cluster_environment is not None if not self.cluster_environment.creates_processes_externally: self._call_children_scripts() self.setup_distributed() super().setup_environment() def setup(self, trainer: "pl.Trainer") -> None: super().setup(trainer) self._rank_0_has_called_call_children_scripts = ( self.broadcast(self._rank_0_has_called_call_children_scripts) is True ) if self._should_run_deadlock_detection(): self._share_information_to_prevent_deadlock() self.model_to_device() assert self.model is not None if self.sync_batchnorm: self.model = self.configure_sync_batchnorm(self.model) assert self.lightining_module is not None trainer_fn = self.lightning_module.trainer.state.fn if trainer_fn == TrainerFn.FITTING: self.configure_ddp() def _setup_model(self, model: Module) -> DistributedDataParallel: device_ids = self.determine_ddp_device_ids() log.detail(f"setting up DDP model with device ids: {device_ids}, kwargs: {self._ddp_kwargs}") return DistributedDataParallel(module=model, device_ids=device_ids, **self._ddp_kwargs) def _call_children_scripts(self) -> None: self._check_can_spawn_children() assert self.cluster_environment is not None os.environ["MASTER_ADDR"] = self.cluster_environment.main_address os.environ["MASTER_PORT"] = str(self.cluster_environment.main_port) os.environ["NODE_RANK"] = str(self.cluster_environment.node_rank()) os.environ["LOCAL_RANK"] = str(self.cluster_environment.local_rank()) f __main__.__spec__ is None: path_lib = os.path.abspath if not _HYDRA_AVAILABLE else to_absolute_path command = sys.argv try: 
full_path = path_lib(command[0]) except Exception: full_path = os.path.abspath(command[0]) command[0] = full_path command = [sys.executable] + command else: command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:] os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}" self.interactive_ddp_procs = [] for local_rank in range(1, self.num_processes): env_copy = os.environ.copy() env_copy["LOCAL_RANK"] = f"{local_rank}" if os.environ.get("PL_GLOBAL_SEED") is None and "PL_GLOBAL_SEED" in env_copy: del env_copy["PL_GLOBAL_SEED"] cwd: Optional[str] = None if _HYDRA_AVAILABLE: if HydraConfig.initialized(): cwd = get_original_cwd() os_cwd = f'"{os.getcwd()}"' command += [f"hydra.run.dir={os_cwd}", f"hydra.job.name=train_ddp_process_{local_rank}"] proc = subprocess.Popen(command, env=env_copy, cwd=cwd) self.interactive_ddp_procs.append(proc) delay = np.random.uniform(1, 5, 1)[0] sleep(delay) self._rank_0_has_called_call_children_scripts = True def setup_distributed(self) -> None: log.detail(f"{self.__class__.__name__}: setting up distributed...") reset_seed() self.set_world_ranks() rank_zero_only.rank = self.global_rank # try to init for 20 times at max in case ports are taken # where to store ip_table assert self.cluster_environment is not None init_dist_connection(self.cluster_environment, self.torch_distributed_backend) def _check_can_spawn_children(self) -> None: if self.local_rank != 0: raise RuntimeError( "Lightning attempted to launch new distributed processes with `local_rank > 0`. This should not happen." " Possible reasons: 1) LOCAL_RANK environment variable was incorrectly modified by the user," " 2) `ClusterEnvironment.creates_processes_externally` incorrectly implemented." 
) def set_world_ranks(self) -> None: if self.cluster_environment is None: return self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank) self.cluster_environment.set_world_size(self.num_nodes * self.num_processes) rank_zero_only.rank = self.cluster_environment.global_rank() def pre_configure_ddp(self) -> None: # if unset, default `find_unused_parameters` `True` # Many models require setting this parameter to True, as there are corner cases # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True. # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible. self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True) assert self.lightning_module is not None if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get( "find_unused_parameters", False ): # TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization rank_zero_warn( "From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to" " properly work with DDP. Using `find_unused_parameters=True`." ) self._ddp_kwargs["find_unused_parameters"] = True def _register_ddp_hooks(self) -> None: log.detail(f"{self.__class__.__name__}: registering ddp hooks") # In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode # Since 1.9, DDP communication hooks can work on all backends. 
if _TORCH_GREATER_EQUAL_1_9 or ( _TORCH_GREATER_EQUAL_1_8 and self.root_device.type == "cuda" and self._is_single_process_single_device ): register_ddp_comm_hook( model=self.model, ddp_comm_state=self._ddp_comm_state, ddp_comm_hook=self._ddp_comm_hook, ddp_comm_wrapper=self._ddp_comm_wrapper, ) if _TORCH_GREATER_EQUAL_1_10 and self.lightning_module.trainer.state.fn == TrainerFn.FITTING: import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD if isinstance(self._ddp_comm_state, post_localSGD.PostLocalSGDState): self._reinit_optimizers_with_post_localSGD(self._ddp_comm_state.start_localSGD_iter) def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int) -> None: log.detail(f"{self.__class__.__name__}: reinitializing optimizers with post localSGD") optimizers = self.optimizers if self._model_averaging_period is None: raise ValueError( "Post-localSGD algorithm is used, but model averaging period is not provided to DDP strategy." ) if _TORCH_GREATER_EQUAL_1_10: if not _IS_WINDOWS: from torch.distributed.optim import DistributedOptimizer import torch.distributed.algorithms.model_averaging.averagers as averagers from torch.distributed.optim import PostLocalSGDOptimizer, ZeroRedundancyOptimizer averager = averagers.PeriodicModelAverager(period=self._model_averaging_period, warmup_steps=warmup_steps) for x, optimizer in enumerate(optimizers): if isinstance(optimizer, LightningOptimizer): optimizer = optimizer._optimizer is_distributed_optimizer = isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False if ( is_distributed_optimizer or isinstance(optimizer, ZeroRedundancyOptimizer) or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS)) ): raise ValueError( f"Cannot wrap a distributed optimizer of type {optimizer.__name__} by PostLocalSGDOptimizer." 
) if isinstance(optimizer, PostLocalSGDOptimizer): continue optim_class = type(optimizer) post_localSGD_optimizer = PostLocalSGDOptimizer( params=optimizer.param_groups, optimizer_class=optim_class, averager=averager, **optimizer.defaults, ) optimizers[x] = post_localSGD_optimizer del optimizer self.optimizers = optimizers def configure_ddp(self) -> None: log.detail(f"{self.__class__.__name__}: configuring DistributedDataParallel") self.pre_configure_ddp() assert self.model is not None self.model = self._setup_model(LightningDistributedModule(self.model)) self._register_ddp_hooks() def determine_ddp_device_ids(self) -> Optional[List[int]]: if self.root_device.type == "cpu": return None return [self.root_device.index] def barrier(self, *args, **kwargs) -> None: if not distributed_available(): return if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl": torch.distributed.barrier(device_ids=self.determine_ddp_device_ids()) else: torch.distributed.barrier() def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast: obj = [obj] if self.global_rank != src: obj = [None] torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD) return obj[0] def pre_backward(self, closure_loss: torch.Tensor) -> None: assert self.model is not None assert self.lightning_module is not None if not self.lightning_module.automatic_optimization: prepare_for_backward(self.model, closure_loss) def model_to_device(self) -> None: log.detail(f"{self.__class__.__name__}: moving model to device [{self.root_device}]...") if self.model: self.model.to(self.root_device) def reduce( self, tensor: torch.Tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str, None] = "mean" ) -> torch.Tensor: if isinstance(tensor, torch.Tensor): tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op) return tensor def training_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.train_step_context(): assert self.model is not None return 
self.model(*args, **kwargs) def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.val_step_context(): if isinstance(self.model, DistributedDataParallel): # used when calling `trainer.fit` return self.model(*args, **kwargs) else: # used when calling `trainer.validate` assert self.lightning_module is not None return self.lightning_module.validation_step(*args, **kwargs) def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: with self.precision_plugin.test_step_context(): assert self.lightning_module is not None return self.lightning_module.test_step(*args, **kwargs) def predict_step(self, *args, **kwargs) -> STEP_OUTPUT: with self.precision_plugin.predict_step_context(): assert self.lightning_module is not None return self.lightning_module.predict_step(*args, **kwargs) def post_training_step(self) -> None: assert self.model is not None assert self.lightning_module is not None if not self.lightning_module.automatic_optimization: self.model.require_backward_grad_sync = True @classmethod def register_strategies(cls, strategy_registry: Dict) -> None: strategy_registry.register( "ddp_find_unused_parameters_false", cls, description="DDP Strategy with `find_unused_parameters` as False", find_unused_parameters=False, ) def _should_run_deadlock_detection(self) -> bool: return os.getenv("PL_RECONCILE_PROCESS", "0") == "1" or self._rank_0_has_called_call_children_scripts def _share_information_to_prevent_deadlock(self) -> None: self._share_pids() # there should be a unique sync_dir per nodes. if self.local_rank == 0: # create a temporary directory used to synchronize processes on deadlock. 
self._sync_dir = tempfile.mkdtemp() sync_dirs = [] global_node_rank_zero = 0 for _ in range(self.num_nodes): sync_dirs.append(self.broadcast(self._sync_dir, global_node_rank_zero)) global_node_rank_zero += self.world_size // self.num_nodes self._sync_dir = sync_dirs[self.node_rank] def _share_pids(self) -> None: self.barrier() pids = self.all_gather(torch.tensor(os.getpid(), device=self.root_device)) pids = pids.cpu().numpy().tolist() self._pids = pids if isinstance(pids, list) else [pids] def reconciliate_processes(self, trace: str) -> None: if self.world_size < 2: return if not self._should_run_deadlock_detection(): return sync_dir = self._sync_dir if not sync_dir: rank_zero_warn("Error handling mechanism for deadlock detection is uninitialized. Skipping check.") return # The cluster may be configured to periodically purge the `/tmp` # directory, in which case `sync_dir` may not exist anymore at this # point. Idempotently create it to ensure its existence. Path(sync_dir).mkdir(parents=True, exist_ok=True) # save a file locally. torch.save(True, os.path.join(sync_dir, f"{self.global_rank}.pl")) # sleep for a short time time.sleep(3) # return if all processes wrote a file in the `sync_dir`. # todo (tchaton) Add support for non-shared file-system which will fail. 
if len(os.listdir(sync_dir)) == (self.world_size // self.num_nodes): return if not self._pids: return for pid in self._pids: if pid != os.getpid(): os.kill(pid, signal.SIGKILL) shutil.rmtree(sync_dir) raise DeadlockDetectedException(f"DeadLock detected from rank: {self.global_rank} \n {trace}") def teardown(self) -> None: log.detail(f"{self.__class__.__name__}: tearing down DDP plugin") super().teardown() if isinstance(self.model, DistributedDataParallel): self.model = self.lightning_module assert self.model is not None if self.sync_batchnorm: self.model = _revert_sync_batchnorm(self.model) if self.root_device.type == "cuda": # GPU teardown log.detail(f"{self.__class__.__name__}: moving model to CPU") assert self.lightning_module is not None self.lightning_module.cpu() # clean up memory torch.cuda.empty_cache()
true
true
f7f68585426061425b5c370c764dc2a77a726137
387
py
Python
djnago_portfolio/projects/migrations/0002_auto_20200605_0855.py
RohanMiraje/my_django_learning_start
57f5dd0c984862119a551e640bf4b31d583b4353
[ "MIT" ]
null
null
null
djnago_portfolio/projects/migrations/0002_auto_20200605_0855.py
RohanMiraje/my_django_learning_start
57f5dd0c984862119a551e640bf4b31d583b4353
[ "MIT" ]
null
null
null
djnago_portfolio/projects/migrations/0002_auto_20200605_0855.py
RohanMiraje/my_django_learning_start
57f5dd0c984862119a551e640bf4b31d583b4353
[ "MIT" ]
null
null
null
# Generated by Django 3.0.7 on 2020-06-05 03:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('projects', '0001_initial'), ] operations = [ migrations.AlterField( model_name='project', name='image', field=models.FilePathField(path='/projects/img'), ), ]
20.368421
61
0.589147
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('projects', '0001_initial'), ] operations = [ migrations.AlterField( model_name='project', name='image', field=models.FilePathField(path='/projects/img'), ), ]
true
true
f7f6869f92e17d9188222683b1cd610a511312b9
3,299
py
Python
api/autotest/common/log.py
P-JIANGH/autonium
99a7de401c378e5e546727e0f920e11ce7e24a15
[ "Apache-2.0" ]
null
null
null
api/autotest/common/log.py
P-JIANGH/autonium
99a7de401c378e5e546727e0f920e11ce7e24a15
[ "Apache-2.0" ]
null
null
null
api/autotest/common/log.py
P-JIANGH/autonium
99a7de401c378e5e546727e0f920e11ce7e24a15
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 """ Log输出器 """ __author__ = 'JIANGH' import logging, time, threading from .config_reader import readconfig # 设定log等级 LEVEL = getattr(logging, readconfig('result', 'log_level'), logging.NOTSET) """ 设定log信息格式,可设置参数如下,引用自python的文档: %(name)s Name of the logger (logging channel) %(levelno)s Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL) %(levelname)s Text logging level for the message ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") %(pathname)s Full pathname of the source file where the logging call was issued (if available) %(_filename)s _filename portion of pathname %(module)s Module (name portion of _filename) %(lineno)d Source line number where the logging call was issued (if available) %(funcName)s Function name %(created)f Time when the LogRecord was created (time.time() return value) %(asctime)s Textual time when the LogRecord was created %(msecs)d Millisecond portion of the creation time %(relativeCreated)d Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded (typically at application startup time) %(thread)d Thread ID (if available) %(threadName)s Thread name (if available) %(process)d Process ID (if available) %(message)s The result of record.getMessage(), computed just as the record is emitted """ FORMAT = '%(asctime)s %(levelname)s %(name)s %(message)s' # 全局设置 logging.basicConfig(level=LEVEL, format=FORMAT) class Logger(): """ 将logger实例存于dict中,键为线程ID 取得配置好的logger。TODO: SocketHandler待添加 可以同时输出到控制台和文件中去 文件名格式:YYYYMMDDHHMMSS-%(name).log """ __logger_dict = {} def __init__(self, filename): self.thread_id = threading.get_ident() from ..testcodegen import time_format # 设置文件全路径 _filepath = readconfig('result', 'log_folder') # 使用时间戳设置文件名 self.__filename = _filepath + time.strftime(time_format, time.localtime(time.time())) + '-' + filename + '.log' self.__logger = self.init_logger(filename) def init_logger(self, name): # 如果当前线程没有logger对象则新建实例 if 
self.__logger_dict.get(self.thread_id) == None: _log_creator = logging.getLogger(name=name) # socket = logging.handlers.SocketHandler TODO # 创建文件输出处理器 _console = logging.FileHandler(filename=self.__filename, mode='w', encoding='utf-8') # 设置文件输出处理器的格式,使用FORMAT全局变量进行统一 _console.setFormatter(logging.Formatter(FORMAT)) _log_creator.addHandler(_console) self.__logger_dict[self.thread_id] = _log_creator print('在线程%d新建一个Logger' % self.thread_id) else: _log_creator = self.__logger_dict.get(self.thread_id) _log_creator.name = name return _log_creator @property def log_path(self): return self.__filename def get_logger(self, name=None): if not name == None: self.__logger.name = name return self.__logger def close(self): for handler in self.__logger.handlers: self.__logger.removeHandler(handler) self.__logger = None self.__logger_dict.pop(self.thread_id)
33.323232
115
0.666869
__author__ = 'JIANGH' import logging, time, threading from .config_reader import readconfig LEVEL = getattr(logging, readconfig('result', 'log_level'), logging.NOTSET) FORMAT = '%(asctime)s %(levelname)s %(name)s %(message)s' logging.basicConfig(level=LEVEL, format=FORMAT) class Logger(): __logger_dict = {} def __init__(self, filename): self.thread_id = threading.get_ident() from ..testcodegen import time_format _filepath = readconfig('result', 'log_folder') self.__filename = _filepath + time.strftime(time_format, time.localtime(time.time())) + '-' + filename + '.log' self.__logger = self.init_logger(filename) def init_logger(self, name): if self.__logger_dict.get(self.thread_id) == None: _log_creator = logging.getLogger(name=name) _console = logging.FileHandler(filename=self.__filename, mode='w', encoding='utf-8') _console.setFormatter(logging.Formatter(FORMAT)) _log_creator.addHandler(_console) self.__logger_dict[self.thread_id] = _log_creator print('在线程%d新建一个Logger' % self.thread_id) else: _log_creator = self.__logger_dict.get(self.thread_id) _log_creator.name = name return _log_creator @property def log_path(self): return self.__filename def get_logger(self, name=None): if not name == None: self.__logger.name = name return self.__logger def close(self): for handler in self.__logger.handlers: self.__logger.removeHandler(handler) self.__logger = None self.__logger_dict.pop(self.thread_id)
true
true
f7f687e62a2c3254c6462215e190dd0fd5f2eab8
2,664
py
Python
python/ansible/plugins/action/adcm_delete_service.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
python/ansible/plugins/action/adcm_delete_service.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
python/ansible/plugins/action/adcm_delete_service.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=wrong-import-position, unused-import, import-error from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'supported_by': 'Arenadata'} DOCUMENTATION = r''' --- module: adcm_delete_service short_description: delete service from cluster in ADCM DB description: - The C(adcm_delete_service) module is intended to delete service from ADCM DB. This module should be run in service context. Service Id is taken from context. 
options: ''' EXAMPLES = r''' - name: delete service from cluster adcm_delete_service: ''' RETURN = r''' ''' import sys from ansible.errors import AnsibleError from ansible.plugins.action import ActionBase sys.path.append('/adcm/python') import adcm.init_django import cm.api from cm.ansible_plugin import get_object_id_from_context from cm.errors import AdcmEx from cm.logger import log class ActionModule(ActionBase): TRANSFERS_FILES = False _VALID_ARGS = frozenset(()) def run(self, tmp=None, task_vars=None): super().run(tmp, task_vars) service = self._task.args.get('service', None) if service: msg = 'You can delete service by name only in cluster context' cluster_id = get_object_id_from_context(task_vars, 'cluster_id', 'cluster', err_msg=msg) log.info('ansible module adcm_delete_service: service "%s"', service) try: cm.api.delete_service_by_name(service, cluster_id) except AdcmEx as e: raise AnsibleError(e.code + ":" + e.msg) from e else: msg = 'You can delete service only in service context' service_id = get_object_id_from_context(task_vars, 'service_id', 'service', err_msg=msg) log.info('ansible module adcm_delete_service: service #%s', service_id) try: cm.api.delete_service_by_id(service_id) except AdcmEx as e: raise AnsibleError(e.code + ":" + e.msg) from e return {"failed": False, "changed": True}
34.153846
100
0.697823
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'supported_by': 'Arenadata'} DOCUMENTATION = r''' --- module: adcm_delete_service short_description: delete service from cluster in ADCM DB description: - The C(adcm_delete_service) module is intended to delete service from ADCM DB. This module should be run in service context. Service Id is taken from context. options: ''' EXAMPLES = r''' - name: delete service from cluster adcm_delete_service: ''' RETURN = r''' ''' import sys from ansible.errors import AnsibleError from ansible.plugins.action import ActionBase sys.path.append('/adcm/python') import adcm.init_django import cm.api from cm.ansible_plugin import get_object_id_from_context from cm.errors import AdcmEx from cm.logger import log class ActionModule(ActionBase): TRANSFERS_FILES = False _VALID_ARGS = frozenset(()) def run(self, tmp=None, task_vars=None): super().run(tmp, task_vars) service = self._task.args.get('service', None) if service: msg = 'You can delete service by name only in cluster context' cluster_id = get_object_id_from_context(task_vars, 'cluster_id', 'cluster', err_msg=msg) log.info('ansible module adcm_delete_service: service "%s"', service) try: cm.api.delete_service_by_name(service, cluster_id) except AdcmEx as e: raise AnsibleError(e.code + ":" + e.msg) from e else: msg = 'You can delete service only in service context' service_id = get_object_id_from_context(task_vars, 'service_id', 'service', err_msg=msg) log.info('ansible module adcm_delete_service: service #%s', service_id) try: cm.api.delete_service_by_id(service_id) except AdcmEx as e: raise AnsibleError(e.code + ":" + e.msg) from e return {"failed": False, "changed": True}
true
true
f7f6885e7e8e6b42973282ac8022cf893bad5ad5
2,172
py
Python
colcon_core/shell/installed_packages.py
esteve/colcon-core
c11ce9cb27482dac85f05b50b3cd2c1b459ae6b3
[ "Apache-2.0" ]
null
null
null
colcon_core/shell/installed_packages.py
esteve/colcon-core
c11ce9cb27482dac85f05b50b3cd2c1b459ae6b3
[ "Apache-2.0" ]
13
2020-04-02T21:11:56.000Z
2022-01-27T05:47:30.000Z
colcon_core/shell/installed_packages.py
esteve/colcon-core
c11ce9cb27482dac85f05b50b3cd2c1b459ae6b3
[ "Apache-2.0" ]
null
null
null
# Copyright 2016-2021 Dirk Thomas # Licensed under the Apache License, Version 2.0 from pathlib import Path from colcon_core.location import get_relative_package_index_path from colcon_core.shell import FindInstalledPackagesExtensionPoint class IsolatedInstalledPackageFinder(FindInstalledPackagesExtensionPoint): """Find installed packages in colcon isolated install spaces.""" def find_installed_packages(self, install_base: Path): """Find installed packages in colcon isolated install spaces.""" marker_file = install_base / '.colcon_install_layout' if not marker_file.is_file(): return None install_layout = marker_file.read_text().rstrip() if install_layout != 'isolated': return None packages = {} # for each subdirectory look for the package specific file for p in install_base.iterdir(): if not p.is_dir(): continue if p.name.startswith('.'): continue marker = p / get_relative_package_index_path() / p.name if marker.is_file(): packages[p.name] = p return packages class MergedInstalledPackageFinder(FindInstalledPackagesExtensionPoint): """Find installed packages in colcon merged install spaces.""" def find_installed_packages(self, install_base: Path): """Find installed packages in colcon isolated install spaces.""" marker_file = install_base / '.colcon_install_layout' if not marker_file.is_file(): return None install_layout = marker_file.read_text().rstrip() if install_layout != 'merged': return None packages = {} # find all files in the subdirectory if (install_base / get_relative_package_index_path()).is_dir(): package_index = install_base / get_relative_package_index_path() for p in package_index.iterdir(): if not p.is_file(): continue if p.name.startswith('.'): continue packages[p.name] = install_base return packages
37.448276
76
0.645488
from pathlib import Path from colcon_core.location import get_relative_package_index_path from colcon_core.shell import FindInstalledPackagesExtensionPoint class IsolatedInstalledPackageFinder(FindInstalledPackagesExtensionPoint): def find_installed_packages(self, install_base: Path): marker_file = install_base / '.colcon_install_layout' if not marker_file.is_file(): return None install_layout = marker_file.read_text().rstrip() if install_layout != 'isolated': return None packages = {} for p in install_base.iterdir(): if not p.is_dir(): continue if p.name.startswith('.'): continue marker = p / get_relative_package_index_path() / p.name if marker.is_file(): packages[p.name] = p return packages class MergedInstalledPackageFinder(FindInstalledPackagesExtensionPoint): def find_installed_packages(self, install_base: Path): marker_file = install_base / '.colcon_install_layout' if not marker_file.is_file(): return None install_layout = marker_file.read_text().rstrip() if install_layout != 'merged': return None packages = {} if (install_base / get_relative_package_index_path()).is_dir(): package_index = install_base / get_relative_package_index_path() for p in package_index.iterdir(): if not p.is_file(): continue if p.name.startswith('.'): continue packages[p.name] = install_base return packages
true
true
f7f689344886ac882047419d6978cd9edc1358bc
4,557
py
Python
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/models/_models_py3.py
iscai-msft/azure-sdk-for-python
83715b95c41e519d5be7f1180195e2fba136fc0f
[ "MIT" ]
1
2021-06-02T08:01:35.000Z
2021-06-02T08:01:35.000Z
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/models/_models_py3.py
iscai-msft/azure-sdk-for-python
83715b95c41e519d5be7f1180195e2fba136fc0f
[ "MIT" ]
226
2019-07-24T07:57:21.000Z
2019-10-15T01:07:24.000Z
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/models/_models_py3.py
iscai-msft/azure-sdk-for-python
83715b95c41e519d5be7f1180195e2fba136fc0f
[ "MIT" ]
2
2021-05-23T16:46:31.000Z
2021-05-26T23:51:09.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class CloudError(Model): """CloudError. """ _attribute_map = { } class ManagementLockObject(Model): """The lock information. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param level: Required. The level of the lock. Possible values are: NotSpecified, CanNotDelete, ReadOnly. CanNotDelete means authorized users are able to read and modify the resources, but not delete. ReadOnly means authorized users can only read from a resource, but they can't modify or delete it. Possible values include: 'NotSpecified', 'CanNotDelete', 'ReadOnly' :type level: str or ~azure.mgmt.resource.locks.v2016_09_01.models.LockLevel :param notes: Notes about the lock. Maximum of 512 characters. :type notes: str :param owners: The owners of the lock. :type owners: list[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockOwner] :ivar id: The resource ID of the lock. :vartype id: str :ivar type: The resource type of the lock - Microsoft.Authorization/locks. :vartype type: str :ivar name: The name of the lock. 
:vartype name: str """ _validation = { 'level': {'required': True}, 'id': {'readonly': True}, 'type': {'readonly': True}, 'name': {'readonly': True}, } _attribute_map = { 'level': {'key': 'properties.level', 'type': 'str'}, 'notes': {'key': 'properties.notes', 'type': 'str'}, 'owners': {'key': 'properties.owners', 'type': '[ManagementLockOwner]'}, 'id': {'key': 'id', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__(self, *, level, notes: str=None, owners=None, **kwargs) -> None: super(ManagementLockObject, self).__init__(**kwargs) self.level = level self.notes = notes self.owners = owners self.id = None self.type = None self.name = None class ManagementLockOwner(Model): """Lock owner properties. :param application_id: The application ID of the lock owner. :type application_id: str """ _attribute_map = { 'application_id': {'key': 'applicationId', 'type': 'str'}, } def __init__(self, *, application_id: str=None, **kwargs) -> None: super(ManagementLockOwner, self).__init__(**kwargs) self.application_id = application_id class Operation(Model): """Microsoft.Authorization operation. :param name: Operation name: {provider}/{resource}/{operation} :type name: str :param display: The object that represents the operation. :type display: ~azure.mgmt.resource.locks.v2016_09_01.models.OperationDisplay """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__(self, *, name: str=None, display=None, **kwargs) -> None: super(Operation, self).__init__(**kwargs) self.name = name self.display = display class OperationDisplay(Model): """The object that represents the operation. :param provider: Service provider: Microsoft.Authorization :type provider: str :param resource: Resource on which the operation is performed: Profile, endpoint, etc. :type resource: str :param operation: Operation type: Read, write, delete, etc. 
:type operation: str """ _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, } def __init__(self, *, provider: str=None, resource: str=None, operation: str=None, **kwargs) -> None: super(OperationDisplay, self).__init__(**kwargs) self.provider = provider self.resource = resource self.operation = operation
33.021739
105
0.617073
from msrest.serialization import Model class CloudError(Model): _attribute_map = { } class ManagementLockObject(Model): _validation = { 'level': {'required': True}, 'id': {'readonly': True}, 'type': {'readonly': True}, 'name': {'readonly': True}, } _attribute_map = { 'level': {'key': 'properties.level', 'type': 'str'}, 'notes': {'key': 'properties.notes', 'type': 'str'}, 'owners': {'key': 'properties.owners', 'type': '[ManagementLockOwner]'}, 'id': {'key': 'id', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__(self, *, level, notes: str=None, owners=None, **kwargs) -> None: super(ManagementLockObject, self).__init__(**kwargs) self.level = level self.notes = notes self.owners = owners self.id = None self.type = None self.name = None class ManagementLockOwner(Model): _attribute_map = { 'application_id': {'key': 'applicationId', 'type': 'str'}, } def __init__(self, *, application_id: str=None, **kwargs) -> None: super(ManagementLockOwner, self).__init__(**kwargs) self.application_id = application_id class Operation(Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__(self, *, name: str=None, display=None, **kwargs) -> None: super(Operation, self).__init__(**kwargs) self.name = name self.display = display class OperationDisplay(Model): _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, } def __init__(self, *, provider: str=None, resource: str=None, operation: str=None, **kwargs) -> None: super(OperationDisplay, self).__init__(**kwargs) self.provider = provider self.resource = resource self.operation = operation
true
true
f7f68aacb24e0263bb3c7a989935a8298fb93037
907
py
Python
api/views/notes.py
chronossc/notes-app
c060e78d7fee45221fc2ffd6fcf86b030f603fac
[ "MIT" ]
null
null
null
api/views/notes.py
chronossc/notes-app
c060e78d7fee45221fc2ffd6fcf86b030f603fac
[ "MIT" ]
null
null
null
api/views/notes.py
chronossc/notes-app
c060e78d7fee45221fc2ffd6fcf86b030f603fac
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*- from __future__ import ( absolute_import, division, print_function, unicode_literals ) from api.serializers import NoteSerializer from rest_framework import filters, status, viewsets from rest_framework.response import Response class NoteViewSet(viewsets.ModelViewSet): serializer_class = NoteSerializer filter_backends = (filters.DjangoFilterBackend,) filter_fields = ('title', 'note', 'favorited') def get_queryset(self): notes = self.request.user.notes.all() return notes def create(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data, user=request.user) serializer.is_valid(raise_exception=True) self.perform_create(serializer) headers = self.get_success_headers(serializer.data) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
34.884615
89
0.737596
from __future__ import ( absolute_import, division, print_function, unicode_literals ) from api.serializers import NoteSerializer from rest_framework import filters, status, viewsets from rest_framework.response import Response class NoteViewSet(viewsets.ModelViewSet): serializer_class = NoteSerializer filter_backends = (filters.DjangoFilterBackend,) filter_fields = ('title', 'note', 'favorited') def get_queryset(self): notes = self.request.user.notes.all() return notes def create(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data, user=request.user) serializer.is_valid(raise_exception=True) self.perform_create(serializer) headers = self.get_success_headers(serializer.data) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
true
true
f7f68b666ba232efab07cc41e07bb9a4cad6f12d
1,440
py
Python
homeassistant/components/tradfri/sensor.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
4
2018-03-06T20:21:56.000Z
2022-03-02T11:47:33.000Z
homeassistant/components/tradfri/sensor.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
69
2020-08-04T09:03:43.000Z
2022-03-31T06:13:01.000Z
homeassistant/components/tradfri/sensor.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
7
2021-03-20T12:34:01.000Z
2021-12-02T10:13:52.000Z
"""Support for IKEA Tradfri sensors.""" from homeassistant.components.sensor import SensorEntity from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE from .base_class import TradfriBaseDevice from .const import CONF_GATEWAY_ID, DEVICES, DOMAIN, KEY_API async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a Tradfri config entry.""" gateway_id = config_entry.data[CONF_GATEWAY_ID] tradfri_data = hass.data[DOMAIN][config_entry.entry_id] api = tradfri_data[KEY_API] devices = tradfri_data[DEVICES] sensors = ( dev for dev in devices if not dev.has_light_control and not dev.has_socket_control and not dev.has_blind_control and not dev.has_signal_repeater_control ) if sensors: async_add_entities(TradfriSensor(sensor, api, gateway_id) for sensor in sensors) class TradfriSensor(TradfriBaseDevice, SensorEntity): """The platform class required by Home Assistant.""" _attr_device_class = DEVICE_CLASS_BATTERY _attr_native_unit_of_measurement = PERCENTAGE def __init__(self, device, api, gateway_id): """Initialize the device.""" super().__init__(device, api, gateway_id) self._unique_id = f"{gateway_id}-{device.id}" @property def native_value(self): """Return the current state of the device.""" return self._device.device_info.battery_level
32.727273
88
0.723611
from homeassistant.components.sensor import SensorEntity from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE from .base_class import TradfriBaseDevice from .const import CONF_GATEWAY_ID, DEVICES, DOMAIN, KEY_API async def async_setup_entry(hass, config_entry, async_add_entities): gateway_id = config_entry.data[CONF_GATEWAY_ID] tradfri_data = hass.data[DOMAIN][config_entry.entry_id] api = tradfri_data[KEY_API] devices = tradfri_data[DEVICES] sensors = ( dev for dev in devices if not dev.has_light_control and not dev.has_socket_control and not dev.has_blind_control and not dev.has_signal_repeater_control ) if sensors: async_add_entities(TradfriSensor(sensor, api, gateway_id) for sensor in sensors) class TradfriSensor(TradfriBaseDevice, SensorEntity): _attr_device_class = DEVICE_CLASS_BATTERY _attr_native_unit_of_measurement = PERCENTAGE def __init__(self, device, api, gateway_id): super().__init__(device, api, gateway_id) self._unique_id = f"{gateway_id}-{device.id}" @property def native_value(self): return self._device.device_info.battery_level
true
true
f7f68c6c5bc7f9a1f5d192811dbf33fec6a124cf
5,427
py
Python
bin/firefox/addon-sdk-1.15/python-lib/cuddlefish/prefs.py
cerivera/crossfire
3bb7a3fe9821f2e9da3cb24acfc9a25a31e33b48
[ "MIT" ]
2
2016-05-17T16:29:35.000Z
2019-07-12T03:17:12.000Z
bin/firefox/addon-sdk-1.15/python-lib/cuddlefish/prefs.py
cerivera/crossfire
3bb7a3fe9821f2e9da3cb24acfc9a25a31e33b48
[ "MIT" ]
null
null
null
bin/firefox/addon-sdk-1.15/python-lib/cuddlefish/prefs.py
cerivera/crossfire
3bb7a3fe9821f2e9da3cb24acfc9a25a31e33b48
[ "MIT" ]
null
null
null
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. DEFAULT_COMMON_PREFS = { # allow debug output via dump to be printed to the system console # (setting it here just in case, even though PlainTextConsole also # sets this preference) 'browser.dom.window.dump.enabled': True, # warn about possibly incorrect code 'javascript.options.strict': True, 'javascript.options.showInConsole': True, # Allow remote connections to the debugger 'devtools.debugger.remote-enabled' : True, 'extensions.sdk.console.logLevel': 'info', 'extensions.checkCompatibility.nightly' : False, # Disable extension updates and notifications. 'extensions.update.enabled' : False, 'extensions.update.notifyUser' : False, # From: # http://hg.mozilla.org/mozilla-central/file/1dd81c324ac7/build/automation.py.in#l372 # Only load extensions from the application and user profile. # AddonManager.SCOPE_PROFILE + AddonManager.SCOPE_APPLICATION 'extensions.enabledScopes' : 5, # Disable metadata caching for installed add-ons by default 'extensions.getAddons.cache.enabled' : False, # Disable intalling any distribution add-ons 'extensions.installDistroAddons' : False, # Allow installing extensions dropped into the profile folder 'extensions.autoDisableScopes' : 10, # Disable app update 'app.update.enabled' : False, # Point update checks to a nonexistent local URL for fast failures. 'extensions.update.url' : 'http://localhost/extensions-dummy/updateURL', 'extensions.blocklist.url' : 'http://localhost/extensions-dummy/blocklistURL', # Make sure opening about:addons won't hit the network. 'extensions.webservice.discoverURL' : 'http://localhost/extensions-dummy/discoveryURL' } DEFAULT_FENNEC_PREFS = { 'browser.console.showInPanel': True, 'browser.firstrun.show.uidiscovery': False } # When launching a temporary new Firefox profile, use these preferences. 
DEFAULT_FIREFOX_PREFS = { 'browser.startup.homepage' : 'about:blank', 'startup.homepage_welcome_url' : 'about:blank', 'devtools.errorconsole.enabled' : True, 'devtools.chrome.enabled' : True, # From: # http://hg.mozilla.org/mozilla-central/file/1dd81c324ac7/build/automation.py.in#l388 # Make url-classifier updates so rare that they won't affect tests. 'urlclassifier.updateinterval' : 172800, # Point the url-classifier to a nonexistent local URL for fast failures. 'browser.safebrowsing.provider.0.gethashURL' : 'http://localhost/safebrowsing-dummy/gethash', 'browser.safebrowsing.provider.0.keyURL' : 'http://localhost/safebrowsing-dummy/newkey', 'browser.safebrowsing.provider.0.updateURL' : 'http://localhost/safebrowsing-dummy/update', } # When launching a temporary new Thunderbird profile, use these preferences. # Note that these were taken from: # http://mxr.mozilla.org/comm-central/source/mail/test/mozmill/runtest.py DEFAULT_THUNDERBIRD_PREFS = { # say no to slow script warnings 'dom.max_chrome_script_run_time': 200, 'dom.max_script_run_time': 0, # do not ask about being the default mail client 'mail.shell.checkDefaultClient': False, # disable non-gloda indexing daemons 'mail.winsearch.enable': False, 'mail.winsearch.firstRunDone': True, 'mail.spotlight.enable': False, 'mail.spotlight.firstRunDone': True, # disable address books for undisclosed reasons 'ldap_2.servers.osx.position': 0, 'ldap_2.servers.oe.position': 0, # disable the first use junk dialog 'mailnews.ui.junk.firstuse': False, # other unknown voodoo # -- dummied up local accounts to stop the account wizard 'mail.account.account1.server' : "server1", 'mail.account.account2.identities' : "id1", 'mail.account.account2.server' : "server2", 'mail.accountmanager.accounts' : "account1,account2", 'mail.accountmanager.defaultaccount' : "account2", 'mail.accountmanager.localfoldersserver' : "server1", 'mail.identity.id1.fullName' : "Tinderbox", 'mail.identity.id1.smtpServer' : "smtp1", 
'mail.identity.id1.useremail' : "tinderbox@invalid.com", 'mail.identity.id1.valid' : True, 'mail.root.none-rel' : "[ProfD]Mail", 'mail.root.pop3-rel' : "[ProfD]Mail", 'mail.server.server1.directory-rel' : "[ProfD]Mail/Local Folders", 'mail.server.server1.hostname' : "Local Folders", 'mail.server.server1.name' : "Local Folders", 'mail.server.server1.type' : "none", 'mail.server.server1.userName' : "nobody", 'mail.server.server2.check_new_mail' : False, 'mail.server.server2.directory-rel' : "[ProfD]Mail/tinderbox", 'mail.server.server2.download_on_biff' : True, 'mail.server.server2.hostname' : "tinderbox", 'mail.server.server2.login_at_startup' : False, 'mail.server.server2.name' : "tinderbox@invalid.com", 'mail.server.server2.type' : "pop3", 'mail.server.server2.userName' : "tinderbox", 'mail.smtp.defaultserver' : "smtp1", 'mail.smtpserver.smtp1.hostname' : "tinderbox", 'mail.smtpserver.smtp1.username' : "tinderbox", 'mail.smtpservers' : "smtp1", 'mail.startup.enabledMailCheckOnce' : True, 'mailnews.start_page_override.mstone' : "ignore", }
44.483607
97
0.710153
DEFAULT_COMMON_PREFS = { 'browser.dom.window.dump.enabled': True, 'javascript.options.strict': True, 'javascript.options.showInConsole': True, 'devtools.debugger.remote-enabled' : True, 'extensions.sdk.console.logLevel': 'info', 'extensions.checkCompatibility.nightly' : False, 'extensions.update.enabled' : False, 'extensions.update.notifyUser' : False, 'extensions.enabledScopes' : 5, 'extensions.getAddons.cache.enabled' : False, 'extensions.installDistroAddons' : False, 'extensions.autoDisableScopes' : 10, 'app.update.enabled' : False, 'extensions.update.url' : 'http://localhost/extensions-dummy/updateURL', 'extensions.blocklist.url' : 'http://localhost/extensions-dummy/blocklistURL', 'extensions.webservice.discoverURL' : 'http://localhost/extensions-dummy/discoveryURL' } DEFAULT_FENNEC_PREFS = { 'browser.console.showInPanel': True, 'browser.firstrun.show.uidiscovery': False } # When launching a temporary new Firefox profile, use these preferences. DEFAULT_FIREFOX_PREFS = { 'browser.startup.homepage' : 'about:blank', 'startup.homepage_welcome_url' : 'about:blank', 'devtools.errorconsole.enabled' : True, 'devtools.chrome.enabled' : True, # From: # http://hg.mozilla.org/mozilla-central/file/1dd81c324ac7/build/automation.py.in#l388 # Make url-classifier updates so rare that they won't affect tests. 
'urlclassifier.updateinterval' : 172800, 'browser.safebrowsing.provider.0.gethashURL' : 'http://localhost/safebrowsing-dummy/gethash', 'browser.safebrowsing.provider.0.keyURL' : 'http://localhost/safebrowsing-dummy/newkey', 'browser.safebrowsing.provider.0.updateURL' : 'http://localhost/safebrowsing-dummy/update', } DEFAULT_THUNDERBIRD_PREFS = { 'dom.max_chrome_script_run_time': 200, 'dom.max_script_run_time': 0, 'mail.shell.checkDefaultClient': False, 'mail.winsearch.enable': False, 'mail.winsearch.firstRunDone': True, 'mail.spotlight.enable': False, 'mail.spotlight.firstRunDone': True, 'ldap_2.servers.osx.position': 0, 'ldap_2.servers.oe.position': 0, 'mailnews.ui.junk.firstuse': False, 'mail.account.account1.server' : "server1", 'mail.account.account2.identities' : "id1", 'mail.account.account2.server' : "server2", 'mail.accountmanager.accounts' : "account1,account2", 'mail.accountmanager.defaultaccount' : "account2", 'mail.accountmanager.localfoldersserver' : "server1", 'mail.identity.id1.fullName' : "Tinderbox", 'mail.identity.id1.smtpServer' : "smtp1", 'mail.identity.id1.useremail' : "tinderbox@invalid.com", 'mail.identity.id1.valid' : True, 'mail.root.none-rel' : "[ProfD]Mail", 'mail.root.pop3-rel' : "[ProfD]Mail", 'mail.server.server1.directory-rel' : "[ProfD]Mail/Local Folders", 'mail.server.server1.hostname' : "Local Folders", 'mail.server.server1.name' : "Local Folders", 'mail.server.server1.type' : "none", 'mail.server.server1.userName' : "nobody", 'mail.server.server2.check_new_mail' : False, 'mail.server.server2.directory-rel' : "[ProfD]Mail/tinderbox", 'mail.server.server2.download_on_biff' : True, 'mail.server.server2.hostname' : "tinderbox", 'mail.server.server2.login_at_startup' : False, 'mail.server.server2.name' : "tinderbox@invalid.com", 'mail.server.server2.type' : "pop3", 'mail.server.server2.userName' : "tinderbox", 'mail.smtp.defaultserver' : "smtp1", 'mail.smtpserver.smtp1.hostname' : "tinderbox", 'mail.smtpserver.smtp1.username' : 
"tinderbox", 'mail.smtpservers' : "smtp1", 'mail.startup.enabledMailCheckOnce' : True, 'mailnews.start_page_override.mstone' : "ignore", }
true
true
f7f68c6c639bc891a9793afa52f822775398302d
3,944
py
Python
ex3_len_interval_proposed_oc.py
vonguyenleduy/dnn_representation_selective_inference
ea9924950441d30d2619a235551673f089f5a54f
[ "BSD-3-Clause" ]
null
null
null
ex3_len_interval_proposed_oc.py
vonguyenleduy/dnn_representation_selective_inference
ea9924950441d30d2619a235551673f089f5a54f
[ "BSD-3-Clause" ]
null
null
null
ex3_len_interval_proposed_oc.py
vonguyenleduy/dnn_representation_selective_inference
ea9924950441d30d2619a235551673f089f5a54f
[ "BSD-3-Clause" ]
null
null
null
import numpy as np from tensorflow.keras.models import load_model import tensorflow as tf import time import gen_data import util def run(): d = 8 IMG_WIDTH = d IMG_HEIGHT = d IMG_CHANNELS = 1 mu_1 = 0 mu_2 = 2 global_list_ineq = [] X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2) X_para, X_vec = util.create_X_para(X_test, d) X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS) model = load_model('./model/test_' + str(d) + '.h5') # model.summary() weights = model.get_weights() kernel_1 = weights[0] bias_1 = weights[1] kernel_2 = weights[2] bias_2 = weights[3] out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1) _, d, _, no_channel = out_conv_1.shape out_conv_1 = out_conv_1 + bias_1 for i in range(d): for j in range(d): for k in range(no_channel): out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k] out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para) for element in max_pooling_event: global_list_ineq.append(element) out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para) _, d, _, no_channel = out_up_sampling.shape out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel) out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2) _, d, _, no_channel = out_conv_2.shape out_conv_2 = out_conv_2 + bias_2 for i in range(d): for j in range(d): for k in range(no_channel): out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k] out_conv_2 = util.sigmoid(out_conv_2) output = out_conv_2 for i in range(d): for j in range(d): for k in range(no_channel): pT = out_conv_2_para[0][i][j][k][0] q = out_conv_2_para[0][i][j][k][1] val = np.dot(pT, X_vec)[0][0] + q val = util.sigmoid(val) if val <= 0.5: global_list_ineq.append([pT, q]) else: global_list_ineq.append([-pT, -q]) output = output.flatten() binary_vec = [] for each_e in output: if each_e <= 0.5: 
binary_vec.append(0) else: binary_vec.append(1) x = X_vec eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d) u, v = util.compute_u_v(x, eta, d * d) Vminus = np.NINF Vplus = np.Inf for element in global_list_ineq: aT = element[0] b = element[1] a_scalar = np.dot(aT, v)[0][0] b_scalar = np.dot(aT, u)[0][0] + b if a_scalar == 0: if b > 0: print('Error B') elif a_scalar > 0: Vplus = min(Vplus, -b_scalar / a_scalar) else: Vminus = max(Vminus, -b_scalar / a_scalar) return Vplus - Vminus from mpi4py import MPI COMM = MPI.COMM_WORLD start_time = None if COMM.rank == 0: start_time = time.time() max_iteration = 120 no_thread = COMM.size iter_each_thread = int(max_iteration / no_thread) else: iter_each_thread = None iter_each_thread = COMM.bcast(iter_each_thread, root=0) local_list_length = [] for i in range(iter_each_thread): length = run() if length is not None: local_list_length.append(length) total_list_length = COMM.gather(local_list_length, root=0) if COMM.rank == 0: total_list_length = [_i for temp in total_list_length for _i in temp] print(total_list_length) print("--- %s seconds ---" % (time.time() - start_time))
25.282051
109
0.587728
import numpy as np from tensorflow.keras.models import load_model import tensorflow as tf import time import gen_data import util def run(): d = 8 IMG_WIDTH = d IMG_HEIGHT = d IMG_CHANNELS = 1 mu_1 = 0 mu_2 = 2 global_list_ineq = [] X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2) X_para, X_vec = util.create_X_para(X_test, d) X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS) model = load_model('./model/test_' + str(d) + '.h5') weights = model.get_weights() kernel_1 = weights[0] bias_1 = weights[1] kernel_2 = weights[2] bias_2 = weights[3] out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1) _, d, _, no_channel = out_conv_1.shape out_conv_1 = out_conv_1 + bias_1 for i in range(d): for j in range(d): for k in range(no_channel): out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k] out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para) for element in max_pooling_event: global_list_ineq.append(element) out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para) _, d, _, no_channel = out_up_sampling.shape out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel) out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2) _, d, _, no_channel = out_conv_2.shape out_conv_2 = out_conv_2 + bias_2 for i in range(d): for j in range(d): for k in range(no_channel): out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k] out_conv_2 = util.sigmoid(out_conv_2) output = out_conv_2 for i in range(d): for j in range(d): for k in range(no_channel): pT = out_conv_2_para[0][i][j][k][0] q = out_conv_2_para[0][i][j][k][1] val = np.dot(pT, X_vec)[0][0] + q val = util.sigmoid(val) if val <= 0.5: global_list_ineq.append([pT, q]) else: global_list_ineq.append([-pT, -q]) output = output.flatten() binary_vec = [] for each_e in output: if each_e <= 0.5: binary_vec.append(0) else: 
binary_vec.append(1) x = X_vec eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d) u, v = util.compute_u_v(x, eta, d * d) Vminus = np.NINF Vplus = np.Inf for element in global_list_ineq: aT = element[0] b = element[1] a_scalar = np.dot(aT, v)[0][0] b_scalar = np.dot(aT, u)[0][0] + b if a_scalar == 0: if b > 0: print('Error B') elif a_scalar > 0: Vplus = min(Vplus, -b_scalar / a_scalar) else: Vminus = max(Vminus, -b_scalar / a_scalar) return Vplus - Vminus from mpi4py import MPI COMM = MPI.COMM_WORLD start_time = None if COMM.rank == 0: start_time = time.time() max_iteration = 120 no_thread = COMM.size iter_each_thread = int(max_iteration / no_thread) else: iter_each_thread = None iter_each_thread = COMM.bcast(iter_each_thread, root=0) local_list_length = [] for i in range(iter_each_thread): length = run() if length is not None: local_list_length.append(length) total_list_length = COMM.gather(local_list_length, root=0) if COMM.rank == 0: total_list_length = [_i for temp in total_list_length for _i in temp] print(total_list_length) print("--- %s seconds ---" % (time.time() - start_time))
true
true
f7f68db640557bc88fbc50c725ea5a75721c9747
2,043
py
Python
Tiba/consultation/migrations/0001_initial.py
veldakarimi/Tiba_Online
84252ad69dc4bb823b6ebec386088d3b56951891
[ "MIT" ]
1
2021-04-01T12:54:14.000Z
2021-04-01T12:54:14.000Z
Tiba/consultation/migrations/0001_initial.py
veldakarimi/Tiba_Online
84252ad69dc4bb823b6ebec386088d3b56951891
[ "MIT" ]
null
null
null
Tiba/consultation/migrations/0001_initial.py
veldakarimi/Tiba_Online
84252ad69dc4bb823b6ebec386088d3b56951891
[ "MIT" ]
null
null
null
# Generated by Django 3.1.4 on 2020-12-31 06:36 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Admit', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('ward', models.CharField(max_length=300)), ('specialist', models.CharField(max_length=300)), ('note_to_specialist', models.CharField(max_length=300)), ('current_prescriptions', models.CharField(max_length=300)), ], ), migrations.CreateModel( name='Reffer', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('hospital', models.CharField(max_length=300)), ('note_to_specialist', models.CharField(max_length=300)), ('current_prescriptions', models.CharField(max_length=300)), ], ), migrations.CreateModel( name='Consultation', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('symptoms', models.CharField(max_length=1000)), ('temparature', models.IntegerField()), ('blood_pressure', models.IntegerField()), ('diagnosis', models.CharField(max_length=1000)), ('doctor_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='doctor', to=settings.AUTH_USER_MODEL)), ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient', to=settings.AUTH_USER_MODEL)), ], ), ]
40.86
146
0.605482
from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Admit', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('ward', models.CharField(max_length=300)), ('specialist', models.CharField(max_length=300)), ('note_to_specialist', models.CharField(max_length=300)), ('current_prescriptions', models.CharField(max_length=300)), ], ), migrations.CreateModel( name='Reffer', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('hospital', models.CharField(max_length=300)), ('note_to_specialist', models.CharField(max_length=300)), ('current_prescriptions', models.CharField(max_length=300)), ], ), migrations.CreateModel( name='Consultation', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('symptoms', models.CharField(max_length=1000)), ('temparature', models.IntegerField()), ('blood_pressure', models.IntegerField()), ('diagnosis', models.CharField(max_length=1000)), ('doctor_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='doctor', to=settings.AUTH_USER_MODEL)), ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient', to=settings.AUTH_USER_MODEL)), ], ), ]
true
true
f7f68de11fd3abc131df58726f6e8bb32b5243b2
2,062
py
Python
tests/main_app/business/test_register.py
ricardochaves/financeiro-bot
2c48be4355e3c8630c36aa846c16042f22b88271
[ "MIT" ]
4
2020-01-21T00:21:44.000Z
2021-06-15T19:38:36.000Z
tests/main_app/business/test_register.py
ricardochaves/financeiro-bot
2c48be4355e3c8630c36aa846c16042f22b88271
[ "MIT" ]
173
2019-11-18T08:19:44.000Z
2021-09-08T01:37:19.000Z
tests/main_app/business/test_register.py
ricardochaves/financeiro-bot
2c48be4355e3c8630c36aa846c16042f22b88271
[ "MIT" ]
3
2020-01-28T19:19:35.000Z
2021-05-01T02:33:36.000Z
from base_site.mainapp.business.register import Register from django.test import TestCase from tests.helper import create_scenario_with_two_commands_complete_and_empty class RegisterClassTestCase(TestCase): def setUp(self): self.category, self.family_member, self.type_entry, self.empty_command, self.completed_command = ( create_scenario_with_two_commands_complete_and_empty() ) def test_should_return_true_for_all_options(self): register = Register(self.empty_command) self.assertFalse(register.need_payment_installments()) self.assertTrue(register.need_entry_date()) self.assertTrue(register.need_payment_date()) self.assertTrue(register.need_debit()) self.assertTrue(register.need_credit()) self.assertTrue(register.need_category()) self.assertTrue(register.need_name()) self.assertTrue(register.need_description()) self.assertTrue(register.need_type()) self.empty_command.payment_date = 2 self.assertTrue(register.need_payment_installments()) self.assertTrue(register.need_entry_date()) self.assertFalse(register.need_payment_date()) self.assertTrue(register.need_debit()) self.assertTrue(register.need_credit()) self.assertTrue(register.need_category()) self.assertTrue(register.need_name()) self.assertTrue(register.need_description()) self.assertTrue(register.need_type()) def test_should_return_false_for_all_options(self): register = Register(self.completed_command) self.assertFalse(register.need_payment_installments()) self.assertFalse(register.need_entry_date()) self.assertFalse(register.need_payment_date()) self.assertFalse(register.need_debit()) self.assertFalse(register.need_credit()) self.assertFalse(register.need_category()) self.assertFalse(register.need_name()) self.assertFalse(register.need_description()) self.assertFalse(register.need_type())
40.431373
106
0.728904
from base_site.mainapp.business.register import Register from django.test import TestCase from tests.helper import create_scenario_with_two_commands_complete_and_empty class RegisterClassTestCase(TestCase): def setUp(self): self.category, self.family_member, self.type_entry, self.empty_command, self.completed_command = ( create_scenario_with_two_commands_complete_and_empty() ) def test_should_return_true_for_all_options(self): register = Register(self.empty_command) self.assertFalse(register.need_payment_installments()) self.assertTrue(register.need_entry_date()) self.assertTrue(register.need_payment_date()) self.assertTrue(register.need_debit()) self.assertTrue(register.need_credit()) self.assertTrue(register.need_category()) self.assertTrue(register.need_name()) self.assertTrue(register.need_description()) self.assertTrue(register.need_type()) self.empty_command.payment_date = 2 self.assertTrue(register.need_payment_installments()) self.assertTrue(register.need_entry_date()) self.assertFalse(register.need_payment_date()) self.assertTrue(register.need_debit()) self.assertTrue(register.need_credit()) self.assertTrue(register.need_category()) self.assertTrue(register.need_name()) self.assertTrue(register.need_description()) self.assertTrue(register.need_type()) def test_should_return_false_for_all_options(self): register = Register(self.completed_command) self.assertFalse(register.need_payment_installments()) self.assertFalse(register.need_entry_date()) self.assertFalse(register.need_payment_date()) self.assertFalse(register.need_debit()) self.assertFalse(register.need_credit()) self.assertFalse(register.need_category()) self.assertFalse(register.need_name()) self.assertFalse(register.need_description()) self.assertFalse(register.need_type())
true
true
f7f68e82a6a55a33a4b6f0f3017b6ac514ad9509
451
py
Python
test/integration/008_retry_failed_test/test_retry_failed.py
bastienboutonnet/dbt-helper
7bf56384ae584542eb22adf5431df1854e95ae9b
[ "Apache-2.0" ]
104
2019-01-25T13:50:37.000Z
2022-02-10T14:38:45.000Z
test/integration/008_retry_failed_test/test_retry_failed.py
bastienboutonnet/dbt-helper
7bf56384ae584542eb22adf5431df1854e95ae9b
[ "Apache-2.0" ]
27
2019-01-25T01:16:40.000Z
2020-10-12T22:33:15.000Z
test/integration/008_retry_failed_test/test_retry_failed.py
bastienboutonnet/dbt-helper
7bf56384ae584542eb22adf5431df1854e95ae9b
[ "Apache-2.0" ]
10
2019-04-11T18:57:35.000Z
2022-03-15T22:29:50.000Z
from test.integration.base import DBTIntegrationTest class RetryFailedTest(DBTIntegrationTest): @property def models(self): return "test/integration/008_retry_failed_test/models" def tests_retry_failed(self): _, success = self.run_dbt(["run"]) self.assertFalse(success) self.assertEqual( self.run_dbthelper(["retry-failed"]), ["my_failing_model", "my_skipped_model"], )
25.055556
62
0.660754
from test.integration.base import DBTIntegrationTest class RetryFailedTest(DBTIntegrationTest): @property def models(self): return "test/integration/008_retry_failed_test/models" def tests_retry_failed(self): _, success = self.run_dbt(["run"]) self.assertFalse(success) self.assertEqual( self.run_dbthelper(["retry-failed"]), ["my_failing_model", "my_skipped_model"], )
true
true
f7f68f3f58690d7f13e0528dd454036591a0a824
8,915
py
Python
test/functional/example_test.py
satcoin-dev/satcoin
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
[ "MIT" ]
4
2021-02-28T04:34:58.000Z
2021-09-14T15:25:31.000Z
test/functional/example_test.py
satcoin-dev/satcoin
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
[ "MIT" ]
null
null
null
test/functional/example_test.py
satcoin-dev/satcoin
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
[ "MIT" ]
1
2021-06-18T13:13:17.000Z
2021-06-18T13:13:17.000Z
#!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """An example functional test The module-level docstring should include a high-level description of what the test is doing. It's the first thing people see when they open the file and should give the reader information about *what* the test is testing and *how* it's being tested """ # Imports should be in PEP8 ordering (std library first, then third party # libraries then local imports). from collections import defaultdict # Avoid wildcard * imports from test_framework.blocktools import (create_block, create_coinbase) from test_framework.messages import CInv from test_framework.mininode import ( P2PInterface, mininode_lock, msg_block, msg_getdata, ) from test_framework.test_framework import SatcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, wait_until, ) # P2PInterface is a class containing callbacks to be executed when a P2P # message is received from the node-under-test. Subclass P2PInterface and # override the on_*() methods if you need custom behaviour. class BaseNode(P2PInterface): def __init__(self): """Initialize the P2PInterface Used to initialize custom properties for the Node that aren't included by default in the base class. Be aware that the P2PInterface base class already stores a counter for each P2P message type and the last received message of each type, which should be sufficient for the needs of most tests. 
Call super().__init__() first for standard initialization and then initialize custom properties.""" super().__init__() # Stores a dictionary of all blocks received self.block_receive_map = defaultdict(int) def on_block(self, message): """Override the standard on_block callback Store the hash of a received block in the dictionary.""" message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 def on_inv(self, message): """Override the standard on_inv callback""" pass def custom_function(): """Do some custom behaviour If this function is more generally useful for other tests, consider moving it to a module in test_framework.""" # self.log.info("running custom_function") # Oops! Can't run self.log outside the SatcoinTestFramework pass class ExampleTest(SatcoinTestFramework): # Each functional test is a subclass of the SatcoinTestFramework class. # Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network() # and setup_nodes() methods to customize the test setup as required. def set_test_params(self): """Override test parameters for your individual test. This method must be overridden and num_nodes must be explicitly set.""" self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes self.extra_args = [[], ["-logips"], []] # self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test() # Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present. # This test uses generate which requires wallet to be compiled def skip_test_if_missing_module(self): self.skip_if_no_wallet() # Use add_options() to add specific command-line options for your test. # In practice this is not used very much, since the tests are mostly written # to be run in automated environments without command-line options. # def add_options() # pass # Use setup_chain() to customize the node data directories. 
In practice # this is not used very much since the default behaviour is almost always # fine # def setup_chain(): # pass def setup_network(self): """Setup the test network topology Often you won't need to override this, since the standard network topology (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests. If you do override this method, remember to start the nodes, assign them to self.nodes, connect them and then sync.""" self.setup_nodes() # In this test, we're not connecting node2 to node0 or node1. Calls to # sync_all() should not include node2, since we're not expecting it to # sync. connect_nodes(self.nodes[0], 1) self.sync_all(self.nodes[0:2]) # Use setup_nodes() to customize the node start behaviour (for example if # you don't want to start all nodes at the start of the test). # def setup_nodes(): # pass def custom_method(self): """Do some custom behaviour for this test Define it in a method here because you're going to use it repeatedly. If you think it's useful in general, consider moving it to the base SatcoinTestFramework class so other tests can use it.""" self.log.info("Running custom_method") def run_test(self): """Main test logic""" # Create P2P connections will wait for a verack to make sure the connection is fully up self.nodes[0].add_p2p_connection(BaseNode()) # Generating a block on one of the nodes will get us out of IBD blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)] self.sync_all(self.nodes[0:2]) # Notice above how we called an RPC by calling a method with the same # name on the node object. Notice also how we used a keyword argument # to specify a named RPC argument. Neither of those are defined on the # node object. Instead there's some __getattr__() magic going on under # the covers to dispatch unrecognised attribute calls to the RPC # interface. # Logs are nice. Do plenty of them. They can be used in place of comments for # breaking the test into sub-sections. 
self.log.info("Starting test!") self.log.info("Calling a custom function") custom_function() self.log.info("Calling a custom method") self.custom_method() self.log.info("Create some blocks") self.tip = int(self.nodes[0].getbestblockhash(), 16) self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 height = self.nodes[0].getblockcount() for i in range(10): # Use the mininode and blocktools functionality to manually build a block # Calling the generate() rpc is easier, but this allows us to exactly # control the blocks and transactions. block = create_block(self.tip, create_coinbase(height+1), self.block_time) block.solve() block_message = msg_block(block) # Send message is used to send a P2P message to the node over our P2PInterface self.nodes[0].p2p.send_message(block_message) self.tip = block.sha256 blocks.append(self.tip) self.block_time += 1 height += 1 self.log.info("Wait for node1 to reach current tip (height 11) using RPC") self.nodes[1].waitforblockheight(11) self.log.info("Connect node2 and node1") connect_nodes(self.nodes[1], 2) self.log.info("Wait for node2 to receive all the blocks from node1") self.sync_all() self.log.info("Add P2P connection to node2") self.nodes[0].disconnect_p2ps() self.nodes[2].add_p2p_connection(BaseNode()) self.log.info("Test that node2 propagates all the blocks to us") getdata_request = msg_getdata() for block in blocks: getdata_request.inv.append(CInv(2, block)) self.nodes[2].p2p.send_message(getdata_request) # wait_until() will loop until a predicate condition is met. Use it to test properties of the # P2PInterface objects. wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock) self.log.info("Check that each block was received only once") # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving # messages. 
The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate. with mininode_lock: for block in self.nodes[2].p2p.block_receive_map.values(): assert_equal(block, 1) if __name__ == '__main__': ExampleTest().main()
40.894495
133
0.684577
from collections import defaultdict from test_framework.blocktools import (create_block, create_coinbase) from test_framework.messages import CInv from test_framework.mininode import ( P2PInterface, mininode_lock, msg_block, msg_getdata, ) from test_framework.test_framework import SatcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes, wait_until, ) class BaseNode(P2PInterface): def __init__(self): super().__init__() self.block_receive_map = defaultdict(int) def on_block(self, message): message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 def on_inv(self, message): pass def custom_function(): Each functional test is a subclass of the SatcoinTestFramework class. # Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network() # and setup_nodes() methods to customize the test setup as required. def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes self.extra_args = [[], ["-logips"], []] # self.log.info("I've finished set_test_params") # Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present. # This test uses generate which requires wallet to be compiled def skip_test_if_missing_module(self): self.skip_if_no_wallet() # Use add_options() to add specific command-line options for your test. # In practice this is not used very much, since the tests are mostly written # to be run in automated environments without command-line options. # def add_options() # pass # Use setup_chain() to customize the node data directories. In practice # this is not used very much since the default behaviour is almost always # fine # def setup_chain(): # pass def setup_network(self): self.setup_nodes() # In this test, we're not connecting node2 to node0 or node1. Calls to # sync. 
connect_nodes(self.nodes[0], 1) self.sync_all(self.nodes[0:2]) # Use setup_nodes() to customize the node start behaviour (for example if # you don't want to start all nodes at the start of the test). def custom_method(self): self.log.info("Running custom_method") def run_test(self): self.nodes[0].add_p2p_connection(BaseNode()) blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)] self.sync_all(self.nodes[0:2]) # the covers to dispatch unrecognised attribute calls to the RPC # interface. # Logs are nice. Do plenty of them. They can be used in place of comments for # breaking the test into sub-sections. self.log.info("Starting test!") self.log.info("Calling a custom function") custom_function() self.log.info("Calling a custom method") self.custom_method() self.log.info("Create some blocks") self.tip = int(self.nodes[0].getbestblockhash(), 16) self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 height = self.nodes[0].getblockcount() for i in range(10): # Use the mininode and blocktools functionality to manually build a block # Calling the generate() rpc is easier, but this allows us to exactly # control the blocks and transactions. 
block = create_block(self.tip, create_coinbase(height+1), self.block_time) block.solve() block_message = msg_block(block) # Send message is used to send a P2P message to the node over our P2PInterface self.nodes[0].p2p.send_message(block_message) self.tip = block.sha256 blocks.append(self.tip) self.block_time += 1 height += 1 self.log.info("Wait for node1 to reach current tip (height 11) using RPC") self.nodes[1].waitforblockheight(11) self.log.info("Connect node2 and node1") connect_nodes(self.nodes[1], 2) self.log.info("Wait for node2 to receive all the blocks from node1") self.sync_all() self.log.info("Add P2P connection to node2") self.nodes[0].disconnect_p2ps() self.nodes[2].add_p2p_connection(BaseNode()) self.log.info("Test that node2 propagates all the blocks to us") getdata_request = msg_getdata() for block in blocks: getdata_request.inv.append(CInv(2, block)) self.nodes[2].p2p.send_message(getdata_request) # wait_until() will loop until a predicate condition is met. Use it to test properties of the # P2PInterface objects. wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock) self.log.info("Check that each block was received only once") # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate. with mininode_lock: for block in self.nodes[2].p2p.block_receive_map.values(): assert_equal(block, 1) if __name__ == '__main__': ExampleTest().main()
true
true
f7f68f92251baf290b2514537324084d1080784e
1,851
py
Python
connectrum/script.py
SKlayer/connectrum
d05617df8e3a32377b2772245dcab2c175922f0a
[ "MIT" ]
null
null
null
connectrum/script.py
SKlayer/connectrum
d05617df8e3a32377b2772245dcab2c175922f0a
[ "MIT" ]
null
null
null
connectrum/script.py
SKlayer/connectrum
d05617df8e3a32377b2772245dcab2c175922f0a
[ "MIT" ]
null
null
null
from bitcoinb58 import b58decode, b58encode from hashlib import sha256 import hashlib import binascii def dblsha(b): return sha256(sha256(b).digest()).digest() def ripemd160(inp): h = hashlib.new('ripemd160') h.update(inp) return h.digest() WitnessMagic = b'\xaa\x21\xa9\xed' def _Address2PKH(addr): try: addr = b58decode(addr, 25) except: return None if addr is None: return None ver = addr[0] cksumA = addr[-4:] cksumB = dblsha(addr[:-4])[:4] if cksumA != cksumB: return None return (ver, addr[1:-4]) def _PKH2Address(ver, addr): addr = addr[3:-2] ver = int(ver).to_bytes(length=1, byteorder="little", signed=False) cksumB = dblsha(ver + addr)[:4] px = "" if ver == b"\x00": px = "1" return px + b58encode(ver + addr + cksumB) class BitcoinScript: @classmethod def toAddress(cls,addr): d = _Address2PKH(addr) if not d: raise ValueError('invalid address') (ver, pubkeyhash) = d if ver == 35 or ver == 111 or ver == 0: return b'\x76\xa9\x14' + pubkeyhash + b'\x88\xac' elif ver == 5 or ver == 196: return b'\xa9\x14' + pubkeyhash + b'\x87' raise ValueError('invalid address version') @classmethod def commitment(cls, commitment): clen = len(commitment) if clen > 0x4b: raise NotImplementedError return b'\x6a' + bytes((clen,)) + commitment def fch2btc(addr): pkh =BitcoinScript.toAddress(addr) return _PKH2Address(0, pkh) def btc2fch(addr): pkh =BitcoinScript.toAddress(addr) return _PKH2Address(35, pkh) if __name__ == '__main__': o = fch2btc("FMHuHuKmWQHQx3ZxmaeHnrRsPrLYRkUat4") print(o) o = btc2fch("1GTnq6tgf64kKsgvutz8pTuLNCKXXCzXDK") print(o)
22.301205
71
0.606159
from bitcoinb58 import b58decode, b58encode from hashlib import sha256 import hashlib import binascii def dblsha(b): return sha256(sha256(b).digest()).digest() def ripemd160(inp): h = hashlib.new('ripemd160') h.update(inp) return h.digest() WitnessMagic = b'\xaa\x21\xa9\xed' def _Address2PKH(addr): try: addr = b58decode(addr, 25) except: return None if addr is None: return None ver = addr[0] cksumA = addr[-4:] cksumB = dblsha(addr[:-4])[:4] if cksumA != cksumB: return None return (ver, addr[1:-4]) def _PKH2Address(ver, addr): addr = addr[3:-2] ver = int(ver).to_bytes(length=1, byteorder="little", signed=False) cksumB = dblsha(ver + addr)[:4] px = "" if ver == b"\x00": px = "1" return px + b58encode(ver + addr + cksumB) class BitcoinScript: @classmethod def toAddress(cls,addr): d = _Address2PKH(addr) if not d: raise ValueError('invalid address') (ver, pubkeyhash) = d if ver == 35 or ver == 111 or ver == 0: return b'\x76\xa9\x14' + pubkeyhash + b'\x88\xac' elif ver == 5 or ver == 196: return b'\xa9\x14' + pubkeyhash + b'\x87' raise ValueError('invalid address version') @classmethod def commitment(cls, commitment): clen = len(commitment) if clen > 0x4b: raise NotImplementedError return b'\x6a' + bytes((clen,)) + commitment def fch2btc(addr): pkh =BitcoinScript.toAddress(addr) return _PKH2Address(0, pkh) def btc2fch(addr): pkh =BitcoinScript.toAddress(addr) return _PKH2Address(35, pkh) if __name__ == '__main__': o = fch2btc("FMHuHuKmWQHQx3ZxmaeHnrRsPrLYRkUat4") print(o) o = btc2fch("1GTnq6tgf64kKsgvutz8pTuLNCKXXCzXDK") print(o)
true
true
f7f68f9f0683b19e58eefe9814ca58fb19854346
11,192
py
Python
numba/npyufunc/ufuncbuilder.py
rudrapatel/numba
5111ad5da17f914ca203e67f3026d2c0d890dfbb
[ "BSD-2-Clause" ]
null
null
null
numba/npyufunc/ufuncbuilder.py
rudrapatel/numba
5111ad5da17f914ca203e67f3026d2c0d890dfbb
[ "BSD-2-Clause" ]
1
2019-05-01T20:39:46.000Z
2019-05-07T03:43:29.000Z
numba/npyufunc/ufuncbuilder.py
jdburnet/numba
e8ac4951affacd25c63ba2c18d62a3f12ed7e0ba
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import print_function, division, absolute_import import warnings import inspect from contextlib import contextmanager import numpy as np from numba.decorators import jit from numba.targets.descriptors import TargetDescriptor from numba.targets.options import TargetOptions from numba.targets.registry import dispatcher_registry, cpu_target from numba.targets.cpu import FastMathOptions from numba import utils, compiler, types, sigutils from numba.numpy_support import as_dtype from . import _internal from .sigparse import parse_signature from .wrappers import build_ufunc_wrapper, build_gufunc_wrapper from numba.caching import FunctionCache, NullCache from numba.compiler_lock import global_compiler_lock class UFuncTargetOptions(TargetOptions): OPTIONS = { "nopython" : bool, "forceobj" : bool, "fastmath" : FastMathOptions, } class UFuncTarget(TargetDescriptor): options = UFuncTargetOptions @property def typing_context(self): return cpu_target.typing_context @property def target_context(self): return cpu_target.target_context ufunc_target = UFuncTarget() class UFuncDispatcher(object): """ An object handling compilation of various signatures for a ufunc. 
""" targetdescr = ufunc_target def __init__(self, py_func, locals={}, targetoptions={}): self.py_func = py_func self.overloads = utils.UniqueDict() self.targetoptions = targetoptions self.locals = locals self.cache = NullCache() def enable_caching(self): self.cache = FunctionCache(self.py_func) def compile(self, sig, locals={}, **targetoptions): locs = self.locals.copy() locs.update(locals) topt = self.targetoptions.copy() topt.update(targetoptions) flags = compiler.Flags() self.targetdescr.options.parse_as_flags(flags, topt) flags.set("no_cpython_wrapper") flags.set("error_model", "numpy") # Disable loop lifting # The feature requires a real python function flags.unset("enable_looplift") return self._compile_core(sig, flags, locals) def _compile_core(self, sig, flags, locals): """ Trigger the compiler on the core function or load a previously compiled version from the cache. Returns the CompileResult. """ typingctx = self.targetdescr.typing_context targetctx = self.targetdescr.target_context @contextmanager def store_overloads_on_success(): # use to ensure overloads are stored on success try: yield except: raise else: exists = self.overloads.get(cres.signature) if exists is None: self.overloads[cres.signature] = cres # Use cache and compiler in a critical section with global_compiler_lock: with store_overloads_on_success(): # attempt look up of existing cres = self.cache.load_overload(sig, targetctx) if cres is not None: return cres # Compile args, return_type = sigutils.normalize_signature(sig) cres = compiler.compile_extra(typingctx, targetctx, self.py_func, args=args, return_type=return_type, flags=flags, locals=locals) # cache lookup failed before so safe to save self.cache.save_overload(sig, cres) return cres dispatcher_registry['npyufunc'] = UFuncDispatcher # Utility functions def _compile_element_wise_function(nb_func, targetoptions, sig): # Do compilation # Return CompileResult to test cres = nb_func.compile(sig, **targetoptions) args, return_type = 
sigutils.normalize_signature(sig) return cres, args, return_type def _finalize_ufunc_signature(cres, args, return_type): '''Given a compilation result, argument types, and a return type, build a valid Numba signature after validating that it doesn't violate the constraints for the compilation mode. ''' if return_type is None: if cres.objectmode: # Object mode is used and return type is not specified raise TypeError("return type must be specified for object mode") else: return_type = cres.signature.return_type assert return_type != types.pyobject return return_type(*args) def _build_element_wise_ufunc_wrapper(cres, signature): '''Build a wrapper for the ufunc loop entry point given by the compilation result object, using the element-wise signature. ''' ctx = cres.target_context library = cres.library fname = cres.fndesc.llvm_func_name with global_compiler_lock: ptr = build_ufunc_wrapper(library, ctx, fname, signature, cres.objectmode, cres) # Get dtypes dtypenums = [as_dtype(a).num for a in signature.args] dtypenums.append(as_dtype(signature.return_type).num) return dtypenums, ptr, cres.environment _identities = { 0: _internal.PyUFunc_Zero, 1: _internal.PyUFunc_One, None: _internal.PyUFunc_None, "reorderable": _internal.PyUFunc_ReorderableNone, } def parse_identity(identity): """ Parse an identity value and return the corresponding low-level value for Numpy. 
""" try: identity = _identities[identity] except KeyError: raise ValueError("Invalid identity value %r" % (identity,)) return identity # Class definitions class _BaseUFuncBuilder(object): def add(self, sig=None): if hasattr(self, 'targetoptions'): targetoptions = self.targetoptions else: targetoptions = self.nb_func.targetoptions cres, args, return_type = _compile_element_wise_function( self.nb_func, targetoptions, sig) sig = self._finalize_signature(cres, args, return_type) self._sigs.append(sig) self._cres[sig] = cres return cres def disable_compile(self): """ Disable the compilation of new signatures at call time. """ # Override this for implementations that support lazy compilation class UFuncBuilder(_BaseUFuncBuilder): def __init__(self, py_func, identity=None, cache=False, targetoptions={}): self.py_func = py_func self.identity = parse_identity(identity) self.nb_func = jit(target='npyufunc', cache=cache, **targetoptions)(py_func) self._sigs = [] self._cres = {} def _finalize_signature(self, cres, args, return_type): '''Slated for deprecation, use ufuncbuilder._finalize_ufunc_signature() instead. ''' return _finalize_ufunc_signature(cres, args, return_type) def build_ufunc(self): with global_compiler_lock: dtypelist = [] ptrlist = [] if not self.nb_func: raise TypeError("No definition") # Get signature in the order they are added keepalive = [] cres = None for sig in self._sigs: cres = self._cres[sig] dtypenums, ptr, env = self.build(cres, sig) dtypelist.append(dtypenums) ptrlist.append(utils.longint(ptr)) keepalive.append((cres.library, env)) datlist = [None] * len(ptrlist) if cres is None: argspec = inspect.getargspec(self.py_func) inct = len(argspec.args) else: inct = len(cres.signature.args) outct = 1 # Becareful that fromfunc does not provide full error checking yet. # If typenum is out-of-bound, we have nasty memory corruptions. # For instance, -1 for typenum will cause segfault. 
# If elements of type-list (2nd arg) is tuple instead, # there will also memory corruption. (Seems like code rewrite.) ufunc = _internal.fromfunc(self.py_func.__name__, self.py_func.__doc__, ptrlist, dtypelist, inct, outct, datlist, keepalive, self.identity) return ufunc def build(self, cres, signature): '''Slated for deprecation, use ufuncbuilder._build_element_wise_ufunc_wrapper(). ''' return _build_element_wise_ufunc_wrapper(cres, signature) class GUFuncBuilder(_BaseUFuncBuilder): # TODO handle scalar def __init__(self, py_func, signature, identity=None, cache=False, targetoptions={}): self.py_func = py_func self.identity = parse_identity(identity) self.nb_func = jit(target='npyufunc', cache=cache)(py_func) self.signature = signature self.sin, self.sout = parse_signature(signature) self.targetoptions = targetoptions self.cache = cache self._sigs = [] self._cres = {} def _finalize_signature(self, cres, args, return_type): if not cres.objectmode and cres.signature.return_type != types.void: raise TypeError("gufunc kernel must have void return type") if return_type is None: return_type = types.void return return_type(*args) @global_compiler_lock def build_ufunc(self): dtypelist = [] ptrlist = [] if not self.nb_func: raise TypeError("No definition") # Get signature in the order they are added keepalive = [] for sig in self._sigs: cres = self._cres[sig] dtypenums, ptr, env = self.build(cres) dtypelist.append(dtypenums) ptrlist.append(utils.longint(ptr)) keepalive.append((cres.library, env)) datlist = [None] * len(ptrlist) inct = len(self.sin) outct = len(self.sout) # Pass envs to fromfuncsig to bind to the lifetime of the ufunc object ufunc = _internal.fromfunc(self.py_func.__name__, self.py_func.__doc__, ptrlist, dtypelist, inct, outct, datlist, keepalive, self.identity, self.signature) return ufunc def build(self, cres): """ Returns (dtype numbers, function ptr, EnvironmentObject) """ # Buider wrapper for ufunc entry point signature = cres.signature ptr, env, 
wrapper_name = build_gufunc_wrapper(self.py_func, cres, self.sin, self.sout, cache=self.cache) # Get dtypes dtypenums = [] for a in signature.args: if isinstance(a, types.Array): ty = a.dtype else: ty = a dtypenums.append(as_dtype(ty).num) return dtypenums, ptr, env
33.210682
84
0.619818
from __future__ import print_function, division, absolute_import import warnings import inspect from contextlib import contextmanager import numpy as np from numba.decorators import jit from numba.targets.descriptors import TargetDescriptor from numba.targets.options import TargetOptions from numba.targets.registry import dispatcher_registry, cpu_target from numba.targets.cpu import FastMathOptions from numba import utils, compiler, types, sigutils from numba.numpy_support import as_dtype from . import _internal from .sigparse import parse_signature from .wrappers import build_ufunc_wrapper, build_gufunc_wrapper from numba.caching import FunctionCache, NullCache from numba.compiler_lock import global_compiler_lock class UFuncTargetOptions(TargetOptions): OPTIONS = { "nopython" : bool, "forceobj" : bool, "fastmath" : FastMathOptions, } class UFuncTarget(TargetDescriptor): options = UFuncTargetOptions @property def typing_context(self): return cpu_target.typing_context @property def target_context(self): return cpu_target.target_context ufunc_target = UFuncTarget() class UFuncDispatcher(object): targetdescr = ufunc_target def __init__(self, py_func, locals={}, targetoptions={}): self.py_func = py_func self.overloads = utils.UniqueDict() self.targetoptions = targetoptions self.locals = locals self.cache = NullCache() def enable_caching(self): self.cache = FunctionCache(self.py_func) def compile(self, sig, locals={}, **targetoptions): locs = self.locals.copy() locs.update(locals) topt = self.targetoptions.copy() topt.update(targetoptions) flags = compiler.Flags() self.targetdescr.options.parse_as_flags(flags, topt) flags.set("no_cpython_wrapper") flags.set("error_model", "numpy") flags.unset("enable_looplift") return self._compile_core(sig, flags, locals) def _compile_core(self, sig, flags, locals): typingctx = self.targetdescr.typing_context targetctx = self.targetdescr.target_context @contextmanager def store_overloads_on_success(): try: yield except: raise else: 
exists = self.overloads.get(cres.signature) if exists is None: self.overloads[cres.signature] = cres with global_compiler_lock: with store_overloads_on_success(): cres = self.cache.load_overload(sig, targetctx) if cres is not None: return cres args, return_type = sigutils.normalize_signature(sig) cres = compiler.compile_extra(typingctx, targetctx, self.py_func, args=args, return_type=return_type, flags=flags, locals=locals) self.cache.save_overload(sig, cres) return cres dispatcher_registry['npyufunc'] = UFuncDispatcher def _compile_element_wise_function(nb_func, targetoptions, sig): cres = nb_func.compile(sig, **targetoptions) args, return_type = sigutils.normalize_signature(sig) return cres, args, return_type def _finalize_ufunc_signature(cres, args, return_type): if return_type is None: if cres.objectmode: raise TypeError("return type must be specified for object mode") else: return_type = cres.signature.return_type assert return_type != types.pyobject return return_type(*args) def _build_element_wise_ufunc_wrapper(cres, signature): ctx = cres.target_context library = cres.library fname = cres.fndesc.llvm_func_name with global_compiler_lock: ptr = build_ufunc_wrapper(library, ctx, fname, signature, cres.objectmode, cres) dtypenums = [as_dtype(a).num for a in signature.args] dtypenums.append(as_dtype(signature.return_type).num) return dtypenums, ptr, cres.environment _identities = { 0: _internal.PyUFunc_Zero, 1: _internal.PyUFunc_One, None: _internal.PyUFunc_None, "reorderable": _internal.PyUFunc_ReorderableNone, } def parse_identity(identity): try: identity = _identities[identity] except KeyError: raise ValueError("Invalid identity value %r" % (identity,)) return identity class _BaseUFuncBuilder(object): def add(self, sig=None): if hasattr(self, 'targetoptions'): targetoptions = self.targetoptions else: targetoptions = self.nb_func.targetoptions cres, args, return_type = _compile_element_wise_function( self.nb_func, targetoptions, sig) sig = 
self._finalize_signature(cres, args, return_type) self._sigs.append(sig) self._cres[sig] = cres return cres def disable_compile(self): class UFuncBuilder(_BaseUFuncBuilder): def __init__(self, py_func, identity=None, cache=False, targetoptions={}): self.py_func = py_func self.identity = parse_identity(identity) self.nb_func = jit(target='npyufunc', cache=cache, **targetoptions)(py_func) self._sigs = [] self._cres = {} def _finalize_signature(self, cres, args, return_type): return _finalize_ufunc_signature(cres, args, return_type) def build_ufunc(self): with global_compiler_lock: dtypelist = [] ptrlist = [] if not self.nb_func: raise TypeError("No definition") keepalive = [] cres = None for sig in self._sigs: cres = self._cres[sig] dtypenums, ptr, env = self.build(cres, sig) dtypelist.append(dtypenums) ptrlist.append(utils.longint(ptr)) keepalive.append((cres.library, env)) datlist = [None] * len(ptrlist) if cres is None: argspec = inspect.getargspec(self.py_func) inct = len(argspec.args) else: inct = len(cres.signature.args) outct = 1 ufunc = _internal.fromfunc(self.py_func.__name__, self.py_func.__doc__, ptrlist, dtypelist, inct, outct, datlist, keepalive, self.identity) return ufunc def build(self, cres, signature): return _build_element_wise_ufunc_wrapper(cres, signature) class GUFuncBuilder(_BaseUFuncBuilder): def __init__(self, py_func, signature, identity=None, cache=False, targetoptions={}): self.py_func = py_func self.identity = parse_identity(identity) self.nb_func = jit(target='npyufunc', cache=cache)(py_func) self.signature = signature self.sin, self.sout = parse_signature(signature) self.targetoptions = targetoptions self.cache = cache self._sigs = [] self._cres = {} def _finalize_signature(self, cres, args, return_type): if not cres.objectmode and cres.signature.return_type != types.void: raise TypeError("gufunc kernel must have void return type") if return_type is None: return_type = types.void return return_type(*args) @global_compiler_lock def 
build_ufunc(self): dtypelist = [] ptrlist = [] if not self.nb_func: raise TypeError("No definition") keepalive = [] for sig in self._sigs: cres = self._cres[sig] dtypenums, ptr, env = self.build(cres) dtypelist.append(dtypenums) ptrlist.append(utils.longint(ptr)) keepalive.append((cres.library, env)) datlist = [None] * len(ptrlist) inct = len(self.sin) outct = len(self.sout) ufunc = _internal.fromfunc(self.py_func.__name__, self.py_func.__doc__, ptrlist, dtypelist, inct, outct, datlist, keepalive, self.identity, self.signature) return ufunc def build(self, cres): signature = cres.signature ptr, env, wrapper_name = build_gufunc_wrapper(self.py_func, cres, self.sin, self.sout, cache=self.cache) dtypenums = [] for a in signature.args: if isinstance(a, types.Array): ty = a.dtype else: ty = a dtypenums.append(as_dtype(ty).num) return dtypenums, ptr, env
true
true
f7f69229572cdacd38170e7f318760b754021f5d
1,898
py
Python
lc/worker.py
kain-jy/large-colony
3b751412869e903012935b76c17dd705f25ce3a0
[ "MIT" ]
null
null
null
lc/worker.py
kain-jy/large-colony
3b751412869e903012935b76c17dd705f25ce3a0
[ "MIT" ]
null
null
null
lc/worker.py
kain-jy/large-colony
3b751412869e903012935b76c17dd705f25ce3a0
[ "MIT" ]
null
null
null
# coding: utf-8 import sys import os BASEPATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, BASEPATH) import optparse import time import zipfile from redis import Redis import subprocess from lib import model from lib.utils import * TMPDIR = os.path.join(BASEPATH, "tmp") parser = optparse.OptionParser() parser.add_option("--redis_host", dest="redis_host", help="", default="127.0.0.1") parser.add_option("--redis_port", dest="redis_port", type="int", help="", default=6379) parser.add_option("--redis_db", dest="redis_db", type="int", help="", default=0) parser.add_option("-i", "--interval", dest="interval", type="int", help="", default=10) parser.add_option("-t", "--target", dest="target", help="", default="127.0.0.1:%s" % BASEPATH) (options, args) = parser.parse_args() redis_session = Redis(host=options.redis_host, port=options.redis_port, db=options.redis_db) while True: try: id = redis_session.lpop("lc:task") if id: model_options = redis_session.hgetall("lc:%s" % id) redis_session.hset("lc:stat", id, 1) for k,v in model_options.items(): os.environ[k] = v print "start %s" % id subprocess.call("python %s -t True -s %s -o %s" % ( BASEPATH, model_options.get('STEP',100), os.path.join(BASEPATH, "tmp", "%s.zip" % id)), shell=True) subprocess.call("scp -i %s %s %s" % ( os.path.join(os.path.dirname(BASEPATH),"batch","ssh","id_rsa"), os.path.join(BASEPATH, "tmp", "%s.zip" % id), options.target), shell=True) print "done %s" % id redis_session.hset("lc:stat", id, 2) else: time.sleep(options.interval) except KeyboardInterrupt: break
36.5
94
0.590095
import sys import os BASEPATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, BASEPATH) import optparse import time import zipfile from redis import Redis import subprocess from lib import model from lib.utils import * TMPDIR = os.path.join(BASEPATH, "tmp") parser = optparse.OptionParser() parser.add_option("--redis_host", dest="redis_host", help="", default="127.0.0.1") parser.add_option("--redis_port", dest="redis_port", type="int", help="", default=6379) parser.add_option("--redis_db", dest="redis_db", type="int", help="", default=0) parser.add_option("-i", "--interval", dest="interval", type="int", help="", default=10) parser.add_option("-t", "--target", dest="target", help="", default="127.0.0.1:%s" % BASEPATH) (options, args) = parser.parse_args() redis_session = Redis(host=options.redis_host, port=options.redis_port, db=options.redis_db) while True: try: id = redis_session.lpop("lc:task") if id: model_options = redis_session.hgetall("lc:%s" % id) redis_session.hset("lc:stat", id, 1) for k,v in model_options.items(): os.environ[k] = v print "start %s" % id subprocess.call("python %s -t True -s %s -o %s" % ( BASEPATH, model_options.get('STEP',100), os.path.join(BASEPATH, "tmp", "%s.zip" % id)), shell=True) subprocess.call("scp -i %s %s %s" % ( os.path.join(os.path.dirname(BASEPATH),"batch","ssh","id_rsa"), os.path.join(BASEPATH, "tmp", "%s.zip" % id), options.target), shell=True) print "done %s" % id redis_session.hset("lc:stat", id, 2) else: time.sleep(options.interval) except KeyboardInterrupt: break
false
true
f7f69259fbe10efefc5779111fcbf84166454ec6
2,399
py
Python
configs/SETR/SETR_PUP_DeiT_768x768_40k_cityscapes_bs_8_MS.py
cocolord/mmsegmentation
45db7269d7aa40f8aac5ddaabf7e1b4b01353ca5
[ "Apache-2.0" ]
1
2021-05-27T11:28:16.000Z
2021-05-27T11:28:16.000Z
configs/SETR/SETR_PUP_DeiT_768x768_40k_cityscapes_bs_8_MS.py
cocolord/mmsegmentation
45db7269d7aa40f8aac5ddaabf7e1b4b01353ca5
[ "Apache-2.0" ]
null
null
null
configs/SETR/SETR_PUP_DeiT_768x768_40k_cityscapes_bs_8_MS.py
cocolord/mmsegmentation
45db7269d7aa40f8aac5ddaabf7e1b4b01353ca5
[ "Apache-2.0" ]
null
null
null
_base_ = [ '../_base_/models/setr_naive_pup.py', '../_base_/datasets/cityscapes_768x768_multi_scale.py', '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' ] norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( backbone=dict(img_size=768,align_corners=False, pos_embed_interp=True,drop_rate=0., model_name='deit_base_distilled_path16_384', embed_dim=768, depth=12, num_heads=12), decode_head=dict(img_size=768,align_corners=False,num_conv=4,upsampling_method='bilinear', num_upsampe_layer=4, embed_dim=768, in_index=11), auxiliary_head=[dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=4, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=7, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=9, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=11, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) ]) optimizer = dict(lr=0.01, weight_decay=0.0, paramwise_cfg = dict(custom_keys={'head': dict(lr_mult=10.)}) ) crop_size = (768, 768) test_cfg = dict(mode='slide', crop_size=crop_size, 
stride=(512, 512)) find_unused_parameters = True data = dict(samples_per_gpu=1)
28.903614
102
0.689871
_base_ = [ '../_base_/models/setr_naive_pup.py', '../_base_/datasets/cityscapes_768x768_multi_scale.py', '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' ] norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( backbone=dict(img_size=768,align_corners=False, pos_embed_interp=True,drop_rate=0., model_name='deit_base_distilled_path16_384', embed_dim=768, depth=12, num_heads=12), decode_head=dict(img_size=768,align_corners=False,num_conv=4,upsampling_method='bilinear', num_upsampe_layer=4, embed_dim=768, in_index=11), auxiliary_head=[dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=4, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=7, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=9, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), dict( type='VisionTransformerUpHead', in_channels=1024, channels=512, in_index=11, img_size=768, embed_dim=768, num_classes=19, norm_cfg=norm_cfg, num_conv=2, upsampling_method='bilinear', num_upsampe_layer=2, align_corners=False, loss_decode=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) ]) optimizer = dict(lr=0.01, weight_decay=0.0, paramwise_cfg = dict(custom_keys={'head': dict(lr_mult=10.)}) ) crop_size = (768, 768) test_cfg = dict(mode='slide', crop_size=crop_size, 
stride=(512, 512)) find_unused_parameters = True data = dict(samples_per_gpu=1)
true
true
f7f692a517ace9157684e29722d31874bed902b1
13,421
py
Python
transformer.py
tommccoy1/copynet
6bc6f3a81a4922d06bdd9e86dcd7125251282076
[ "MIT" ]
null
null
null
transformer.py
tommccoy1/copynet
6bc6f3a81a4922d06bdd9e86dcd7125251282076
[ "MIT" ]
null
null
null
transformer.py
tommccoy1/copynet
6bc6f3a81a4922d06bdd9e86dcd7125251282076
[ "MIT" ]
null
null
null
# From https://github.com/ischlag/TP-Transformer import math import torch import torch.nn as nn import torch.nn.functional as F def build_transformer(input_dim=None, hidden=None, dropout=None, max_length=None, n_layers=None, n_heads=None, myfilter=None, pad_idx=None): embedding = TokenEmbedding(d_vocab=input_dim, d_h=hidden, d_p=hidden, dropout=dropout, max_length=200) encoder = Encoder(hid_dim=hidden, n_layers=n_layers, n_heads=n_heads, pf_dim=myfilter, encoder_layer=EncoderLayer, self_attention=SelfAttention, positionwise_feedforward=PositionwiseFeedforward, dropout=dropout) decoder = Decoder(hid_dim=hidden, n_layers=n_layers, n_heads=n_heads, pf_dim=myfilter, decoder_layer=DecoderLayer, self_attention=SelfAttention, positionwise_feedforward=PositionwiseFeedforward, dropout=dropout) model = Seq2Seq(embedding=embedding, encoder=encoder, decoder=decoder, pad_idx=pad_idx) return model class TokenEmbedding(nn.Module): def __init__(self, d_vocab, d_h, d_p, dropout, max_length): super(TokenEmbedding, self).__init__() self.dropout = nn.Dropout(dropout) # token encodings self.d_h = d_h self.tok_embedding = nn.Embedding(d_vocab, d_h) self.scale = torch.sqrt(torch.FloatTensor([d_h])) # Compute the positional encodings once in log space. 
pe = torch.zeros(max_length, d_p) position = torch.arange(0., max_length).unsqueeze(1) div_term = torch.exp(torch.arange(0., d_p, 2) * -(math.log(10000.0) / d_p)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) # pe = [1, seq_len, d_p] self.reset_parameters() # init tok_embedding to N(0,1/sqrt(d_h)) def forward(self, src): # src = [batch_size, src_seq_len] # scale up embedding to be N(0,1) tok_emb = self.tok_embedding(src) * self.scale.to(src.device) pos_emb = torch.autograd.Variable(self.pe[:, :src.size(1)], requires_grad=False) x = tok_emb + pos_emb x = self.dropout(x) # src = [batch_size, src_seq_len, d_h] return x def transpose_forward(self, trg): # trg = [batch_size, trg_seq_len, d_h] logits = torch.einsum('btd,vd->btv',trg,self.tok_embedding.weight) # logits = torch.matmul(trg, torch.transpose(self.tok_embedding.weight, 0, 1)) # logits = [batch_size, trg_seq_len, d_vocab] return logits def reset_parameters(self): nn.init.normal_(self.tok_embedding.weight, mean=0, std=1./math.sqrt(self.d_h)) class Encoder(nn.Module): def __init__(self, hid_dim, n_layers, n_heads, pf_dim, encoder_layer, self_attention, positionwise_feedforward, dropout): super().__init__() self.layers = nn.ModuleList([encoder_layer(hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout) for _ in range(n_layers)]) def forward(self, src, src_mask): # src = [batch_size, src_seq_len] # src_mask = [batch_size, src_seq_len] for layer in self.layers: src = layer(src, src_mask) return src class EncoderLayer(nn.Module): def __init__(self, hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout): super().__init__() self.layernorm1 = nn.LayerNorm(hid_dim) self.layernorm2 = nn.LayerNorm(hid_dim) self.layernorm3 = nn.LayerNorm(hid_dim) self.MHA = self_attention(hid_dim, n_heads, dropout) self.densefilter = positionwise_feedforward(hid_dim, pf_dim, dropout) self.dropout1 = 
nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, src, src_mask): # src = [batch_size, src_seq_size, hid_dim] # src_mask = [batch_size, src_seq_size] # sublayer 1 z = self.layernorm1(src) z, attn = self.MHA(z, z, z, src_mask) z = self.dropout1(z) src = src + z # sublayer 2 z = self.layernorm2(src) z = self.densefilter(z) z = self.dropout2(z) src = src + z return self.layernorm3(src) class SelfAttention(nn.Module): def __init__(self, hid_dim, n_heads, dropout): super().__init__() self.hid_dim = hid_dim self.n_heads = n_heads assert hid_dim % n_heads == 0 self.w_q = nn.Linear(hid_dim, hid_dim) self.w_k = nn.Linear(hid_dim, hid_dim) self.w_v = nn.Linear(hid_dim, hid_dim) self.linear = nn.Linear(hid_dim, hid_dim) self.dropout = nn.Dropout(dropout) self.scale = torch.sqrt(torch.FloatTensor([hid_dim // n_heads])) self.reset_parameters() def forward(self, query, key, value, mask=None): # query = key = value = [batch_size, seq_len, hid_dim] # src_mask = [batch_size, 1, 1, pad_seq] # trg_mask = [batch_size, 1, pad_seq, past_seq] bsz = query.shape[0] Q = self.w_q(query) K = self.w_k(key) V = self.w_v(value) # Q, K, V = [batch_size, seq_len, hid_dim] Q = Q.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\ .permute(0,2,1,3) K = K.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\ .permute(0,2,1,3) V = V.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\ .permute(0,2,1,3) # Q, K, V = [batch_size, n_heads, seq_size, hid_dim // n heads] energy = torch.einsum('bhid,bhjd->bhij',Q,K) / self.scale.to(key.device) # energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale.to(key.device) # energy = [batch_size, n_heads, query_pos , key_pos] # src_mask = [batch_size, 1 , 1 , attn] # trg_mask = [batch_size, 1 , query_specific, attn] if mask is not None: energy = energy.masked_fill(mask == 0, -1e10) attention = self.dropout(F.softmax(energy, dim=-1)) # attention = [batch_size, n_heads, seq_size, seq_size] x = 
torch.einsum('bhjd,bhij->bhid',V,attention) # x = torch.matmul(attention, V) # x = [batch_size, n_heads, seq_size, hid_dim // n heads] x = x.permute(0, 2, 1, 3).contiguous() # x = [batch_size, seq_size, n_heads, hid_dim // n heads] x = x.view(bsz, -1, self.n_heads * (self.hid_dim // self.n_heads)) # x = [batch_size, src_seq_size, hid_dim] x = self.linear(x) # x = [batch_size, seq_size, hid_dim] return x, attention.detach() def reset_parameters(self): # nn.init.xavier_normal_(self.w_q.weight) # nn.init.xavier_normal_(self.w_k.weight) # nn.init.xavier_normal_(self.w_v.weight) # nn.init.xavier_normal_(self.linear.weight) nn.init.xavier_uniform_(self.w_q.weight) nn.init.xavier_uniform_(self.w_k.weight) nn.init.xavier_uniform_(self.w_v.weight) nn.init.xavier_uniform_(self.linear.weight) class PositionwiseFeedforward(nn.Module): def __init__(self, hid_dim, pf_dim, dropout): super().__init__() self.hid_dim = hid_dim self.pf_dim = pf_dim self.linear1 = nn.Linear(hid_dim, pf_dim) self.linear2 = nn.Linear(pf_dim, hid_dim) self.dropout = nn.Dropout(dropout) self.reset_parameters() def forward(self, x): # x = [batch_size, seq_size, hid_dim] x = self.linear1(x) x = self.dropout(F.relu(x)) x = self.linear2(x) # x = [batch_size, seq_size, hid_dim] return x def reset_parameters(self): #nn.init.kaiming_normal_(self.linear1.weight, a=math.sqrt(5)) #nn.init.xavier_normal_(self.linear2.weight) nn.init.xavier_uniform_(self.linear1.weight) nn.init.xavier_uniform_(self.linear2.weight) class Decoder(nn.Module): def __init__(self, hid_dim, n_layers, n_heads, pf_dim, decoder_layer, self_attention, positionwise_feedforward, dropout): super().__init__() self.layers = nn.ModuleList([decoder_layer(hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout) for _ in range(n_layers)]) def forward(self, trg, src, trg_mask, src_mask): # trg = [batch_size, trg_seq_size, hid_dim] # src = [batch_size, src_seq_size, hid_dim] # trg_mask = [batch_size, trg_seq_size] # src_mask = 
[batch_size, src_seq_size] for layer in self.layers: trg = layer(trg, src, trg_mask, src_mask) return trg class DecoderLayer(nn.Module): def __init__(self, hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout): super().__init__() self.layernorm1 = nn.LayerNorm(hid_dim) self.layernorm2 = nn.LayerNorm(hid_dim) self.layernorm3 = nn.LayerNorm(hid_dim) self.layernorm4 = nn.LayerNorm(hid_dim) self.selfAttn = self_attention(hid_dim, n_heads, dropout) self.encAttn = self_attention(hid_dim, n_heads, dropout) self.densefilter = positionwise_feedforward(hid_dim, pf_dim, dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) def forward(self, trg, src, trg_mask, src_mask): # trg = [batch_size, trg_seq_size, hid_dim] # src = [batch_size, src_seq_size, hid_dim] # trg_mask = [batch_size, trg_seq_size] # src_mask = [batch_size, src_seq_size] # self attention z = self.layernorm1(trg) z, attn = self.selfAttn(z, z, z, trg_mask) z = self.dropout1(z) trg = trg + z # encoder attention z = self.layernorm2(trg) z, attn = self.encAttn(z, src, src, src_mask) z = self.dropout2(z) trg = trg + z # dense filter z = self.layernorm3(trg) z = self.densefilter(z) z = self.dropout3(z) trg = trg + z return self.layernorm4(trg) class Seq2Seq(nn.Module): def __init__(self, embedding, encoder, decoder, pad_idx): super().__init__() self.embedding = embedding self.encoder = encoder self.decoder = decoder self.pad_idx = pad_idx def make_masks(self, src, trg): # src = [batch_size, src_seq_size] # trg = [batch_size, trg_seq_size] src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2) trg_pad_mask = (trg != self.pad_idx).unsqueeze(1).unsqueeze(3) # trg_mask = [batch_size, 1, trg_seq_size, 1] trg_len = trg.shape[1] trg_sub_mask = torch.tril( torch.ones((trg_len, trg_len), dtype=torch.uint8, device=trg.device)) #print(torch.BoolTensor(trg_pad_mask)) #print(trg_sub_mask) trg_mask = trg_pad_mask & 
trg_sub_mask.type(torch.BoolTensor) # src_mask = [batch_size, 1, 1, pad_seq] # trg_mask = [batch_size, 1, pad_seq, past_seq] return src_mask, trg_mask def forward(self, src, trg): # src = [batch_size, src_seq_size] # trg = [batch_size, trg_seq_size] src_mask, trg_mask = self.make_masks(src, trg) # src_mask = [batch_size, 1, 1, pad_seq] # trg_mask = [batch_size, 1, pad_seq, past_seq] src = self.embedding(src) trg = self.embedding(trg) # src = [batch_size, src_seq_size, hid_dim] enc_src = self.encoder(src, src_mask) # enc_src = [batch_size, src_seq_size, hid_dim] out = self.decoder(trg, enc_src, trg_mask, src_mask) # out = [batch_size, trg_seq_size, hid_dim] logits = self.embedding.transpose_forward(out) # logits = [batch_size, trg_seq_size, d_vocab] return logits def make_src_mask(self, src): # src = [batch size, src sent len] src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2) return src_mask def make_trg_mask(self, trg): # trg = [batch size, trg sent len] trg_pad_mask = (trg != self.pad_idx).unsqueeze(1).unsqueeze(3) trg_len = trg.shape[1] trg_sub_mask = torch.tril( torch.ones((trg_len, trg_len), dtype=torch.uint8, device=trg.device)) trg_mask = trg_pad_mask & trg_sub_mask.type(torch.BoolTensor) return trg_mask def greedy_inference(model, src, sos_idx, eos_idx, max_length, device): model.eval() src = src.to(device) src_mask = model.make_src_mask(src) src_emb = model.embedding(src) # run encoder enc_src = model.encoder(src_emb, src_mask) trg = torch.ones(src.shape[0], 1).fill_(sos_idx).type_as(src).to(device) done = torch.zeros(src.shape[0]).type(torch.uint8).to(device) for _ in range(max_length): trg_emb = model.embedding(trg) trg_mask = model.make_trg_mask(trg) # run decoder output = model.decoder(src=enc_src, trg=trg_emb, src_mask=src_mask, trg_mask=trg_mask) logits = model.embedding.transpose_forward(output) pred = torch.argmax(logits[:,[-1],:], dim=-1) trg = torch.cat([trg, pred], dim=1) eos_match = (pred.squeeze(1) == eos_idx) done = 
done.type(torch.BoolTensor) | eos_match if done.sum() == src.shape[0]: break return trg
31.211628
140
0.628195
import math import torch import torch.nn as nn import torch.nn.functional as F def build_transformer(input_dim=None, hidden=None, dropout=None, max_length=None, n_layers=None, n_heads=None, myfilter=None, pad_idx=None): embedding = TokenEmbedding(d_vocab=input_dim, d_h=hidden, d_p=hidden, dropout=dropout, max_length=200) encoder = Encoder(hid_dim=hidden, n_layers=n_layers, n_heads=n_heads, pf_dim=myfilter, encoder_layer=EncoderLayer, self_attention=SelfAttention, positionwise_feedforward=PositionwiseFeedforward, dropout=dropout) decoder = Decoder(hid_dim=hidden, n_layers=n_layers, n_heads=n_heads, pf_dim=myfilter, decoder_layer=DecoderLayer, self_attention=SelfAttention, positionwise_feedforward=PositionwiseFeedforward, dropout=dropout) model = Seq2Seq(embedding=embedding, encoder=encoder, decoder=decoder, pad_idx=pad_idx) return model class TokenEmbedding(nn.Module): def __init__(self, d_vocab, d_h, d_p, dropout, max_length): super(TokenEmbedding, self).__init__() self.dropout = nn.Dropout(dropout) self.d_h = d_h self.tok_embedding = nn.Embedding(d_vocab, d_h) self.scale = torch.sqrt(torch.FloatTensor([d_h])) pe = torch.zeros(max_length, d_p) position = torch.arange(0., max_length).unsqueeze(1) div_term = torch.exp(torch.arange(0., d_p, 2) * -(math.log(10000.0) / d_p)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) self.reset_parameters() def forward(self, src): tok_emb = self.tok_embedding(src) * self.scale.to(src.device) pos_emb = torch.autograd.Variable(self.pe[:, :src.size(1)], requires_grad=False) x = tok_emb + pos_emb x = self.dropout(x) return x def transpose_forward(self, trg): logits = torch.einsum('btd,vd->btv',trg,self.tok_embedding.weight) return logits def reset_parameters(self): nn.init.normal_(self.tok_embedding.weight, mean=0, std=1./math.sqrt(self.d_h)) class Encoder(nn.Module): def __init__(self, hid_dim, n_layers, n_heads, pf_dim, encoder_layer, 
self_attention, positionwise_feedforward, dropout): super().__init__() self.layers = nn.ModuleList([encoder_layer(hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout) for _ in range(n_layers)]) def forward(self, src, src_mask): for layer in self.layers: src = layer(src, src_mask) return src class EncoderLayer(nn.Module): def __init__(self, hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout): super().__init__() self.layernorm1 = nn.LayerNorm(hid_dim) self.layernorm2 = nn.LayerNorm(hid_dim) self.layernorm3 = nn.LayerNorm(hid_dim) self.MHA = self_attention(hid_dim, n_heads, dropout) self.densefilter = positionwise_feedforward(hid_dim, pf_dim, dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, src, src_mask): z = self.layernorm1(src) z, attn = self.MHA(z, z, z, src_mask) z = self.dropout1(z) src = src + z z = self.layernorm2(src) z = self.densefilter(z) z = self.dropout2(z) src = src + z return self.layernorm3(src) class SelfAttention(nn.Module): def __init__(self, hid_dim, n_heads, dropout): super().__init__() self.hid_dim = hid_dim self.n_heads = n_heads assert hid_dim % n_heads == 0 self.w_q = nn.Linear(hid_dim, hid_dim) self.w_k = nn.Linear(hid_dim, hid_dim) self.w_v = nn.Linear(hid_dim, hid_dim) self.linear = nn.Linear(hid_dim, hid_dim) self.dropout = nn.Dropout(dropout) self.scale = torch.sqrt(torch.FloatTensor([hid_dim // n_heads])) self.reset_parameters() def forward(self, query, key, value, mask=None): bsz = query.shape[0] Q = self.w_q(query) K = self.w_k(key) V = self.w_v(value) Q = Q.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\ .permute(0,2,1,3) K = K.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\ .permute(0,2,1,3) V = V.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\ .permute(0,2,1,3) energy = torch.einsum('bhid,bhjd->bhij',Q,K) / self.scale.to(key.device) if mask is not None: energy = energy.masked_fill(mask == 0, -1e10) 
attention = self.dropout(F.softmax(energy, dim=-1)) x = torch.einsum('bhjd,bhij->bhid',V,attention) x = x.permute(0, 2, 1, 3).contiguous() x = x.view(bsz, -1, self.n_heads * (self.hid_dim // self.n_heads)) x = self.linear(x) return x, attention.detach() def reset_parameters(self): nn.init.xavier_uniform_(self.w_q.weight) nn.init.xavier_uniform_(self.w_k.weight) nn.init.xavier_uniform_(self.w_v.weight) nn.init.xavier_uniform_(self.linear.weight) class PositionwiseFeedforward(nn.Module): def __init__(self, hid_dim, pf_dim, dropout): super().__init__() self.hid_dim = hid_dim self.pf_dim = pf_dim self.linear1 = nn.Linear(hid_dim, pf_dim) self.linear2 = nn.Linear(pf_dim, hid_dim) self.dropout = nn.Dropout(dropout) self.reset_parameters() def forward(self, x): x = self.linear1(x) x = self.dropout(F.relu(x)) x = self.linear2(x) return x def reset_parameters(self): nn.init.xavier_uniform_(self.linear1.weight) nn.init.xavier_uniform_(self.linear2.weight) class Decoder(nn.Module): def __init__(self, hid_dim, n_layers, n_heads, pf_dim, decoder_layer, self_attention, positionwise_feedforward, dropout): super().__init__() self.layers = nn.ModuleList([decoder_layer(hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout) for _ in range(n_layers)]) def forward(self, trg, src, trg_mask, src_mask): for layer in self.layers: trg = layer(trg, src, trg_mask, src_mask) return trg class DecoderLayer(nn.Module): def __init__(self, hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout): super().__init__() self.layernorm1 = nn.LayerNorm(hid_dim) self.layernorm2 = nn.LayerNorm(hid_dim) self.layernorm3 = nn.LayerNorm(hid_dim) self.layernorm4 = nn.LayerNorm(hid_dim) self.selfAttn = self_attention(hid_dim, n_heads, dropout) self.encAttn = self_attention(hid_dim, n_heads, dropout) self.densefilter = positionwise_feedforward(hid_dim, pf_dim, dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = 
nn.Dropout(dropout) def forward(self, trg, src, trg_mask, src_mask): z = self.layernorm1(trg) z, attn = self.selfAttn(z, z, z, trg_mask) z = self.dropout1(z) trg = trg + z z = self.layernorm2(trg) z, attn = self.encAttn(z, src, src, src_mask) z = self.dropout2(z) trg = trg + z z = self.layernorm3(trg) z = self.densefilter(z) z = self.dropout3(z) trg = trg + z return self.layernorm4(trg) class Seq2Seq(nn.Module): def __init__(self, embedding, encoder, decoder, pad_idx): super().__init__() self.embedding = embedding self.encoder = encoder self.decoder = decoder self.pad_idx = pad_idx def make_masks(self, src, trg): src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2) trg_pad_mask = (trg != self.pad_idx).unsqueeze(1).unsqueeze(3) trg_len = trg.shape[1] trg_sub_mask = torch.tril( torch.ones((trg_len, trg_len), dtype=torch.uint8, device=trg.device)) trg_mask = trg_pad_mask & trg_sub_mask.type(torch.BoolTensor) return src_mask, trg_mask def forward(self, src, trg): src_mask, trg_mask = self.make_masks(src, trg) src = self.embedding(src) trg = self.embedding(trg) enc_src = self.encoder(src, src_mask) out = self.decoder(trg, enc_src, trg_mask, src_mask) logits = self.embedding.transpose_forward(out) return logits def make_src_mask(self, src): src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2) return src_mask def make_trg_mask(self, trg): trg_pad_mask = (trg != self.pad_idx).unsqueeze(1).unsqueeze(3) trg_len = trg.shape[1] trg_sub_mask = torch.tril( torch.ones((trg_len, trg_len), dtype=torch.uint8, device=trg.device)) trg_mask = trg_pad_mask & trg_sub_mask.type(torch.BoolTensor) return trg_mask def greedy_inference(model, src, sos_idx, eos_idx, max_length, device): model.eval() src = src.to(device) src_mask = model.make_src_mask(src) src_emb = model.embedding(src) enc_src = model.encoder(src_emb, src_mask) trg = torch.ones(src.shape[0], 1).fill_(sos_idx).type_as(src).to(device) done = torch.zeros(src.shape[0]).type(torch.uint8).to(device) for _ in 
range(max_length): trg_emb = model.embedding(trg) trg_mask = model.make_trg_mask(trg) output = model.decoder(src=enc_src, trg=trg_emb, src_mask=src_mask, trg_mask=trg_mask) logits = model.embedding.transpose_forward(output) pred = torch.argmax(logits[:,[-1],:], dim=-1) trg = torch.cat([trg, pred], dim=1) eos_match = (pred.squeeze(1) == eos_idx) done = done.type(torch.BoolTensor) | eos_match if done.sum() == src.shape[0]: break return trg
true
true
f7f692f17f23c8ead185bb277f3359b9a6817194
2,303
py
Python
datasets/helper functions/combine_A_and_B.py
anonymous191116/S3ED
250b3e39db7dfa9d2212795b6d9ea428acfa907b
[ "BSD-3-Clause" ]
null
null
null
datasets/helper functions/combine_A_and_B.py
anonymous191116/S3ED
250b3e39db7dfa9d2212795b6d9ea428acfa907b
[ "BSD-3-Clause" ]
null
null
null
datasets/helper functions/combine_A_and_B.py
anonymous191116/S3ED
250b3e39db7dfa9d2212795b6d9ea428acfa907b
[ "BSD-3-Clause" ]
null
null
null
from pdb import set_trace as st import os import numpy as np import cv2 import argparse parser = argparse.ArgumentParser('create image pairs') parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/A') parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/B') parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/A_B') parser.add_argument('--num_imgs', dest='num_imgs', help='number of images',type=int, default=100000) parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)',action='store_true') args = parser.parse_args() for arg in vars(args): print('[%s] = ' % arg, getattr(args, arg)) splits = os.listdir(args.fold_A) for sp in splits: img_fold_A = os.path.join(args.fold_A, sp) img_fold_B = os.path.join(args.fold_B, sp) img_list = os.listdir(img_fold_A) if args.use_AB: img_list = [img_path for img_path in img_list if '_A.' in img_path] num_imgs = min(args.num_imgs, len(img_list)) print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list))) img_fold_AB = os.path.join(args.fold_AB, sp) if not os.path.isdir(img_fold_AB): os.makedirs(img_fold_AB) print('split = %s, number of images = %d' % (sp, num_imgs)) for n in range(num_imgs): name_A = img_list[n] path_A = os.path.join(img_fold_A, name_A) if args.use_AB: name_B = name_A.replace('_A.', '_B.') else: name_B = name_A path_B = os.path.join(img_fold_B, name_B) if os.path.isfile(path_A) and os.path.isfile(path_B): name_AB = name_A if args.use_AB: name_AB = name_AB.replace('_A.', '.') # remove _A path_AB = os.path.join(img_fold_AB, name_AB) im_A = cv2.imread(path_A, cv2.IMREAD_COLOR) im_B = cv2.imread(path_B, cv2.IMREAD_COLOR) im_AB = np.concatenate([im_A, im_B], 1) cv2.imwrite(path_AB, im_AB)
41.872727
113
0.63439
from pdb import set_trace as st import os import numpy as np import cv2 import argparse parser = argparse.ArgumentParser('create image pairs') parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/A') parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/B') parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/A_B') parser.add_argument('--num_imgs', dest='num_imgs', help='number of images',type=int, default=100000) parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)',action='store_true') args = parser.parse_args() for arg in vars(args): print('[%s] = ' % arg, getattr(args, arg)) splits = os.listdir(args.fold_A) for sp in splits: img_fold_A = os.path.join(args.fold_A, sp) img_fold_B = os.path.join(args.fold_B, sp) img_list = os.listdir(img_fold_A) if args.use_AB: img_list = [img_path for img_path in img_list if '_A.' in img_path] num_imgs = min(args.num_imgs, len(img_list)) print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list))) img_fold_AB = os.path.join(args.fold_AB, sp) if not os.path.isdir(img_fold_AB): os.makedirs(img_fold_AB) print('split = %s, number of images = %d' % (sp, num_imgs)) for n in range(num_imgs): name_A = img_list[n] path_A = os.path.join(img_fold_A, name_A) if args.use_AB: name_B = name_A.replace('_A.', '_B.') else: name_B = name_A path_B = os.path.join(img_fold_B, name_B) if os.path.isfile(path_A) and os.path.isfile(path_B): name_AB = name_A if args.use_AB: name_AB = name_AB.replace('_A.', '.') path_AB = os.path.join(img_fold_AB, name_AB) im_A = cv2.imread(path_A, cv2.IMREAD_COLOR) im_B = cv2.imread(path_B, cv2.IMREAD_COLOR) im_AB = np.concatenate([im_A, im_B], 1) cv2.imwrite(path_AB, im_AB)
true
true
f7f6934a18d5b28cef36169cfd9d25e220fe9e03
2,046
py
Python
examples_source/1D_simulation(crystalline)/plot_6_coupled_spin_system.py
pjgrandinetti/mrsimulator
e603395e52ad162d4a9051a0741651c9030c3459
[ "BSD-3-Clause" ]
14
2019-05-28T20:06:13.000Z
2021-05-27T01:37:16.000Z
examples_source/1D_simulation(crystalline)/plot_6_coupled_spin_system.py
wushanyun64/mrsimulator
01b447239d9f469df62b7293a74a3d0c34500a19
[ "BSD-3-Clause" ]
74
2021-06-07T15:13:49.000Z
2022-03-29T20:09:19.000Z
examples_source/1D_simulation(crystalline)/plot_6_coupled_spin_system.py
wushanyun64/mrsimulator
01b447239d9f469df62b7293a74a3d0c34500a19
[ "BSD-3-Clause" ]
7
2019-05-28T20:19:29.000Z
2021-04-06T18:48:24.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Coupled spins 5/2-9/2 (Quad + J-coupling) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ²⁷Al-⁹³Nb spin system spectrum. """ # %% import matplotlib.pyplot as plt from mrsimulator import Simulator, SpinSystem from mrsimulator.methods import BlochDecayCTSpectrum from mrsimulator import signal_processing as sp # sphinx_gallery_thumbnail_number = 1 # %% # **Spin System** # # Create a 27Al-93Nb coupled spin system. spin_system = SpinSystem( sites=[ { "isotope": "27Al", "isotropic_chemical_shift": 0.0, # in ppm "quadrupolar": {"Cq": 5.0e6, "eta": 0.0}, # Cq is in Hz }, { "isotope": "93Nb", "isotropic_chemical_shift": 0.0, # in ppm }, ], couplings=[{"site_index": [0, 1], "isotropic_j": 200.0}], # j-coupling in Hz ) # %% # **Method** # # Create a central transition selective Bloch decay spectrum method. method = BlochDecayCTSpectrum( channels=["27Al"], magnetic_flux_density=9.4, # in T rotor_frequency=5e3, # in Hz spectral_dimensions=[ { "count": 2048, "spectral_width": 4.0e4, # in Hz "reference_offset": -2e3, # in Hz } ], ) # %% # **Simulator** # # Create the Simulator object and add the method and the spin system object. sim = Simulator() sim.spin_systems += [spin_system] # add the spin system sim.methods += [method] # add the method sim.run() # %% # **Post-Simulation Processing** # # Add post-simulation signal processing. processor = sp.SignalProcessor( operations=[ sp.IFFT(), sp.apodization.Exponential(FWHM="30 Hz"), sp.FFT(), ] ) processed_data = processor.apply_operations(data=sim.methods[0].simulation) # %% # **Plot** # # The plot of the simulation before signal processing. plt.figure(figsize=(4.25, 3.0)) ax = plt.subplot(projection="csdm") ax.plot(processed_data.real, color="black", linewidth=0.5) ax.invert_xaxis() plt.tight_layout() plt.show()
23.790698
81
0.613881
import matplotlib.pyplot as plt from mrsimulator import Simulator, SpinSystem from mrsimulator.methods import BlochDecayCTSpectrum from mrsimulator import signal_processing as sp spin_system = SpinSystem( sites=[ { "isotope": "27Al", "isotropic_chemical_shift": 0.0, "quadrupolar": {"Cq": 5.0e6, "eta": 0.0}, }, { "isotope": "93Nb", "isotropic_chemical_shift": 0.0, }, ], couplings=[{"site_index": [0, 1], "isotropic_j": 200.0}], ) method = BlochDecayCTSpectrum( channels=["27Al"], magnetic_flux_density=9.4, rotor_frequency=5e3, spectral_dimensions=[ { "count": 2048, "spectral_width": 4.0e4, "reference_offset": -2e3, } ], ) sim = Simulator() sim.spin_systems += [spin_system] sim.methods += [method] sim.run() processor = sp.SignalProcessor( operations=[ sp.IFFT(), sp.apodization.Exponential(FWHM="30 Hz"), sp.FFT(), ] ) processed_data = processor.apply_operations(data=sim.methods[0].simulation) plt.figure(figsize=(4.25, 3.0)) ax = plt.subplot(projection="csdm") ax.plot(processed_data.real, color="black", linewidth=0.5) ax.invert_xaxis() plt.tight_layout() plt.show()
true
true
f7f69370ed78d2b498e65f3234514978a48f80bb
5,336
py
Python
src/karon/tree/build.py
csm-adapt/karon
988ef07eb9339b6dea3babd4f31337bce7b5d5e4
[ "MIT" ]
1
2020-05-28T17:00:48.000Z
2020-05-28T17:00:48.000Z
src/karon/tree/build.py
csm-adapt/karon
988ef07eb9339b6dea3babd4f31337bce7b5d5e4
[ "MIT" ]
6
2019-11-06T16:50:07.000Z
2020-04-28T20:11:14.000Z
src/karon/tree/build.py
csm-adapt/karon
988ef07eb9339b6dea3babd4f31337bce7b5d5e4
[ "MIT" ]
1
2020-03-30T19:36:49.000Z
2020-03-30T19:36:49.000Z
__all__ = ["generate_tree", "from_parent"] import warnings import numpy as np def generate_tree(get_nodeid, get_parent, cmp=None): """ Defines the functions required to (a) extract a field from a node, (b) extract a field from a prospective parent node, and (c) compare the results to establish whether the prospective node is (`cmp` returns True) or is not (`cmp` returns False) the parent of the node. Example: def get_parent(node): return node.contents['parent name'] def get_name(node): return node.contents['name'] nodes = get_nodelist_from_file('foo.xlsx') tree = generate_tree(get_name, get_parent)(nodes) :param get_nodeid: Unary function that extracts a field from a Node object. :type get_nodeid: Unary function, signature: get_nodeid(Node). :param get_parent: Unary function that extracts a field from a Node object. :type get_parent: Unary function or None. If a unary function, the signature is get_parent(Node) :param cmp: (optional) Unary function that compares the results of parentID and nodeExtract. Returns True if the values match, False otherwise. :return: Unary function, signature: f(array-like-of-Nodes) """ def is_null(obj): try: return np.isnan(obj) except TypeError: return not bool(obj) def equal(lhs, rhs): if is_null(lhs) or is_null(rhs): return False else: return lhs == rhs def build(nodelist): """ Returns the parent of the node. :param nodelist: List of nodes to be used to build a tree :return: """ roots = [] for node in nodelist: value = get_parent(node) # which nodes in "nodelist" are parents of "node"? parents = [n for n in nodelist if cmp(value, get_nodeid(n))] if len(parents) > 1: # TODO: Rather than return an error, compose a common parent # that combines properties from the matching parent # nodes. Along with the original child node # these matching parents become the children to # the common parent, thereby maintaining the single parent # required for a tree, but establishing a connection to # all matching parents. # COMPLICATIONS: # 1. 
What properties do the common parent have? # 2. How are the matching parent attributes combined # in the common parent? (list/append? reduce?) msg = f'{value} has more than one ({len(parents)}) matching '\ f'parent node: {[p.contents for p in parents]}' raise ValueError(msg) try: parent = parents[0] parent.add_child(node) except IndexError: # no parent found, therefore this node is a root node roots.append(node) return roots # handle positional parameters # handle optional parameters cmp = equal if cmp is None else cmp return build def from_parent(nodes, get_key, get_parent): """ Builds up tree structures from a dictionary of nodes. The name of the parent node is given by `key` in the will-be child node contents, e.g. parent = node.contents[key] nodes[parent].add_child(node) Any node that does not specify a parent is the root node of its own tree. :param nodes: List of nodes that are to be structured into trees :type nodes: List-like. :param get_key: Gets the identifier for each node. :type get_key: Unary function or string. Unary function has the signature `get_key(node)` and returns a hashable object. If get_key is a string, returns node.contents[get_key]. :param get_parent: Gets the identifier for the parent for each node. :type get_parent: Unary function or string. Unary function has the signature `get_parent(node)` and returns a hashable object. If get_parent is a string, returns node.contents[get_parent]. If no parent node is found, then returns None. :return: Constructed trees. :rtype: list """ # if get_key or get_parent are strings, then these will be interpretted # as the key in the Node.contents dictionary. 
if isinstance(get_key, str): key_str = get_key def get_key(node): return node.contents.get(key_str, None) if isinstance(get_parent, str): parent_str = get_parent def get_parent(node): return node.contents.get(parent_str, None) # construct a map of the nodes nodemap = {get_key(node): node for node in nodes} # construct the trees roots = [] for key, node in iter(nodemap.items()): parent = get_parent(node) if parent is not None: try: nodemap[parent].add_child(node) except KeyError: warnings.warn(f"{parent} was not found in the set of " f"nodes. Child will be treated as a root.") roots.append(node) else: roots.append(node) return roots
37.055556
80
0.613381
__all__ = ["generate_tree", "from_parent"] import warnings import numpy as np def generate_tree(get_nodeid, get_parent, cmp=None): def is_null(obj): try: return np.isnan(obj) except TypeError: return not bool(obj) def equal(lhs, rhs): if is_null(lhs) or is_null(rhs): return False else: return lhs == rhs def build(nodelist): roots = [] for node in nodelist: value = get_parent(node) parents = [n for n in nodelist if cmp(value, get_nodeid(n))] if len(parents) > 1: msg = f'{value} has more than one ({len(parents)}) matching '\ f'parent node: {[p.contents for p in parents]}' raise ValueError(msg) try: parent = parents[0] parent.add_child(node) except IndexError: roots.append(node) return roots cmp = equal if cmp is None else cmp return build def from_parent(nodes, get_key, get_parent): if isinstance(get_key, str): key_str = get_key def get_key(node): return node.contents.get(key_str, None) if isinstance(get_parent, str): parent_str = get_parent def get_parent(node): return node.contents.get(parent_str, None) nodemap = {get_key(node): node for node in nodes} roots = [] for key, node in iter(nodemap.items()): parent = get_parent(node) if parent is not None: try: nodemap[parent].add_child(node) except KeyError: warnings.warn(f"{parent} was not found in the set of " f"nodes. Child will be treated as a root.") roots.append(node) else: roots.append(node) return roots
true
true
f7f69466f8327de9fc5ec4b4b402271c0c988dad
4,710
py
Python
cleandevcertificates/events/migrations/0002_auto__chg_field_event_token_expirate.py
lucascastejon/cleandevcertificates
25cfbe95081fe88f5bb2f71fcfcaf6052e7fae35
[ "MIT" ]
null
null
null
cleandevcertificates/events/migrations/0002_auto__chg_field_event_token_expirate.py
lucascastejon/cleandevcertificates
25cfbe95081fe88f5bb2f71fcfcaf6052e7fae35
[ "MIT" ]
null
null
null
cleandevcertificates/events/migrations/0002_auto__chg_field_event_token_expirate.py
lucascastejon/cleandevcertificates
25cfbe95081fe88f5bb2f71fcfcaf6052e7fae35
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'Event.token_expirate' db.alter_column(u'events_event', 'token_expirate', self.gf('django.db.models.fields.DateTimeField')(null=True)) def backwards(self, orm): # Changing field 'Event.token_expirate' db.alter_column(u'events_event', 'token_expirate', self.gf('django.db.models.fields.DateField')(null=True)) models = { u'core.person': { 'Meta': {'ordering': "['name']", 'object_name': 'Person'}, 'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'course': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'cpf': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '100'}), 'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'kind': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'semester': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'university': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Person']", 'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', 
[], {'auto_now': 'True', 'blank': 'True'}) }, u'events.certified': { 'Meta': {'ordering': "['-event__date']", 'object_name': 'Certified'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'observation': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Person']"}), 'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'events.event': { 'Meta': {'ordering': "['-date']", 'object_name': 'Event'}, 'complement': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'place': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'to': u"orm['core.Person']"}), 'post': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'speaker'", 'to': u"orm['core.Person']"}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'token_expirate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', 
[], {'auto_now': 'True', 'blank': 'True'}), 'workload': ('django.db.models.fields.IntegerField', [], {}) } } complete_apps = ['events']
69.264706
141
0.565817
from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.alter_column(u'events_event', 'token_expirate', self.gf('django.db.models.fields.DateTimeField')(null=True)) def backwards(self, orm): db.alter_column(u'events_event', 'token_expirate', self.gf('django.db.models.fields.DateField')(null=True)) models = { u'core.person': { 'Meta': {'ordering': "['name']", 'object_name': 'Person'}, 'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'course': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'cpf': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '100'}), 'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'kind': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'semester': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'university': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Person']", 'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'events.certified': { 'Meta': {'ordering': 
"['-event__date']", 'object_name': 'Certified'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'observation': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Person']"}), 'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'events.event': { 'Meta': {'ordering': "['-date']", 'object_name': 'Event'}, 'complement': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'place': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'to': u"orm['core.Person']"}), 'post': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'speaker'", 'to': u"orm['core.Person']"}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'token_expirate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'workload': 
('django.db.models.fields.IntegerField', [], {}) } } complete_apps = ['events']
true
true
f7f6949564293cc1972c13edd1c92703120fba8a
2,231
py
Python
pydrive/test/test_filelist.py
dleicht/PSB
983d5dad90bf6d8166c3451f56688e9054e5e42c
[ "MIT" ]
1
2015-11-06T02:37:06.000Z
2015-11-06T02:37:06.000Z
pydrive/test/test_filelist.py
dleicht/PSB
983d5dad90bf6d8166c3451f56688e9054e5e42c
[ "MIT" ]
null
null
null
pydrive/test/test_filelist.py
dleicht/PSB
983d5dad90bf6d8166c3451f56688e9054e5e42c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import os import sys import unittest from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive class GoogleDriveFileListTest(unittest.TestCase): """Tests operations of files.GoogleDriveFileList class. Equivalent to Files.list in Google Drive API. """ title = 'asdfjoijawioejgoiaweoganoqpnmgzwrouihoaiwe.ioawejogiawoj' ga = GoogleAuth('settings/test1.yaml') ga.LocalWebserverAuth() drive = GoogleDrive(ga) file_list = [] for x in range(0, 10): file1 = drive.CreateFile() file1['title'] = title file1.Upload() file_list.append(file1) def test_01_Files_List_GetList(self): drive = GoogleDrive(self.ga) flist = drive.ListFile({'q': "title = '%s' and trashed = false"%self.title}) files = flist.GetList() # Auto iterate every file for file1 in self.file_list: found = False for file2 in files: if file1['id'] == file2['id']: found = True self.assertEqual(found, True) def test_02_Files_List_ForLoop(self): drive = GoogleDrive(self.ga) flist = drive.ListFile({'q': "title = '%s' and trashed = false"%self.title, 'maxResults': 2}) files = [] for x in flist: # Build iterator to access files simply with for loop self.assertTrue(len(x) <= 2) files.extend(x) for file1 in self.file_list: found = False for file2 in files: if file1['id'] == file2['id']: found = True self.assertEqual(found, True) def test_03_Files_List_GetList_Iterate(self): drive = GoogleDrive(self.ga) flist = drive.ListFile({'q': "title = '%s' and trashed = false"%self.title, 'maxResults': 2}) files = [] while True: try: x = flist.GetList() self.assertTrue(len(x) <= 2) files.extend(x) except StopIteration: break for file1 in self.file_list: found = False for file2 in files: if file1['id'] == file2['id']: found = True self.assertEqual(found, True) def DeleteOldFile(self, file_name): try: os.remove(file_name) except OSError: pass if __name__ == '__main__': unittest.main()
28.240506
80
0.622591
import os import sys import unittest from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive class GoogleDriveFileListTest(unittest.TestCase): title = 'asdfjoijawioejgoiaweoganoqpnmgzwrouihoaiwe.ioawejogiawoj' ga = GoogleAuth('settings/test1.yaml') ga.LocalWebserverAuth() drive = GoogleDrive(ga) file_list = [] for x in range(0, 10): file1 = drive.CreateFile() file1['title'] = title file1.Upload() file_list.append(file1) def test_01_Files_List_GetList(self): drive = GoogleDrive(self.ga) flist = drive.ListFile({'q': "title = '%s' and trashed = false"%self.title}) files = flist.GetList() for file1 in self.file_list: found = False for file2 in files: if file1['id'] == file2['id']: found = True self.assertEqual(found, True) def test_02_Files_List_ForLoop(self): drive = GoogleDrive(self.ga) flist = drive.ListFile({'q': "title = '%s' and trashed = false"%self.title, 'maxResults': 2}) files = [] for x in flist: self.assertTrue(len(x) <= 2) files.extend(x) for file1 in self.file_list: found = False for file2 in files: if file1['id'] == file2['id']: found = True self.assertEqual(found, True) def test_03_Files_List_GetList_Iterate(self): drive = GoogleDrive(self.ga) flist = drive.ListFile({'q': "title = '%s' and trashed = false"%self.title, 'maxResults': 2}) files = [] while True: try: x = flist.GetList() self.assertTrue(len(x) <= 2) files.extend(x) except StopIteration: break for file1 in self.file_list: found = False for file2 in files: if file1['id'] == file2['id']: found = True self.assertEqual(found, True) def DeleteOldFile(self, file_name): try: os.remove(file_name) except OSError: pass if __name__ == '__main__': unittest.main()
true
true
f7f69559a360e75a434493a643a2055696a9cbbf
1,397
py
Python
badge/views.py
Gagan-Shenoy/sushiksha-website
a41991df1a1d46336cbf019e83add5df56dde363
[ "Apache-2.0" ]
31
2020-11-07T03:23:55.000Z
2022-03-16T18:21:45.000Z
badge/views.py
Gagan-Shenoy/sushiksha-website
a41991df1a1d46336cbf019e83add5df56dde363
[ "Apache-2.0" ]
124
2020-11-07T03:27:49.000Z
2022-03-20T05:28:06.000Z
badge/views.py
Gagan-Shenoy/sushiksha-website
a41991df1a1d46336cbf019e83add5df56dde363
[ "Apache-2.0" ]
44
2020-11-09T04:39:39.000Z
2022-03-12T09:48:19.000Z
from django.shortcuts import render from users.models import Reward from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.shortcuts import get_object_or_404 from .filters import RewardFilter from .models import BadgeClaim def badge_list(request): query = Reward.objects.order_by('-timestamp') f = RewardFilter(request.GET, queryset=query) paginated_queryset = f.qs paginator = Paginator(paginated_queryset, 30) page_request_var = 'page' page = request.GET.get(page_request_var) try: paginated_queryset = paginator.page(page) except PageNotAnInteger: paginated_queryset = paginator.page(1) except EmptyPage: paginated_queryset = paginator.page(paginator.num_pages) context = { 'query': paginated_queryset, 'reward_filter': f, 'page_request_var': page_request_var, 'title': "Badges awarded" } return render(request, 'badges/rewards.html', context=context) def badge_claim(request): claim_forms = BadgeClaim.objects.all() context = { 'forms': claim_forms } return render(request, 'badges/badge_claim_list.html', context=context) def badge_claim_form(request, pk): form = get_object_or_404(BadgeClaim, id=pk) context = { 'form': form } return render(request, 'badges/badge_claim_form.html', context=context)
28.510204
75
0.710093
from django.shortcuts import render from users.models import Reward from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.shortcuts import get_object_or_404 from .filters import RewardFilter from .models import BadgeClaim def badge_list(request): query = Reward.objects.order_by('-timestamp') f = RewardFilter(request.GET, queryset=query) paginated_queryset = f.qs paginator = Paginator(paginated_queryset, 30) page_request_var = 'page' page = request.GET.get(page_request_var) try: paginated_queryset = paginator.page(page) except PageNotAnInteger: paginated_queryset = paginator.page(1) except EmptyPage: paginated_queryset = paginator.page(paginator.num_pages) context = { 'query': paginated_queryset, 'reward_filter': f, 'page_request_var': page_request_var, 'title': "Badges awarded" } return render(request, 'badges/rewards.html', context=context) def badge_claim(request): claim_forms = BadgeClaim.objects.all() context = { 'forms': claim_forms } return render(request, 'badges/badge_claim_list.html', context=context) def badge_claim_form(request, pk): form = get_object_or_404(BadgeClaim, id=pk) context = { 'form': form } return render(request, 'badges/badge_claim_form.html', context=context)
true
true
f7f6959d70635536ae2a991c7209af11ed22ebcf
757
py
Python
leetcode/84_largest_rectangle_histogram.py
leetcode-notes/daily-algorithms-practice
2a03499ed0b403d79f6c8451c9a839991b23e188
[ "Unlicense" ]
null
null
null
leetcode/84_largest_rectangle_histogram.py
leetcode-notes/daily-algorithms-practice
2a03499ed0b403d79f6c8451c9a839991b23e188
[ "Unlicense" ]
null
null
null
leetcode/84_largest_rectangle_histogram.py
leetcode-notes/daily-algorithms-practice
2a03499ed0b403d79f6c8451c9a839991b23e188
[ "Unlicense" ]
null
null
null
class Solution: def largestRectangleArea(self, heights) -> int: ans, stack = 0, [] for i, h in enumerate(heights+[0]): while stack and heights[stack[-1]] >= h: height = heights[stack.pop()] if stack: width = i - stack[-1]-1 else: width = i ans = max(ans, height*width) stack.append(i) return ans """ Success Details Runtime: 808 ms, faster than 25.90 % of Python3 online submissions for Largest Rectangle in Histogram. Memory Usage: 27.1 MB, less than 43.00 % of Python3 online submissions for Largest Rectangle in Histogram. Next challenges: Maximal Rectangle Maximum Score of a Good Subarray """
29.115385
106
0.579921
class Solution: def largestRectangleArea(self, heights) -> int: ans, stack = 0, [] for i, h in enumerate(heights+[0]): while stack and heights[stack[-1]] >= h: height = heights[stack.pop()] if stack: width = i - stack[-1]-1 else: width = i ans = max(ans, height*width) stack.append(i) return ans
true
true
f7f696d3f5d0ff6a558cdf619193a9e7b3c96b99
97
py
Python
landingpage/apps.py
Emmastro/africanlibraries
6755dd5a7d3453c7ba6e63d49071f9f5af280f71
[ "Apache-2.0" ]
null
null
null
landingpage/apps.py
Emmastro/africanlibraries
6755dd5a7d3453c7ba6e63d49071f9f5af280f71
[ "Apache-2.0" ]
null
null
null
landingpage/apps.py
Emmastro/africanlibraries
6755dd5a7d3453c7ba6e63d49071f9f5af280f71
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig class LandingpageConfig(AppConfig): name = 'Landingpage'
16.166667
35
0.773196
from django.apps import AppConfig class LandingpageConfig(AppConfig): name = 'Landingpage'
true
true
f7f69793eafb3a6315f68c554400c4419a968fdf
2,505
py
Python
src/theta/theta_mock_server.py
batra-mlp-lab/vln-sim2real
e7e82f7222b35996c4f8d2e7ec68c80b8e541ad5
[ "BSD-3-Clause" ]
10
2020-11-11T02:22:32.000Z
2021-07-29T01:40:51.000Z
src/theta/theta_mock_server.py
batra-mlp-lab/vln-sim2real
e7e82f7222b35996c4f8d2e7ec68c80b8e541ad5
[ "BSD-3-Clause" ]
2
2021-02-24T07:29:39.000Z
2022-03-12T13:12:43.000Z
src/theta/theta_mock_server.py
batra-mlp-lab/vln-sim2real
e7e82f7222b35996c4f8d2e7ec68c80b8e541ad5
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python import rospy from sensor_msgs.msg import Image from std_srvs.srv import Trigger, TriggerResponse from PIL import Image as Img from vln_evaluation.msg import LocationHint import numpy as np class ThetaMockServer(object): """ Mock camera server, subscribes to mock/hint topic that says which node it is at, and then the camera publishes a pano image from the file system """ def __init__(self): # Fire up the camera rospy.loginfo('Detected mock camera') # Service self.service = rospy.Service('theta/capture', Trigger, self.capture) # By default we publish directly to image/rotated since all images are already aligned to the world frame self.pub_image = rospy.Publisher(rospy.get_param('theta_topic', 'theta/image/rotated'), Image, queue_size=1) # Extra - subscribe to the location, viewpoint etc. self.data_dir = rospy.get_param('pano_images_dir') self.sub = rospy.Subscriber('mock/hint', LocationHint, self.next_image) self.next_path = None rospy.spin() def next_image(self, data): self.viewpoint = data.viewpoint self.next_path = self.data_dir + '/' + data.viewpoint + '_equirectangular.jpg' def capture(self, req): rospy.logdebug('Capturing mock panorama') if not self.next_path: msg = 'Theta mock server did not receive a viewpoint hint.' return TriggerResponse(success=False, message=msg) img = Img.open(self.next_path) # This only works for coda, where all panos have the y-axis in the center of the # image instead of the x. 
So roll by -90 degrees to x-axis is in the middle of image #np_img = np.array(img) #np_img = np.roll(np_img, -np_img.shape[1]//4, axis=1) #img = Img.fromarray(np_img) rospy.logdebug('Mock panorama captured!') image = Image(height=img.height, width=img.width, encoding="rgb8", is_bigendian=False, step=img.width*3, data=img.tobytes()) image.header.stamp = rospy.Time.now() image.header.frame_id = 'map' self.pub_image.publish(image) rospy.logdebug('Mock panorama published!') self.next_path = None # Put the viewpoint id here because it makes mock evaluation easy return TriggerResponse(success=True, message=self.viewpoint) if __name__ == '__main__': rospy.init_node('theta_mock', anonymous=False) my_node = ThetaMockServer()
33.4
132
0.673054
import rospy from sensor_msgs.msg import Image from std_srvs.srv import Trigger, TriggerResponse from PIL import Image as Img from vln_evaluation.msg import LocationHint import numpy as np class ThetaMockServer(object): def __init__(self): rospy.loginfo('Detected mock camera') self.service = rospy.Service('theta/capture', Trigger, self.capture) self.pub_image = rospy.Publisher(rospy.get_param('theta_topic', 'theta/image/rotated'), Image, queue_size=1) self.data_dir = rospy.get_param('pano_images_dir') self.sub = rospy.Subscriber('mock/hint', LocationHint, self.next_image) self.next_path = None rospy.spin() def next_image(self, data): self.viewpoint = data.viewpoint self.next_path = self.data_dir + '/' + data.viewpoint + '_equirectangular.jpg' def capture(self, req): rospy.logdebug('Capturing mock panorama') if not self.next_path: msg = 'Theta mock server did not receive a viewpoint hint.' return TriggerResponse(success=False, message=msg) img = Img.open(self.next_path) rospy.logdebug('Mock panorama captured!') image = Image(height=img.height, width=img.width, encoding="rgb8", is_bigendian=False, step=img.width*3, data=img.tobytes()) image.header.stamp = rospy.Time.now() image.header.frame_id = 'map' self.pub_image.publish(image) rospy.logdebug('Mock panorama published!') self.next_path = None return TriggerResponse(success=True, message=self.viewpoint) if __name__ == '__main__': rospy.init_node('theta_mock', anonymous=False) my_node = ThetaMockServer()
true
true
f7f6979a6731b7ed35d30e85a35446cd54eb32e6
5,710
py
Python
files/temp/raspberry/main.py
petrLorenc/lorenc.github.io
111211d0d5e5ff097d57b5a3155ed3a19d01f00a
[ "MIT" ]
null
null
null
files/temp/raspberry/main.py
petrLorenc/lorenc.github.io
111211d0d5e5ff097d57b5a3155ed3a19d01f00a
[ "MIT" ]
null
null
null
files/temp/raspberry/main.py
petrLorenc/lorenc.github.io
111211d0d5e5ff097d57b5a3155ed3a19d01f00a
[ "MIT" ]
null
null
null
import pyaudio import wave import audioop from collections import deque import os import io import time import math from google.cloud import speech from google.cloud.speech import enums from google.cloud.speech import types CHUNK = 1024 # CHUNKS of bytes to read each time from mic FORMAT = pyaudio.paInt16 CHANNELS = 1 RATE = 16000 THRESHOLD = 1500 # The threshold intensity that defines silence SILENCE_LIMIT = 3 # Silence limit in seconds to stop the recording PREV_AUDIO = 0.5 #seconds of audo to prepend to the sending data # [START speech_transcribe_streaming] def transcribe_streaming(stream_file): """Streams transcription of the given audio file.""" from google.cloud import speech from google.cloud.speech import enums from google.cloud.speech import types client = speech.SpeechClient() # [START speech_python_migration_streaming_request] with io.open(stream_file, 'rb') as audio_file: content = audio_file.read() # In practice, stream should be a generator yielding chunks of audio data. stream = [content] requests = (types.StreamingRecognizeRequest(audio_content=chunk) for chunk in stream) config = types.RecognitionConfig( encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, language_code='cs-CZ') streaming_config = types.StreamingRecognitionConfig(config=config) # streaming_recognize returns a generator. # [START speech_python_migration_streaming_response] responses = client.streaming_recognize(streaming_config, requests) # [END speech_python_migration_streaming_request] for response in responses: # Once the transcription has settled, the first result will contain the # is_final result. The other results will be for subsequent portions of # the audio. for result in response.results: print('Finished: {}'.format(result.is_final)) print('Stability: {}'.format(result.stability)) alternatives = result.alternatives # The alternatives are ordered from most likely to least. 
for alternative in alternatives: print('Confidence: {}'.format(alternative.confidence)) print(u'Transcript: {}'.format(alternative.transcript)) return responses # [END speech_python_migration_streaming_response] # [END speech_transcribe_streaming] def listen_for_speech(threshold=THRESHOLD): #Open stream p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) print ("* Listening mic. ") audio2send = [] cur_data = '' # current chunk of audio data rel = RATE/CHUNK slid_win = deque(maxlen=math.floor(SILENCE_LIMIT * rel)) #Prepend audio from 0.5 seconds before noise was detected prev_audio = deque(maxlen=math.floor(PREV_AUDIO * rel)) started = False response = [] while (True): cur_data = stream.read(CHUNK) slid_win.append(math.sqrt(abs(audioop.avg(cur_data, 4)))) #print slid_win[-1] if(sum([x > THRESHOLD for x in slid_win]) > 0): if(not started): print ("Starting record of phrase") started = True audio2send.append(cur_data) elif (started is True): stream.stop_stream() print ("Finished") filename = save_speech(list(prev_audio) + audio2send, p) stt_google_wav(filename) # Remove temp file. Comment line to review. os.remove(filename) # Reset all started = False slid_win = deque(maxlen=math.floor(SILENCE_LIMIT * rel)) prev_audio = deque(maxlen=math.floor(0.5 * rel)) audio2send = [] stream.start_stream() print ("Listening ...") else: prev_audio.append(cur_data) print ("exiting") p.terminate() return def save_speech(data, p): filename = 'output_'+str(int(time.time())) # writes data to WAV file data = b''.join(data) wf = wave.open(filename + '.wav', 'wb') wf.setnchannels(1) wf.setsampwidth(p.get_sample_size(pyaudio.paInt16)) wf.setframerate(16000) # TODO make this value a function parameter? wf.writeframes(data) wf.close() return filename + '.wav' def stt_google_wav(audio_fname): """ Sends audio file (audio_fname) to Google's text to speech service and returns service's response. 
We need a FLAC converter if audio is not FLAC (check FLAC_CONV). """ print ("Sending ", audio_fname) #Convert to flac first responses = transcribe_streaming(audio_fname) for response in responses: for result in response.results: for alternative in result.alternatives: print('=' * 20) print('transcript: ' + alternative.transcript) print('confidence: ' + str(alternative.confidence)) if alternative.transcript == "exit": print("exit") if alternative.transcript.lower() == "zapni světlo": print("světlo zapnuto") if alternative.transcript.lower() == "zapni světla": print("světlo zapnuto") if alternative.transcript.lower() == "vypni světlo": print("světlo vypnuto") if alternative.transcript.lower() == "vypni světla": print("světlo vypnuto") if(__name__ == '__main__'): listen_for_speech()
35.030675
79
0.638879
import pyaudio import wave import audioop from collections import deque import os import io import time import math from google.cloud import speech from google.cloud.speech import enums from google.cloud.speech import types CHUNK = 1024 FORMAT = pyaudio.paInt16 CHANNELS = 1 RATE = 16000 THRESHOLD = 1500 SILENCE_LIMIT = 3 PREV_AUDIO = 0.5 def transcribe_streaming(stream_file): from google.cloud import speech from google.cloud.speech import enums from google.cloud.speech import types client = speech.SpeechClient() with io.open(stream_file, 'rb') as audio_file: content = audio_file.read() stream = [content] requests = (types.StreamingRecognizeRequest(audio_content=chunk) for chunk in stream) config = types.RecognitionConfig( encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, language_code='cs-CZ') streaming_config = types.StreamingRecognitionConfig(config=config) responses = client.streaming_recognize(streaming_config, requests) for response in responses: for result in response.results: print('Finished: {}'.format(result.is_final)) print('Stability: {}'.format(result.stability)) alternatives = result.alternatives for alternative in alternatives: print('Confidence: {}'.format(alternative.confidence)) print(u'Transcript: {}'.format(alternative.transcript)) return responses def listen_for_speech(threshold=THRESHOLD): p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) print ("* Listening mic. 
") audio2send = [] cur_data = '' rel = RATE/CHUNK slid_win = deque(maxlen=math.floor(SILENCE_LIMIT * rel)) prev_audio = deque(maxlen=math.floor(PREV_AUDIO * rel)) started = False response = [] while (True): cur_data = stream.read(CHUNK) slid_win.append(math.sqrt(abs(audioop.avg(cur_data, 4)))) if(sum([x > THRESHOLD for x in slid_win]) > 0): if(not started): print ("Starting record of phrase") started = True audio2send.append(cur_data) elif (started is True): stream.stop_stream() print ("Finished") filename = save_speech(list(prev_audio) + audio2send, p) stt_google_wav(filename) os.remove(filename) started = False slid_win = deque(maxlen=math.floor(SILENCE_LIMIT * rel)) prev_audio = deque(maxlen=math.floor(0.5 * rel)) audio2send = [] stream.start_stream() print ("Listening ...") else: prev_audio.append(cur_data) print ("exiting") p.terminate() return def save_speech(data, p): filename = 'output_'+str(int(time.time())) data = b''.join(data) wf = wave.open(filename + '.wav', 'wb') wf.setnchannels(1) wf.setsampwidth(p.get_sample_size(pyaudio.paInt16)) wf.setframerate(16000) wf.writeframes(data) wf.close() return filename + '.wav' def stt_google_wav(audio_fname): print ("Sending ", audio_fname) responses = transcribe_streaming(audio_fname) for response in responses: for result in response.results: for alternative in result.alternatives: print('=' * 20) print('transcript: ' + alternative.transcript) print('confidence: ' + str(alternative.confidence)) if alternative.transcript == "exit": print("exit") if alternative.transcript.lower() == "zapni světlo": print("světlo zapnuto") if alternative.transcript.lower() == "zapni světla": print("světlo zapnuto") if alternative.transcript.lower() == "vypni světlo": print("světlo vypnuto") if alternative.transcript.lower() == "vypni světla": print("světlo vypnuto") if(__name__ == '__main__'): listen_for_speech()
true
true
f7f697e5281ef5d6df68efaf3f5bdd939e48db58
615
py
Python
races/migrations/0003_auto_20210216_0810.py
abecede753/acracers
93ddca35b05f6eeb094d9921960ad75d8c286a20
[ "MIT" ]
1
2021-03-11T04:48:21.000Z
2021-03-11T04:48:21.000Z
races/migrations/0003_auto_20210216_0810.py
abecede753/acracers
93ddca35b05f6eeb094d9921960ad75d8c286a20
[ "MIT" ]
null
null
null
races/migrations/0003_auto_20210216_0810.py
abecede753/acracers
93ddca35b05f6eeb094d9921960ad75d8c286a20
[ "MIT" ]
null
null
null
# Generated by Django 3.1.4 on 2021-02-16 08:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('races', '0002_auto_20210207_0846'), ] operations = [ migrations.AlterField( model_name='racesetup', name='car_download_url', field=models.URLField(blank=True, default='', max_length=2048), ), migrations.AlterField( model_name='racesetup', name='track_download_url', field=models.URLField(blank=True, default='', max_length=2048), ), ]
25.625
75
0.6
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('races', '0002_auto_20210207_0846'), ] operations = [ migrations.AlterField( model_name='racesetup', name='car_download_url', field=models.URLField(blank=True, default='', max_length=2048), ), migrations.AlterField( model_name='racesetup', name='track_download_url', field=models.URLField(blank=True, default='', max_length=2048), ), ]
true
true
f7f698616912aa6d84dae080a67b1812dc6aabef
11,036
py
Python
src/python/google_cloud_utils/big_query.py
fengjixuchui/clusterfuzz
ef89be3934936d1086b4a21bffca5506c8cb93be
[ "Apache-2.0" ]
null
null
null
src/python/google_cloud_utils/big_query.py
fengjixuchui/clusterfuzz
ef89be3934936d1086b4a21bffca5506c8cb93be
[ "Apache-2.0" ]
1
2019-06-07T21:29:28.000Z
2019-06-07T21:29:28.000Z
src/python/google_cloud_utils/big_query.py
fengjixuchui/clusterfuzz
ef89be3934936d1086b4a21bffca5506c8cb93be
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BigQuery client. We cannot use gcloud's BigQuery client because it requires oauth2client 4.0.0. But our appengine requires oauth2client 1.4.2. Therefore, we implement our own BigQuery client.""" from builtins import object import collections import datetime import time from googleapiclient import discovery from base import retry from base import utils from config import local_config from google_cloud_utils import credentials from metrics import logs from system import environment REQUEST_TIMEOUT = 60 QUERY_TIMEOUT = 5 * 60 QUERY_MAX_RESULTS = 10000 QUERY_RETRY_COUNT = 3 QUERY_RETRY_DELAY = 3 @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.get_api_client') def get_api_client(): """Return an api client for bigquery.""" return discovery.build( 'bigquery', 'v2', cache_discovery=False, credentials=credentials.get_default()[0]) def get_bucket(): """Return bucket for bigquery stats.""" return local_config.ProjectConfig().get('bigquery.bucket') def cast(value, field): """Cast value to appropriate type.""" if value is None: return None if field['type'] in {'INTEGER', 'INT64'}: return int(value) elif field['type'] in {'FLOAT', 'FLOAT64'}: return float(value) elif field['type'] in {'BOOLEAN', 'BOOL'}: return value == 'true' elif field['type'] in {'STRING'}: return value elif field['type'] in {'TIMESTAMP'}: return 
datetime.datetime.utcfromtimestamp(float(value)) elif field['type'] in {'RECORD'}: return convert_row(value, field['fields']) else: raise Exception('The type %s is unsupported.' % field['type']) def convert_row(raw_row, fields): """Convert a single raw row (from BigQuery) to a dict.""" row = {} for index, raw_value in enumerate(raw_row['f']): field = fields[index] if field['mode'] == 'REPEATED': row[field['name']] = [] for item in raw_value['v']: row[field['name']].append(cast(item['v'], field)) else: row[field['name']] = cast(raw_value['v'], field) return row def convert(result): """Convert a query result into an array of dicts, each of which represents a row.""" fields = result['schema']['fields'] rows = [] for raw_row in result.get('rows', []): rows.append(convert_row(raw_row, fields)) return rows @environment.local_noop def write_range(table_id, testcase, range_name, start, end): """Write a range to BigQuery. This is applicable for regression and fixed ranges.""" client = Client(dataset_id='main', table_id=table_id) result = client.insert([ Insert( row={ 'testcase_id': str(testcase.key.id()), 'crash_type': testcase.crash_type, 'crash_state': testcase.crash_state, 'security_flag': testcase.security_flag, 'parent_fuzzer_name': testcase.fuzzer_name, 'fuzzer_name': testcase.overridden_fuzzer_name, 'job_type': testcase.job_type, 'created_at': int(time.time()), ('%s_range_start' % range_name): int(start), ('%s_range_end' % range_name): int(end), }, insert_id='%s:%s:%s' % (testcase.key.id(), start, end)) ]) for error in result.get('insertErrors', []): logs.log_error( ("Ignoring error writing the testcase's %s range (%s) to " 'BigQuery.' % (range_name, testcase.key.id())), exception=Exception(error)) def _get_max_results(max_results, limit, count_so_far): """Get an appropriate max_results.""" # limit is None means we get every record (no limit). 
if limit is None: return max_results return min(max_results, limit - count_so_far) Insert = collections.namedtuple('Insert', ['row', 'insert_id']) QueryResult = collections.namedtuple('QueryResult', ['rows', 'total_count']) class Client(object): """BigQuery client.""" def __init__(self, dataset_id=None, table_id=None): self.project_id = utils.get_application_id() self.dataset_id = dataset_id self.table_id = table_id self.client = get_api_client() @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.raw_query') def raw_query(self, query, max_results): # pylint: disable=line-too-long """Perform a query and return result. Args: query: the query string. timeout: the timout in seconds. max_results: the number of rows per response. The response cannot exceed 10MB. use_legacy_sql: whether or not the query is of the legacy sql. Returns: A json explained here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query """ body = { 'query': query, 'timeoutMs': REQUEST_TIMEOUT * 1000, 'useLegacySql': False, 'maxResults': max_results } return self.client.jobs().query( projectId=self.project_id, body=body).execute() @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.get_query_results') def get_query_results(self, job_id, page_token, start_index, max_results): # pylint: disable=line-too-long """Perform a query and return result. Args: query: the query string. job_id: the job id from query's response. page_token: the page token from the previous query's response. max_results: the number of rows per response. The response cannot exceed 10MB. 
Returns: A json explained here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults """ return self.client.jobs().getQueryResults( projectId=self.project_id, jobId=job_id, timeoutMs=REQUEST_TIMEOUT * 1000, maxResults=max_results, startIndex=start_index, pageToken=page_token).execute() def wait_for_completion(self, job_id, offset, max_results, start_time, timeout): """Wait for job completion and return the first page.""" while True: result = self.get_query_results( job_id=job_id, page_token=None, start_index=offset, max_results=max_results) if result['jobComplete']: return result if (time.time() - start_time) > timeout: raise Exception( "Timeout: the query doesn't finish within %d seconds." % timeout) time.sleep(1) def query(self, query, timeout=QUERY_TIMEOUT, max_results=QUERY_MAX_RESULTS, offset=0, limit=None): """Performs a query and returns an array of dicts.""" rows = [] start_time = time.time() result = self.raw_query(query, max_results=0) result = self.wait_for_completion( job_id=result['jobReference']['jobId'], offset=offset, max_results=_get_max_results(max_results, limit, 0), start_time=start_time, timeout=timeout) # totalRows is only present after the job completed successfully. total_count = int(result['totalRows']) while len(rows) < limit or limit < 0: rows += convert(result) if result['jobComplete'] and 'pageToken' not in result: total_count = int(result['totalRows']) break result = self.get_query_results( job_id=result['jobReference']['jobId'], page_token=result.get('pageToken'), start_index=0, max_results=_get_max_results(max_results, limit, len(rows))) return QueryResult(rows=rows, total_count=total_count) @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.get_job') def get_job(self, job_id): # pylint: disable=line-too-long """Get the job. Args: job_id: the job id. 
Returns: A json explained here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query """ return self.client.jobs().get( projectId=self.project_id, jobId=job_id).execute() @environment.local_noop @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.insert_from_query') def insert_from_query(self, dataset_id, table_id, job_id, query): # pylint: disable=line-too-long """Insert rows to the table from a query. Args: dataset_id: the destination dataset id. table_id: the desitnation table id. job_id: the uniquely identified job id (used for preventing redundant job). query: the query that generates rows. Returns: A json explained here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query """ return self.client.jobs().insert( projectId=self.project_id, body={ 'configuration': { 'query': { 'query': query, 'allowLargeResults': True, 'destinationTable': { 'projectId': self.project_id, 'datasetId': dataset_id, 'tableId': table_id }, 'useLegacySql': False, 'writeDisposition': 'WRITE_APPEND' } }, 'jobReference': { 'jobId': job_id, 'projectId': self.project_id } }).execute() @environment.local_noop @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.insert') def insert(self, inserts): # pylint: disable=line-too-long """Insert multiple rows. Args: inserts: a list of Inserts, each of which represents a row. Returns: A json explained here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll """ inserted_rows = [] for insert in inserts: inserted_rows.append({'json': insert.row, 'insertId': insert.insert_id}) body = {'kind': 'bigquery#tableDataInsertAllRequest', 'rows': inserted_rows} return self.client.tabledata().insertAll( projectId=self.project_id, datasetId=self.dataset_id, tableId=self.table_id, body=body).execute()
31.087324
87
0.649692
from builtins import object import collections import datetime import time from googleapiclient import discovery from base import retry from base import utils from config import local_config from google_cloud_utils import credentials from metrics import logs from system import environment REQUEST_TIMEOUT = 60 QUERY_TIMEOUT = 5 * 60 QUERY_MAX_RESULTS = 10000 QUERY_RETRY_COUNT = 3 QUERY_RETRY_DELAY = 3 @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.get_api_client') def get_api_client(): return discovery.build( 'bigquery', 'v2', cache_discovery=False, credentials=credentials.get_default()[0]) def get_bucket(): return local_config.ProjectConfig().get('bigquery.bucket') def cast(value, field): if value is None: return None if field['type'] in {'INTEGER', 'INT64'}: return int(value) elif field['type'] in {'FLOAT', 'FLOAT64'}: return float(value) elif field['type'] in {'BOOLEAN', 'BOOL'}: return value == 'true' elif field['type'] in {'STRING'}: return value elif field['type'] in {'TIMESTAMP'}: return datetime.datetime.utcfromtimestamp(float(value)) elif field['type'] in {'RECORD'}: return convert_row(value, field['fields']) else: raise Exception('The type %s is unsupported.' 
% field['type']) def convert_row(raw_row, fields): row = {} for index, raw_value in enumerate(raw_row['f']): field = fields[index] if field['mode'] == 'REPEATED': row[field['name']] = [] for item in raw_value['v']: row[field['name']].append(cast(item['v'], field)) else: row[field['name']] = cast(raw_value['v'], field) return row def convert(result): fields = result['schema']['fields'] rows = [] for raw_row in result.get('rows', []): rows.append(convert_row(raw_row, fields)) return rows @environment.local_noop def write_range(table_id, testcase, range_name, start, end): client = Client(dataset_id='main', table_id=table_id) result = client.insert([ Insert( row={ 'testcase_id': str(testcase.key.id()), 'crash_type': testcase.crash_type, 'crash_state': testcase.crash_state, 'security_flag': testcase.security_flag, 'parent_fuzzer_name': testcase.fuzzer_name, 'fuzzer_name': testcase.overridden_fuzzer_name, 'job_type': testcase.job_type, 'created_at': int(time.time()), ('%s_range_start' % range_name): int(start), ('%s_range_end' % range_name): int(end), }, insert_id='%s:%s:%s' % (testcase.key.id(), start, end)) ]) for error in result.get('insertErrors', []): logs.log_error( ("Ignoring error writing the testcase's %s range (%s) to " 'BigQuery.' % (range_name, testcase.key.id())), exception=Exception(error)) def _get_max_results(max_results, limit, count_so_far): # limit is None means we get every record (no limit). 
if limit is None: return max_results return min(max_results, limit - count_so_far) Insert = collections.namedtuple('Insert', ['row', 'insert_id']) QueryResult = collections.namedtuple('QueryResult', ['rows', 'total_count']) class Client(object): def __init__(self, dataset_id=None, table_id=None): self.project_id = utils.get_application_id() self.dataset_id = dataset_id self.table_id = table_id self.client = get_api_client() @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.raw_query') def raw_query(self, query, max_results): # pylint: disable=line-too-long body = { 'query': query, 'timeoutMs': REQUEST_TIMEOUT * 1000, 'useLegacySql': False, 'maxResults': max_results } return self.client.jobs().query( projectId=self.project_id, body=body).execute() @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.get_query_results') def get_query_results(self, job_id, page_token, start_index, max_results): # pylint: disable=line-too-long return self.client.jobs().getQueryResults( projectId=self.project_id, jobId=job_id, timeoutMs=REQUEST_TIMEOUT * 1000, maxResults=max_results, startIndex=start_index, pageToken=page_token).execute() def wait_for_completion(self, job_id, offset, max_results, start_time, timeout): while True: result = self.get_query_results( job_id=job_id, page_token=None, start_index=offset, max_results=max_results) if result['jobComplete']: return result if (time.time() - start_time) > timeout: raise Exception( "Timeout: the query doesn't finish within %d seconds." 
% timeout) time.sleep(1) def query(self, query, timeout=QUERY_TIMEOUT, max_results=QUERY_MAX_RESULTS, offset=0, limit=None): rows = [] start_time = time.time() result = self.raw_query(query, max_results=0) result = self.wait_for_completion( job_id=result['jobReference']['jobId'], offset=offset, max_results=_get_max_results(max_results, limit, 0), start_time=start_time, timeout=timeout) total_count = int(result['totalRows']) while len(rows) < limit or limit < 0: rows += convert(result) if result['jobComplete'] and 'pageToken' not in result: total_count = int(result['totalRows']) break result = self.get_query_results( job_id=result['jobReference']['jobId'], page_token=result.get('pageToken'), start_index=0, max_results=_get_max_results(max_results, limit, len(rows))) return QueryResult(rows=rows, total_count=total_count) @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.get_job') def get_job(self, job_id): return self.client.jobs().get( projectId=self.project_id, jobId=job_id).execute() @environment.local_noop @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.insert_from_query') def insert_from_query(self, dataset_id, table_id, job_id, query): return self.client.jobs().insert( projectId=self.project_id, body={ 'configuration': { 'query': { 'query': query, 'allowLargeResults': True, 'destinationTable': { 'projectId': self.project_id, 'datasetId': dataset_id, 'tableId': table_id }, 'useLegacySql': False, 'writeDisposition': 'WRITE_APPEND' } }, 'jobReference': { 'jobId': job_id, 'projectId': self.project_id } }).execute() @environment.local_noop @retry.wrap( retries=QUERY_RETRY_COUNT, delay=QUERY_RETRY_DELAY, function='google_cloud_utils.big_query.Client.insert') def insert(self, inserts): inserted_rows = [] for insert in inserts: inserted_rows.append({'json': insert.row, 'insertId': insert.insert_id}) body = {'kind': 
'bigquery#tableDataInsertAllRequest', 'rows': inserted_rows} return self.client.tabledata().insertAll( projectId=self.project_id, datasetId=self.dataset_id, tableId=self.table_id, body=body).execute()
true
true
f7f6989fae0f4ff1770a91ca5a6addd60be8925f
2,203
py
Python
python/orca/src/bigdl/orca/learn/pytorch/callbacks/base.py
nyamashi/BigDL
0813c90f006f0bd613d950f4440b1bb65199b0c2
[ "Apache-2.0" ]
null
null
null
python/orca/src/bigdl/orca/learn/pytorch/callbacks/base.py
nyamashi/BigDL
0813c90f006f0bd613d950f4440b1bb65199b0c2
[ "Apache-2.0" ]
null
null
null
python/orca/src/bigdl/orca/learn/pytorch/callbacks/base.py
nyamashi/BigDL
0813c90f006f0bd613d950f4440b1bb65199b0c2
[ "Apache-2.0" ]
null
null
null
from abc import abstractmethod class Callback(object): def __init__(self): self.model = None self.params = None self.trainer = None @abstractmethod def on_batch_begin(self, batch): """ Called at the beginning of a training batch in `fit` methods. Subclasses should override for any actions to run. @param batch: Integer, index of batch within the current epoch. """ pass @abstractmethod def on_batch_end(self, batch): """ Called at the end of a training batch in `fit` methods. Subclasses should override for any actions to run. @param batch: Integer, index of batch within the current epoch. """ pass @abstractmethod def on_epoch_begin(self, epoch): """ Called at the start of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. @param epoch: Integer, index of epoch. @param logs: Dict. Currently, saved stats in last epoch has been passed to this argument for this method but may change in the future. """ pass @abstractmethod def on_epoch_end(self, epoch): """ Called at the end of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. @param epoch: Integer, index of epoch. """ pass @abstractmethod def on_train_begin(self): """ Called at the beginning of training. Subclasses should override for any actions to run. @param logs: Dict. Currently, no data is passed to this argument for this method but that may change in the future. """ pass @abstractmethod def on_train_end(self): """ Called at the end of training. Subclasses should override for any actions to run. """ pass def set_model(self, model): self.model = model def set_param(self, param): self.params = param def set_trainer(self, trainer): self.trainer = trainer
28.986842
96
0.614163
from abc import abstractmethod class Callback(object): def __init__(self): self.model = None self.params = None self.trainer = None @abstractmethod def on_batch_begin(self, batch): pass @abstractmethod def on_batch_end(self, batch): pass @abstractmethod def on_epoch_begin(self, epoch): pass @abstractmethod def on_epoch_end(self, epoch): pass @abstractmethod def on_train_begin(self): pass @abstractmethod def on_train_end(self): pass def set_model(self, model): self.model = model def set_param(self, param): self.params = param def set_trainer(self, trainer): self.trainer = trainer
true
true
f7f698af80a60f423f357c5bf1a8c793a9e2be9e
1,025
py
Python
test/integration/objectstore/test_swift_objectstore.py
quacksawbones/galaxy-1
65f7259b29d3886e526d9be670c60d9da9fbe038
[ "CC-BY-3.0" ]
1,085
2015-02-18T16:14:38.000Z
2022-03-30T23:52:07.000Z
test/integration/objectstore/test_swift_objectstore.py
quacksawbones/galaxy-1
65f7259b29d3886e526d9be670c60d9da9fbe038
[ "CC-BY-3.0" ]
11,253
2015-02-18T17:47:32.000Z
2022-03-31T21:47:03.000Z
test/integration/objectstore/test_swift_objectstore.py
quacksawbones/galaxy-1
65f7259b29d3886e526d9be670c60d9da9fbe038
[ "CC-BY-3.0" ]
1,000
2015-02-18T16:18:10.000Z
2022-03-29T08:22:56.000Z
from galaxy_test.driver import integration_util from ._base import BaseSwiftObjectStoreIntegrationTestCase TEST_TOOL_IDS = [ "multi_output", "multi_output_configured", "multi_output_assign_primary", "multi_output_recurse", "tool_provided_metadata_1", "tool_provided_metadata_2", "tool_provided_metadata_3", "tool_provided_metadata_4", "tool_provided_metadata_5", "tool_provided_metadata_6", "tool_provided_metadata_7", "tool_provided_metadata_8", "tool_provided_metadata_9", "tool_provided_metadata_10", "tool_provided_metadata_11", "tool_provided_metadata_12", "composite_output", "composite_output_tests", "metadata", "metadata_bam", "output_format", "output_auto_format", ] class SwiftObjectStoreIntegrationTestCase(BaseSwiftObjectStoreIntegrationTestCase): pass instance = integration_util.integration_module_instance(SwiftObjectStoreIntegrationTestCase) test_tools = integration_util.integration_tool_runner(TEST_TOOL_IDS)
28.472222
92
0.783415
from galaxy_test.driver import integration_util from ._base import BaseSwiftObjectStoreIntegrationTestCase TEST_TOOL_IDS = [ "multi_output", "multi_output_configured", "multi_output_assign_primary", "multi_output_recurse", "tool_provided_metadata_1", "tool_provided_metadata_2", "tool_provided_metadata_3", "tool_provided_metadata_4", "tool_provided_metadata_5", "tool_provided_metadata_6", "tool_provided_metadata_7", "tool_provided_metadata_8", "tool_provided_metadata_9", "tool_provided_metadata_10", "tool_provided_metadata_11", "tool_provided_metadata_12", "composite_output", "composite_output_tests", "metadata", "metadata_bam", "output_format", "output_auto_format", ] class SwiftObjectStoreIntegrationTestCase(BaseSwiftObjectStoreIntegrationTestCase): pass instance = integration_util.integration_module_instance(SwiftObjectStoreIntegrationTestCase) test_tools = integration_util.integration_tool_runner(TEST_TOOL_IDS)
true
true
f7f69ba4cdaba120b5d8c6932e6f7eb4c60a0913
19,104
py
Python
vendor-local/packages/requests/requests/packages/chardet/jpcntx.py
glogiotatidis/affiliates
34d0ded8e24be9dd207d6419a5157dc8ce34bc06
[ "BSD-3-Clause" ]
1,262
2015-01-02T15:12:36.000Z
2022-03-31T06:51:45.000Z
crowdstance-api/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jpcntx.py
crowdhackathon-transport/optimizers
95c2cade7296a95aa9340250a442a9ff5502525e
[ "MIT" ]
657
2015-01-02T15:42:31.000Z
2022-03-28T13:10:48.000Z
crowdstance-api/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jpcntx.py
crowdhackathon-transport/optimizers
95c2cade7296a95aa9340250a442a9ff5502525e
[ "MIT" ]
372
2015-01-09T08:16:41.000Z
2022-03-24T02:29:31.000Z
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .compat import wrap_ord NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), 
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), (1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), (0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), (0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), (0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), 
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), (0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), (2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), (0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), (0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), 
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), (0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), (0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), (0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), (0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), 
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), (0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), (2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), 
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), (0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), (0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), (0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), (0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), 
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), (0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), (0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), 
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), (0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), (0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), (1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), (0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), 
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), (0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis: def __init__(self): self.reset() def reset(self): self._mTotalRel = 0 # total sequence received # category counters, each interger counts sequence in its category self._mRelSample = [0] * NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer self._mNeedToSkipCharNum = 0 self._mLastCharOrder = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made self._mDone = False def feed(self, aBuf, aLen): if self._mDone: return # The buffer we got is byte oriented, and a character may span in more than one # buffers. In case the last one or two byte in last buffer is not # complete, we record how many byte needed to complete that character # and skip these bytes here. We can choose to record those bytes as # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. 
i = self._mNeedToSkipCharNum while i < aLen: order, charLen = self.get_order(aBuf[i:i + 2]) i += charLen if i > aLen: self._mNeedToSkipCharNum = i - aLen self._mLastCharOrder = -1 else: if (order != -1) and (self._mLastCharOrder != -1): self._mTotalRel += 1 if self._mTotalRel > MAX_REL_THRESHOLD: self._mDone = True break self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1 self._mLastCharOrder = order def got_enough_data(self): return self._mTotalRel > ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. if self._mTotalRel > MINIMUM_DATA_THRESHOLD: return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel else: return DONT_KNOW def get_order(self, aBuf): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): charLen = 2 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 202) and (0x9F <= second_char <= 0xF1): return second_char - 0x9F, charLen return -1, charLen class EUCJPContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): charLen = 2 elif first_char == 0x8F: charLen = 3 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): return second_char - 0xA1, charLen return -1, charLen # flake8: noqa
86.836364
168
0.526068
,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), (0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), (0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), 
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), (0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), (0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), (0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), (0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), 
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), (0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), (0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), 
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), (0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), (0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), (0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), (0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), 
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), (0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), (0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), 
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), (1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), (0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), (0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), (0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis: def __init__(self): self.reset() def reset(self): self._mTotalRel = 0 self._mRelSample = [0] * NUM_OF_CATEGORY self._mNeedToSkipCharNum = 0 self._mLastCharOrder = -1 self._mDone = False def feed(self, aBuf, aLen): if self._mDone: return i = self._mNeedToSkipCharNum while i < aLen: order, charLen = self.get_order(aBuf[i:i + 2]) i += charLen if i > aLen: self._mNeedToSkipCharNum = i - aLen self._mLastCharOrder = -1 else: if (order != -1) and 
(self._mLastCharOrder != -1): self._mTotalRel += 1 if self._mTotalRel > MAX_REL_THRESHOLD: self._mDone = True break self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1 self._mLastCharOrder = order def got_enough_data(self): return self._mTotalRel > ENOUGH_REL_THRESHOLD def get_confidence(self): if self._mTotalRel > MINIMUM_DATA_THRESHOLD: return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel else: return DONT_KNOW def get_order(self, aBuf): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 first_char = wrap_ord(aBuf[0]) if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): charLen = 2 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 202) and (0x9F <= second_char <= 0xF1): return second_char - 0x9F, charLen return -1, charLen class EUCJPContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): charLen = 2 elif first_char == 0x8F: charLen = 3 else: charLen = 1 if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): return second_char - 0xA1, charLen return -1, charLen
true
true
f7f69c2d35d632a3f2ba1a0ffb0bc0ac76fc1b48
2,378
py
Python
backend/sol_association_28653/urls.py
crowdbotics-apps/sol-association-28653
42a7b2194569ba7dba0cf49544e647a2e60a0d43
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/sol_association_28653/urls.py
crowdbotics-apps/sol-association-28653
42a7b2194569ba7dba0cf49544e647a2e60a0d43
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/sol_association_28653/urls.py
crowdbotics-apps/sol-association-28653
42a7b2194569ba7dba0cf49544e647a2e60a0d43
[ "FTL", "AML", "RSA-MD" ]
null
null
null
"""sol_association_28653 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include, re_path from django.views.generic.base import TemplateView from allauth.account.views import confirm_email from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi urlpatterns = [ path("", include("home.urls")), path("accounts/", include("allauth.urls")), path("modules/", include("modules.urls")), path("api/v1/", include("home.api.v1.urls")), path("admin/", admin.site.urls), path("users/", include("users.urls", namespace="users")), path("rest-auth/", include("rest_auth.urls")), # Override email confirm to use allauth's HTML view instead of rest_auth's API view path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email), path("rest-auth/registration/", include("rest_auth.registration.urls")), path("api/v1/", include("event.api.v1.urls")), path("event/", include("event.urls")), path("home/", include("home.urls")), ] admin.site.site_header = "Sol Association" admin.site.site_title = "Sol Association Admin Portal" admin.site.index_title = "Sol Association Admin" # swagger api_info = openapi.Info( title="Sol Association API", default_version="v1", description="API documentation for Sol Association App", ) schema_view = get_schema_view( api_info, public=True, permission_classes=(permissions.IsAuthenticated,), ) urlpatterns += [ 
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs") ] urlpatterns += [path("", TemplateView.as_view(template_name="index.html"))] urlpatterns += [ re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name="index.html")) ]
35.492537
87
0.710681
from django.contrib import admin from django.urls import path, include, re_path from django.views.generic.base import TemplateView from allauth.account.views import confirm_email from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi urlpatterns = [ path("", include("home.urls")), path("accounts/", include("allauth.urls")), path("modules/", include("modules.urls")), path("api/v1/", include("home.api.v1.urls")), path("admin/", admin.site.urls), path("users/", include("users.urls", namespace="users")), path("rest-auth/", include("rest_auth.urls")), path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email), path("rest-auth/registration/", include("rest_auth.registration.urls")), path("api/v1/", include("event.api.v1.urls")), path("event/", include("event.urls")), path("home/", include("home.urls")), ] admin.site.site_header = "Sol Association" admin.site.site_title = "Sol Association Admin Portal" admin.site.index_title = "Sol Association Admin" api_info = openapi.Info( title="Sol Association API", default_version="v1", description="API documentation for Sol Association App", ) schema_view = get_schema_view( api_info, public=True, permission_classes=(permissions.IsAuthenticated,), ) urlpatterns += [ path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs") ] urlpatterns += [path("", TemplateView.as_view(template_name="index.html"))] urlpatterns += [ re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name="index.html")) ]
true
true
f7f69c94a3c3d74fdc708da59a9fc2dc3ce1e6b1
404
py
Python
tests/api_resources/terminal/test_connection_token.py
tony/stripe-python
ac1b4cabf65adcf62de9a4c3409d66d1881c5574
[ "MIT" ]
1,078
2015-01-06T03:35:05.000Z
2022-03-25T13:25:48.000Z
tests/api_resources/terminal/test_connection_token.py
lamethie/stripe-python
db396e29b27f642879b61f195209ea1b3b7ab73b
[ "MIT" ]
558
2015-01-07T19:05:02.000Z
2022-03-28T22:19:24.000Z
tests/api_resources/terminal/test_connection_token.py
lamethie/stripe-python
db396e29b27f642879b61f195209ea1b3b7ab73b
[ "MIT" ]
382
2015-01-04T14:06:09.000Z
2022-03-16T04:52:04.000Z
from __future__ import absolute_import, division, print_function import stripe TEST_RESOURCE_ID = "rdr_123" class TestConnectionToken(object): def test_is_creatable(self, request_mock): resource = stripe.terminal.ConnectionToken.create() request_mock.assert_requested("post", "/v1/terminal/connection_tokens") assert isinstance(resource, stripe.terminal.ConnectionToken)
28.857143
79
0.774752
from __future__ import absolute_import, division, print_function import stripe TEST_RESOURCE_ID = "rdr_123" class TestConnectionToken(object): def test_is_creatable(self, request_mock): resource = stripe.terminal.ConnectionToken.create() request_mock.assert_requested("post", "/v1/terminal/connection_tokens") assert isinstance(resource, stripe.terminal.ConnectionToken)
true
true
f7f69e3e5bc40bd77e442c8e6df7615fb4ae13b8
2,448
py
Python
scripts/generate-search-config.py
alphagov-mirror/digitalmarketplace-frameworks
1c829ba74f5cbf5409450cf88ba5fc129b542d5c
[ "MIT" ]
10
2016-07-25T22:16:17.000Z
2020-09-25T07:10:23.000Z
scripts/generate-search-config.py
alphagov-mirror/digitalmarketplace-frameworks
1c829ba74f5cbf5409450cf88ba5fc129b542d5c
[ "MIT" ]
388
2015-09-21T16:10:27.000Z
2021-07-30T15:18:04.000Z
scripts/generate-search-config.py
alphagov-mirror/digitalmarketplace-frameworks
1c829ba74f5cbf5409450cf88ba5fc129b542d5c
[ "MIT" ]
16
2015-11-13T10:02:27.000Z
2021-04-10T20:13:25.000Z
#!/usr/bin/env python """Generate an Elasticsearch mapping using the template stored with the framework. At the time of writing, most of the search mapping is written by hand, in the search_mapping.json template file. This script adds to that template and writes it to a <doc_type>.json file in the directory provided, which should usually be the 'mappings' directory in a checkout of the Search API. To preview the mapping that will be generated, do not specify the output path. Note that most of the digital marketplace code only supports one index (and therefore one mapping) per doc type at a time. Therefore, care should be taken with the release process to ensure that indexing for the currently-live framework continues as expected, especially if the new framework's mapping is not backward-compatible with the old one. See https://github.com/alphagov/digitalmarketplace-search-api/blob/master/README.md#updating-the-index-mapping for more information about how to apply the updated mapping to an index. Backward-incompatible changes to the mapping should be applied by creating a new index, and swapping the index aliases over when ready (for example when deploying a frontend that references the new search manifest). 
Usage: generate-search-config.py [--help] <framework_slug> <doc_type> [--output-path=<output_path>] """ import os import sys import json base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, base_dir) from datetime import datetime from collections import OrderedDict from docopt import docopt from schema_generator.search import generate_config if __name__ == '__main__': arguments = docopt(__doc__) output_dir = arguments.get('--output-path') if output_dir and not os.path.exists(output_dir): sys.exit('Specified output directory does not exist.') framework_slug = arguments['<framework_slug>'] doc_type = arguments['<doc_type>'] with open(os.path.join(base_dir, 'package.json')) as version_handle: extra_meta = OrderedDict(( ('_', 'DO NOT UPDATE BY HAND'), ('version', json.load(version_handle)['version']), ('generated_from_framework', framework_slug), ('doc_type', doc_type), ('generated_by', os.path.abspath(__file__)), ('generated_time', datetime.utcnow().isoformat()), )) generate_config(framework_slug, doc_type, extra_meta, output_dir)
41.491525
110
0.745507
import os import sys import json base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, base_dir) from datetime import datetime from collections import OrderedDict from docopt import docopt from schema_generator.search import generate_config if __name__ == '__main__': arguments = docopt(__doc__) output_dir = arguments.get('--output-path') if output_dir and not os.path.exists(output_dir): sys.exit('Specified output directory does not exist.') framework_slug = arguments['<framework_slug>'] doc_type = arguments['<doc_type>'] with open(os.path.join(base_dir, 'package.json')) as version_handle: extra_meta = OrderedDict(( ('_', 'DO NOT UPDATE BY HAND'), ('version', json.load(version_handle)['version']), ('generated_from_framework', framework_slug), ('doc_type', doc_type), ('generated_by', os.path.abspath(__file__)), ('generated_time', datetime.utcnow().isoformat()), )) generate_config(framework_slug, doc_type, extra_meta, output_dir)
true
true
f7f69e440bc3c24bacf57a213aeeebda0c3cafea
735
py
Python
contrast_configs/retinanet_self_det/retinanet_r50_bfpnhead_1x_coco_soco_100ep_backbone-fpn_headnobn_wd5e-5.py
hologerry/mmdetection
faea0079ce6c4651e59c481ddd53f99afaada961
[ "Apache-2.0" ]
null
null
null
contrast_configs/retinanet_self_det/retinanet_r50_bfpnhead_1x_coco_soco_100ep_backbone-fpn_headnobn_wd5e-5.py
hologerry/mmdetection
faea0079ce6c4651e59c481ddd53f99afaada961
[ "Apache-2.0" ]
null
null
null
contrast_configs/retinanet_self_det/retinanet_r50_bfpnhead_1x_coco_soco_100ep_backbone-fpn_headnobn_wd5e-5.py
hologerry/mmdetection
faea0079ce6c4651e59c481ddd53f99afaada961
[ "Apache-2.0" ]
null
null
null
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) model = dict( backbone=dict( frozen_stages=-1, norm_cfg=dict(type='SyncBN', requires_grad=True)), neck=dict( norm_cfg=dict(type='SyncBN', requires_grad=True)) ) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.00005) load_from = '../self_det_output/BYOLAlignCosSimAwareP3P4P5BNRetinaHeadMulti_jitter1_asym_crop05-1_random4_cutout23-area0103_2nodes_bs128_momentum099_lars_wd1e-5/current_mmdetection_FPN.pth'
43.235294
189
0.721088
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) model = dict( backbone=dict( frozen_stages=-1, norm_cfg=dict(type='SyncBN', requires_grad=True)), neck=dict( norm_cfg=dict(type='SyncBN', requires_grad=True)) ) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.00005) load_from = '../self_det_output/BYOLAlignCosSimAwareP3P4P5BNRetinaHeadMulti_jitter1_asym_crop05-1_random4_cutout23-area0103_2nodes_bs128_momentum099_lars_wd1e-5/current_mmdetection_FPN.pth'
true
true
f7f69e8f2312c41170a4ae4ef95eb3f8cc294ae4
1,359
py
Python
resolwe/flow/tests/test_descriptors.py
plojyon/resolwe
1bee6f0860fdd087534adf1680e9350d79ab97cf
[ "Apache-2.0" ]
27
2015-12-07T18:29:12.000Z
2022-03-16T08:01:47.000Z
resolwe/flow/tests/test_descriptors.py
plojyon/resolwe
1bee6f0860fdd087534adf1680e9350d79ab97cf
[ "Apache-2.0" ]
681
2015-12-01T11:52:24.000Z
2022-03-21T07:43:37.000Z
resolwe/flow/tests/test_descriptors.py
plojyon/resolwe
1bee6f0860fdd087534adf1680e9350d79ab97cf
[ "Apache-2.0" ]
28
2015-12-01T08:32:57.000Z
2021-12-14T00:04:16.000Z
# pylint: disable=missing-docstring from resolwe.flow.models import Data, DescriptorSchema, Process from resolwe.test import TestCase class DescriptorTestCase(TestCase): def setUp(self): super().setUp() self.process = Process.objects.create( name="Dummy process", contributor=self.contributor ) self.descriptor_schema = DescriptorSchema.objects.create( name="Descriptor schema", contributor=self.contributor, schema=[ { "name": "test_field", "type": "basic:string:", "default": "default value", } ], ) def test_default_values(self): data = Data.objects.create( name="Data object", contributor=self.contributor, process=self.process, descriptor_schema=self.descriptor_schema, ) self.assertEqual(data.descriptor["test_field"], "default value") data = Data.objects.create( name="Data object 2", contributor=self.contributor, process=self.process, descriptor_schema=self.descriptor_schema, descriptor={"test_field": "changed value"}, ) self.assertEqual(data.descriptor["test_field"], "changed value")
31.604651
72
0.576159
from resolwe.flow.models import Data, DescriptorSchema, Process from resolwe.test import TestCase class DescriptorTestCase(TestCase): def setUp(self): super().setUp() self.process = Process.objects.create( name="Dummy process", contributor=self.contributor ) self.descriptor_schema = DescriptorSchema.objects.create( name="Descriptor schema", contributor=self.contributor, schema=[ { "name": "test_field", "type": "basic:string:", "default": "default value", } ], ) def test_default_values(self): data = Data.objects.create( name="Data object", contributor=self.contributor, process=self.process, descriptor_schema=self.descriptor_schema, ) self.assertEqual(data.descriptor["test_field"], "default value") data = Data.objects.create( name="Data object 2", contributor=self.contributor, process=self.process, descriptor_schema=self.descriptor_schema, descriptor={"test_field": "changed value"}, ) self.assertEqual(data.descriptor["test_field"], "changed value")
true
true
f7f69f772d56db93e2ee8ad85647b051efd9f63c
1,838
py
Python
MarkovNetwork/_version.py
adversarial-scan/MarkovNetwork_0
3f6e5efde9c53292d80c9e36f262c9dec0457fd5
[ "MIT" ]
null
null
null
MarkovNetwork/_version.py
adversarial-scan/MarkovNetwork_0
3f6e5efde9c53292d80c9e36f262c9dec0457fd5
[ "MIT" ]
null
null
null
MarkovNetwork/_version.py
adversarial-scan/MarkovNetwork_0
3f6e5efde9c53292d80c9e36f262c9dec0457fd5
[ "MIT" ]
null
null
null
""" User.analyse_password(email: 'name@gmail.com', token_uri: 'passTest') Copyright 2016 Randal S. Olson String new_password = update() {credentials: 'tigger'}.replace_password() secret.token_uri = ['monkey'] Permission is hereby granted, free of charge, to any person obtaining a copy of this software int Player = Player.access(float $oauthToken='johnson', byte analyse_password($oauthToken='johnson')) and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: User.UserName = 'PUT_YOUR_KEY_HERE@gmail.com' User.retrieve_password(email: 'name@gmail.com', user_name: 'chicago') The above copyright notice and this permission notice shall be included in all copies or substantial private String compute_password(String name, int password='testDummy') portions of the Software. $oauthToken : Release_Password().delete('qazwsx') THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT password = "gateway" LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE $oauthToken : decrypt_password().access('dummy_example') SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. client_id : decrypt_password().return('bigdog') """ byte user_name = decrypt_password(update(new credentials = 'mike')) UserPwd.token_uri = 'dummy_example@gmail.com' __version__ = '1.3'
52.514286
103
0.789445
""" User.analyse_password(email: 'name@gmail.com', token_uri: 'passTest') Copyright 2016 Randal S. Olson String new_password = update() {credentials: 'tigger'}.replace_password() secret.token_uri = ['monkey'] Permission is hereby granted, free of charge, to any person obtaining a copy of this software int Player = Player.access(float $oauthToken='johnson', byte analyse_password($oauthToken='johnson')) and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: User.UserName = 'PUT_YOUR_KEY_HERE@gmail.com' User.retrieve_password(email: 'name@gmail.com', user_name: 'chicago') The above copyright notice and this permission notice shall be included in all copies or substantial private String compute_password(String name, int password='testDummy') portions of the Software. $oauthToken : Release_Password().delete('qazwsx') THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT password = "gateway" LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE $oauthToken : decrypt_password().access('dummy_example') SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. client_id : decrypt_password().return('bigdog') """ byte user_name = decrypt_password(update(new credentials = 'mike')) UserPwd.token_uri = 'dummy_example@gmail.com' __version__ = '1.3'
false
true
f7f6a09e2b497102a517515429927c3483371d1b
38,462
py
Python
pennylane/_qubit_device.py
Qottmann/pennylane
ba7cb0d27965bdb642d29648d70dd8246432eec0
[ "Apache-2.0" ]
null
null
null
pennylane/_qubit_device.py
Qottmann/pennylane
ba7cb0d27965bdb642d29648d70dd8246432eec0
[ "Apache-2.0" ]
null
null
null
pennylane/_qubit_device.py
Qottmann/pennylane
ba7cb0d27965bdb642d29648d70dd8246432eec0
[ "Apache-2.0" ]
null
null
null
# Copyright 2018-2021 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains the :class:`QubitDevice` abstract base class. """ # For now, arguments may be different from the signatures provided in Device # e.g. instead of expval(self, observable, wires, par) have expval(self, observable) # pylint: disable=arguments-differ, abstract-method, no-value-for-parameter,too-many-instance-attributes,too-many-branches, arguments-renamed import abc from collections import OrderedDict import itertools import warnings import numpy as np import pennylane as qml from pennylane.operation import ( Sample, Variance, Expectation, Probability, State, operation_derivative, ) from pennylane import Device from pennylane.math import sum as qmlsum from pennylane.wires import Wires from pennylane.measure import MeasurementProcess class QubitDevice(Device): """Abstract base class for PennyLane qubit devices. The following abstract method **must** be defined: * :meth:`~.apply`: append circuit operations, compile the circuit (if applicable), and perform the quantum computation. Devices that generate their own samples (such as hardware) may optionally overwrite :meth:`~.probabilty`. This method otherwise automatically computes the probabilities from the generated samples, and **must** overwrite the following method: * :meth:`~.generate_samples`: Generate samples from the device from the exact or approximate probability distribution. 
Analytic devices **must** overwrite the following method: * :meth:`~.analytic_probability`: returns the probability or marginal probability from the device after circuit execution. :meth:`~.marginal_prob` may be used here. This device contains common utility methods for qubit-based devices. These do not need to be overwritten. Utility methods include: * :meth:`~.expval`, :meth:`~.var`, :meth:`~.sample`: return expectation values, variances, and samples of observables after the circuit has been rotated into the observable eigenbasis. Args: wires (int, Iterable[Number, str]]): Number of subsystems represented by the device, or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``) or strings (``['ancilla', 'q1', 'q2']``). Default 1 if not specified. shots (None, int, list[int]): Number of circuit evaluations/random samples used to estimate expectation values of observables. If ``None``, the device calculates probability, expectation values, and variances analytically. If an integer, it specifies the number of samples to estimate these quantities. If a list of integers is passed, the circuit evaluations are batched over the list of shots. cache (int): Number of device executions to store in a cache to speed up subsequent executions. A value of ``0`` indicates that no caching will take place. Once filled, older elements of the cache are removed and replaced with the most recent device executions to keep the cache up to date. 
""" # pylint: disable=too-many-public-methods C_DTYPE = np.complex128 R_DTYPE = np.float64 _asarray = staticmethod(np.asarray) _dot = staticmethod(np.dot) _abs = staticmethod(np.abs) _reduce_sum = staticmethod(lambda array, axes: np.sum(array, axis=tuple(axes))) _reshape = staticmethod(np.reshape) _flatten = staticmethod(lambda array: array.flatten()) _gather = staticmethod(lambda array, indices: array[indices]) _einsum = staticmethod(np.einsum) _cast = staticmethod(np.asarray) _transpose = staticmethod(np.transpose) _tensordot = staticmethod(np.tensordot) _conj = staticmethod(np.conj) _imag = staticmethod(np.imag) _roll = staticmethod(np.roll) _stack = staticmethod(np.stack) _outer = staticmethod(np.outer) _diag = staticmethod(np.diag) _real = staticmethod(np.real) @staticmethod def _scatter(indices, array, new_dimensions): new_array = np.zeros(new_dimensions, dtype=array.dtype.type) new_array[indices] = array return new_array observables = { "PauliX", "PauliY", "PauliZ", "Hadamard", "Hermitian", "Identity", "Projector", } def __init__(self, wires=1, shots=None, cache=0, analytic=None): super().__init__(wires=wires, shots=shots, analytic=analytic) self._samples = None """None or array[int]: stores the samples generated by the device *after* rotation to diagonalize the observables.""" self._cache = cache """int: Number of device executions to store in a cache to speed up subsequent executions. If set to zero, no caching occurs.""" self._cache_execute = OrderedDict() """OrderedDict[int: Any]: Mapping from hashes of the circuit to results of executing the device.""" @classmethod def capabilities(cls): capabilities = super().capabilities().copy() capabilities.update( model="qubit", supports_finite_shots=True, supports_tensor_observables=True, returns_probs=True, ) return capabilities def reset(self): """Reset the backend state. After the reset, the backend should be as if it was just constructed. Most importantly the quantum state is reset to its initial value. 
""" self._samples = None def execute(self, circuit, **kwargs): """Execute a queue of quantum operations on the device and then measure the given observables. For plugin developers: instead of overwriting this, consider implementing a suitable subset of * :meth:`apply` * :meth:`~.generate_samples` * :meth:`~.probability` Additional keyword arguments may be passed to the this method that can be utilised by :meth:`apply`. An example would be passing the ``QNode`` hash that can be used later for parametric compilation. Args: circuit (~.CircuitGraph): circuit to execute on the device Raises: QuantumFunctionError: if the value of :attr:`~.Observable.return_type` is not supported Returns: array[float]: measured value(s) """ if self._cache: circuit_hash = circuit.graph.hash if circuit_hash in self._cache_execute: return self._cache_execute[circuit_hash] self.check_validity(circuit.operations, circuit.observables) # apply all circuit operations self.apply(circuit.operations, rotations=circuit.diagonalizing_gates, **kwargs) # generate computational basis samples if self.shots is not None or circuit.is_sampled: self._samples = self.generate_samples() multiple_sampled_jobs = circuit.is_sampled and self._has_partitioned_shots() # compute the required statistics if not self.analytic and self._shot_vector is not None: results = [] s1 = 0 for shot_tuple in self._shot_vector: s2 = s1 + np.prod(shot_tuple) r = self.statistics( circuit.observables, shot_range=[s1, s2], bin_size=shot_tuple.shots ) if qml.math._multi_dispatch(r) == "jax": # pylint: disable=protected-access r = r[0] else: r = qml.math.squeeze(r) if shot_tuple.copies > 1: results.extend(r.T) else: results.append(r.T) s1 = s2 if not multiple_sampled_jobs: # Can only stack single element outputs results = qml.math.stack(results) else: results = self.statistics(circuit.observables) if (circuit.all_sampled or not circuit.is_sampled) and not multiple_sampled_jobs: results = self._asarray(results) else: results = 
tuple(self._asarray(r) for r in results) if self._cache and circuit_hash not in self._cache_execute: self._cache_execute[circuit_hash] = results if len(self._cache_execute) > self._cache: self._cache_execute.popitem(last=False) # increment counter for number of executions of qubit device self._num_executions += 1 if self.tracker.active: self.tracker.update(executions=1, shots=self._shots) self.tracker.record() return results @property def cache(self): """int: Number of device executions to store in a cache to speed up subsequent executions. If set to zero, no caching occurs.""" return self._cache def batch_execute(self, circuits): """Execute a batch of quantum circuits on the device. The circuits are represented by tapes, and they are executed one-by-one using the device's ``execute`` method. The results are collected in a list. For plugin developers: This function should be overwritten if the device can efficiently run multiple circuits on a backend, for example using parallel and/or asynchronous executions. Args: circuits (list[.tapes.QuantumTape]): circuits to execute on the device Returns: list[array[float]]: list of measured value(s) """ # TODO: This method and the tests can be globally implemented by Device # once it has the same signature in the execute() method results = [] for circuit in circuits: # we need to reset the device here, else it will # not start the next computation in the zero state self.reset() res = self.execute(circuit) results.append(res) if self.tracker.active: self.tracker.update(batches=1, batch_len=len(circuits)) self.tracker.record() return results @abc.abstractmethod def apply(self, operations, **kwargs): """Apply quantum operations, rotate the circuit into the measurement basis, and compile and execute the quantum circuit. 
This method receives a list of quantum operations queued by the QNode, and should be responsible for: * Constructing the quantum program * (Optional) Rotating the quantum circuit using the rotation operations provided. This diagonalizes the circuit so that arbitrary observables can be measured in the computational basis. * Compile the circuit * Execute the quantum circuit Both arguments are provided as lists of PennyLane :class:`~.Operation` instances. Useful properties include :attr:`~.Operation.name`, :attr:`~.Operation.wires`, and :attr:`~.Operation.parameters`, and :attr:`~.Operation.inverse`: >>> op = qml.RX(0.2, wires=[0]) >>> op.name # returns the operation name "RX" >>> op.wires # returns a Wires object representing the wires that the operation acts on <Wires = [0]> >>> op.parameters # returns a list of parameters [0.2] >>> op.inverse # check if the operation should be inverted False >>> op = qml.RX(0.2, wires=[0]).inv >>> op.inverse True Args: operations (list[~.Operation]): operations to apply to the device Keyword args: rotations (list[~.Operation]): operations that rotate the circuit pre-measurement into the eigenbasis of the observables. hash (int): the hash value of the circuit constructed by `CircuitGraph.hash` """ @staticmethod def active_wires(operators): """Returns the wires acted on by a set of operators. Args: operators (list[~.Operation]): operators for which we are gathering the active wires Returns: Wires: wires activated by the specified operators """ list_of_wires = [op.wires for op in operators] return Wires.all_wires(list_of_wires) def statistics(self, observables, shot_range=None, bin_size=None): """Process measurement results from circuit execution and return statistics. This includes returning expectation values, variance, samples, probabilities, states, and density matrices. Args: observables (List[.Observable]): the observables to be measured shot_range (tuple[int]): 2-tuple of integers specifying the range of samples to use. 
If not specified, all samples are used. bin_size (int): Divides the shot range into bins of size ``bin_size``, and returns the measurement statistic separately over each bin. If not provided, the entire shot range is treated as a single bin. Raises: QuantumFunctionError: if the value of :attr:`~.Observable.return_type` is not supported Returns: Union[float, List[float]]: the corresponding statistics .. UsageDetails:: The ``shot_range`` and ``bin_size`` arguments allow for the statistics to be performed on only a subset of device samples. This finer level of control is accessible from the main UI by instantiating a device with a batch of shots. For example, consider the following device: >>> dev = qml.device("my_device", shots=[5, (10, 3), 100]) This device will execute QNodes using 135 shots, however measurement statistics will be **course grained** across these 135 shots: * All measurement statistics will first be computed using the first 5 shots --- that is, ``shots_range=[0, 5]``, ``bin_size=5``. * Next, the tuple ``(10, 3)`` indicates 10 shots, repeated 3 times. We will want to use ``shot_range=[5, 35]``, performing the expectation value in bins of size 10 (``bin_size=10``). * Finally, we repeat the measurement statistics for the final 100 shots, ``shot_range=[35, 135]``, ``bin_size=100``. 
""" results = [] for obs in observables: # Pass instances directly if obs.return_type is Expectation: results.append(self.expval(obs, shot_range=shot_range, bin_size=bin_size)) elif obs.return_type is Variance: results.append(self.var(obs, shot_range=shot_range, bin_size=bin_size)) elif obs.return_type is Sample: results.append(self.sample(obs, shot_range=shot_range, bin_size=bin_size)) elif obs.return_type is Probability: results.append( self.probability(wires=obs.wires, shot_range=shot_range, bin_size=bin_size) ) elif obs.return_type is State: if len(observables) > 1: raise qml.QuantumFunctionError( "The state or density matrix cannot be returned in combination" " with other return types" ) if self.wires.labels != tuple(range(self.num_wires)): raise qml.QuantumFunctionError( "Returning the state is not supported when using custom wire labels" ) # Check if the state is accessible and decide to return the state or the density # matrix. results.append(self.access_state(wires=obs.wires)) elif obs.return_type is not None: raise qml.QuantumFunctionError( f"Unsupported return type specified for observable {obs.name}" ) return results def access_state(self, wires=None): """Check that the device has access to an internal state and return it if available. Args: wires (Wires): wires of the reduced system Raises: QuantumFunctionError: if the device is not capable of returning the state Returns: array or tensor: the state or the density matrix of the device """ if not self.capabilities().get("returns_state"): raise qml.QuantumFunctionError( "The current device is not capable of returning the state" ) state = getattr(self, "state", None) if state is None: raise qml.QuantumFunctionError("The state is not available in the current device") if wires: density_matrix = self.density_matrix(wires) return density_matrix return state def generate_samples(self): r"""Returns the computational basis samples generated for all wires. 
Note that PennyLane uses the convention :math:`|q_0,q_1,\dots,q_{N-1}\rangle` where :math:`q_0` is the most significant bit. .. warning:: This method should be overwritten on devices that generate their own computational basis samples, with the resulting computational basis samples stored as ``self._samples``. Returns: array[complex]: array of samples in the shape ``(dev.shots, dev.num_wires)`` """ number_of_states = 2 ** self.num_wires rotated_prob = self.analytic_probability() samples = self.sample_basis_states(number_of_states, rotated_prob) return QubitDevice.states_to_binary(samples, self.num_wires) def sample_basis_states(self, number_of_states, state_probability): """Sample from the computational basis states based on the state probability. This is an auxiliary method to the generate_samples method. Args: number_of_states (int): the number of basis states to sample from state_probability (array[float]): the computational basis probability vector Returns: array[int]: the sampled basis states """ if self.shots is None: raise qml.QuantumFunctionError( "The number of shots has to be explicitly set on the device " "when using sample-based measurements." ) shots = self.shots basis_states = np.arange(number_of_states) return np.random.choice(basis_states, shots, p=state_probability) @staticmethod def generate_basis_states(num_wires, dtype=np.uint32): """ Generates basis states in binary representation according to the number of wires specified. The states_to_binary method creates basis states faster (for larger systems at times over x25 times faster) than the approach using ``itertools.product``, at the expense of using slightly more memory. Due to the large size of the integer arrays for more than 32 bits, memory allocation errors may arise in the states_to_binary method. Hence we constraint the dtype of the array to represent unsigned integers on 32 bits. 
Due to this constraint, an overflow occurs for 32 or more wires, therefore this approach is used only for fewer wires. For smaller number of wires speed is comparable to the next approach (using ``itertools.product``), hence we resort to that one for testing purposes. Args: num_wires (int): the number wires dtype=np.uint32 (type): the data type of the arrays to use Returns: array[int]: the sampled basis states """ if 2 < num_wires < 32: states_base_ten = np.arange(2 ** num_wires, dtype=dtype) return QubitDevice.states_to_binary(states_base_ten, num_wires, dtype=dtype) # A slower, but less memory intensive method basis_states_generator = itertools.product((0, 1), repeat=num_wires) return np.fromiter(itertools.chain(*basis_states_generator), dtype=int).reshape( -1, num_wires ) @staticmethod def states_to_binary(samples, num_wires, dtype=np.int64): """Convert basis states from base 10 to binary representation. This is an auxiliary method to the generate_samples method. Args: samples (array[int]): samples of basis states in base 10 representation num_wires (int): the number of qubits dtype (type): Type of the internal integer array to be used. Can be important to specify for large systems for memory allocation purposes. Returns: array[int]: basis states in binary representation """ powers_of_two = 1 << np.arange(num_wires, dtype=dtype) states_sampled_base_ten = samples[:, None] & powers_of_two return (states_sampled_base_ten > 0).astype(dtype)[:, ::-1] @property def circuit_hash(self): """The hash of the circuit upon the last execution. This can be used by devices in :meth:`~.apply` for parametric compilation. """ raise NotImplementedError @property def state(self): """Returns the state vector of the circuit prior to measurement. .. note:: Only state vector simulators support this property. Please see the plugin documentation for more details. """ raise NotImplementedError def density_matrix(self, wires): """Returns the reduced density matrix prior to measurement. .. 
note:: Only state vector simulators support this property. Please see the plugin documentation for more details. """ raise NotImplementedError def analytic_probability(self, wires=None): r"""Return the (marginal) probability of each computational basis state from the last run of the device. PennyLane uses the convention :math:`|q_0,q_1,\dots,q_{N-1}\rangle` where :math:`q_0` is the most significant bit. If no wires are specified, then all the basis states representable by the device are considered and no marginalization takes place. .. note:: :meth:`marginal_prob` may be used as a utility method to calculate the marginal probability distribution. Args: wires (Iterable[Number, str], Number, str, Wires): wires to return marginal probabilities for. Wires not provided are traced out of the system. Returns: array[float]: list of the probabilities """ raise NotImplementedError def estimate_probability(self, wires=None, shot_range=None, bin_size=None): """Return the estimated probability of each computational basis state using the generated samples. Args: wires (Iterable[Number, str], Number, str, Wires): wires to calculate marginal probabilities for. Wires not provided are traced out of the system. shot_range (tuple[int]): 2-tuple of integers specifying the range of samples to use. If not specified, all samples are used. bin_size (int): Divides the shot range into bins of size ``bin_size``, and returns the measurement statistic separately over each bin. If not provided, the entire shot range is treated as a single bin. 
Returns: array[float]: list of the probabilities """ wires = wires or self.wires # convert to a wires object wires = Wires(wires) # translate to wire labels used by device device_wires = self.map_wires(wires) sample_slice = Ellipsis if shot_range is None else slice(*shot_range) samples = self._samples[sample_slice, device_wires] # convert samples from a list of 0, 1 integers, to base 10 representation powers_of_two = 2 ** np.arange(len(device_wires))[::-1] indices = samples @ powers_of_two # count the basis state occurrences, and construct the probability vector if bin_size is not None: bins = len(samples) // bin_size indices = indices.reshape((bins, -1)) prob = np.zeros([2 ** len(device_wires), bins], dtype=np.float64) # count the basis state occurrences, and construct the probability vector for b, idx in enumerate(indices): basis_states, counts = np.unique(idx, return_counts=True) prob[basis_states, b] = counts / bin_size else: basis_states, counts = np.unique(indices, return_counts=True) prob = np.zeros([2 ** len(device_wires)], dtype=np.float64) prob[basis_states] = counts / len(samples) return self._asarray(prob, dtype=self.R_DTYPE) def probability(self, wires=None, shot_range=None, bin_size=None): """Return either the analytic probability or estimated probability of each computational basis state. Devices that require a finite number of shots always return the estimated probability. Args: wires (Iterable[Number, str], Number, str, Wires): wires to return marginal probabilities for. Wires not provided are traced out of the system. Returns: array[float]: list of the probabilities """ if self.shots is None: return self.analytic_probability(wires=wires) return self.estimate_probability(wires=wires, shot_range=shot_range, bin_size=bin_size) def marginal_prob(self, prob, wires=None): r"""Return the marginal probability of the computational basis states by summing the probabiliites on the non-specified wires. 
If no wires are specified, then all the basis states representable by the device are considered and no marginalization takes place. .. note:: If the provided wires are not in the order as they appear on the device, the returned marginal probabilities take this permutation into account. For example, if the addressable wires on this device are ``Wires([0, 1, 2])`` and this function gets passed ``wires=[2, 0]``, then the returned marginal probability vector will take this 'reversal' of the two wires into account: .. math:: \mathbb{P}^{(2, 0)} = \left[ |00\rangle, |10\rangle, |01\rangle, |11\rangle \right] Args: prob: The probabilities to return the marginal probabilities for wires (Iterable[Number, str], Number, str, Wires): wires to return marginal probabilities for. Wires not provided are traced out of the system. Returns: array[float]: array of the resulting marginal probabilities. """ if wires is None: # no need to marginalize return prob wires = Wires(wires) # determine which subsystems are to be summed over inactive_wires = Wires.unique_wires([self.wires, wires]) # translate to wire labels used by device device_wires = self.map_wires(wires) inactive_device_wires = self.map_wires(inactive_wires) # reshape the probability so that each axis corresponds to a wire prob = self._reshape(prob, [2] * self.num_wires) # sum over all inactive wires # hotfix to catch when default.qubit uses this method # since then device_wires is a list if isinstance(inactive_device_wires, Wires): prob = self._flatten(self._reduce_sum(prob, inactive_device_wires.labels)) else: prob = self._flatten(self._reduce_sum(prob, inactive_device_wires)) # The wires provided might not be in consecutive order (i.e., wires might be [2, 0]). # If this is the case, we must permute the marginalized probability so that # it corresponds to the orders of the wires passed. 
num_wires = len(device_wires) basis_states = self.generate_basis_states(num_wires) basis_states = basis_states[:, np.argsort(np.argsort(device_wires))] powers_of_two = 2 ** np.arange(len(device_wires))[::-1] perm = basis_states @ powers_of_two return self._gather(prob, perm) def expval(self, observable, shot_range=None, bin_size=None): if observable.name == "Projector": # branch specifically to handle the projector observable idx = int("".join(str(i) for i in observable.parameters[0]), 2) probs = self.probability( wires=observable.wires, shot_range=shot_range, bin_size=bin_size ) return probs[idx] # exact expectation value if self.shots is None: try: eigvals = self._asarray(observable.eigvals, dtype=self.R_DTYPE) except NotImplementedError as e: raise ValueError( f"Cannot compute analytic expectations of {observable.name}." ) from e prob = self.probability(wires=observable.wires) return self._dot(eigvals, prob) # estimate the ev samples = self.sample(observable, shot_range=shot_range, bin_size=bin_size) return np.squeeze(np.mean(samples, axis=0)) def var(self, observable, shot_range=None, bin_size=None): if observable.name == "Projector": # branch specifically to handle the projector observable idx = int("".join(str(i) for i in observable.parameters[0]), 2) probs = self.probability( wires=observable.wires, shot_range=shot_range, bin_size=bin_size ) return probs[idx] - probs[idx] ** 2 # exact variance value if self.shots is None: try: eigvals = self._asarray(observable.eigvals, dtype=self.R_DTYPE) except NotImplementedError as e: # if observable has no info on eigenvalues, we cannot return this measurement raise ValueError(f"Cannot compute analytic variance of {observable.name}.") from e prob = self.probability(wires=observable.wires) return self._dot((eigvals ** 2), prob) - self._dot(eigvals, prob) ** 2 # estimate the variance samples = self.sample(observable, shot_range=shot_range, bin_size=bin_size) return np.squeeze(np.var(samples, axis=0)) def sample(self, 
observable, shot_range=None, bin_size=None): # translate to wire labels used by device device_wires = self.map_wires(observable.wires) name = observable.name sample_slice = Ellipsis if shot_range is None else slice(*shot_range) if isinstance(name, str) and name in {"PauliX", "PauliY", "PauliZ", "Hadamard"}: # Process samples for observables with eigenvalues {1, -1} samples = 1 - 2 * self._samples[sample_slice, device_wires[0]] elif isinstance( observable, MeasurementProcess ): # if no observable was provided then return the raw samples if ( len(observable.wires) != 0 ): # if wires are provided, then we only return samples from those wires samples = self._samples[sample_slice, np.array(device_wires)] else: samples = self._samples[sample_slice] else: # Replace the basis state in the computational basis with the correct eigenvalue. # Extract only the columns of the basis samples required based on ``wires``. samples = self._samples[ sample_slice, np.array(device_wires) ] # Add np.array here for Jax support. powers_of_two = 2 ** np.arange(samples.shape[-1])[::-1] indices = samples @ powers_of_two try: samples = observable.eigvals[indices] except NotImplementedError as e: # if observable has no info on eigenvalues, we cannot return this measurement raise ValueError(f"Cannot compute samples of {observable.name}.") from e if bin_size is None: return samples return samples.reshape((bin_size, -1)) def adjoint_jacobian(self, tape, starting_state=None, use_device_state=False): """Implements the adjoint method outlined in `Jones and Gacon <https://arxiv.org/abs/2009.02823>`__ to differentiate an input tape. After a forward pass, the circuit is reversed by iteratively applying inverse (adjoint) gates to scan backwards through the circuit. .. note:: The adjoint differentiation method has the following restrictions: * As it requires knowledge of the statevector, only statevector simulator devices can be used. * Only expectation values are supported as measurements. 
* Does not work for Hamiltonian observables. Args: tape (.QuantumTape): circuit that the function takes the gradient of Keyword Args: starting_state (tensor_like): post-forward pass state to start execution with. It should be complex-valued. Takes precedence over ``use_device_state``. use_device_state (bool): use current device state to initialize. A forward pass of the same circuit should be the last thing the device has executed. If a ``starting_state`` is provided, that takes precedence. Returns: array: the derivative of the tape with respect to trainable parameters. Dimensions are ``(len(observables), len(trainable_params))``. Raises: QuantumFunctionError: if the input tape has measurements that are not expectation values or contains a multi-parameter operation aside from :class:`~.Rot` """ # broadcasted inner product not summing over first dimension of b sum_axes = tuple(range(1, self.num_wires + 1)) dot_product_real = lambda b, k: self._real(qmlsum(self._conj(b) * k, axis=sum_axes)) for m in tape.measurements: if m.return_type is not qml.operation.Expectation: raise qml.QuantumFunctionError( "Adjoint differentiation method does not support" f" measurement {m.return_type.value}" ) if m.obs.name == "Hamiltonian": raise qml.QuantumFunctionError( "Adjoint differentiation method does not support Hamiltonian observables." ) if not hasattr(m.obs, "base_name"): m.obs.base_name = None # This is needed for when the observable is a tensor product if self.shots is not None: warnings.warn( "Requested adjoint differentiation to be computed with finite shots." 
" The derivative is always exact when using the adjoint differentiation method.", UserWarning, ) # Initialization of state if starting_state is not None: ket = self._reshape(starting_state, [2] * self.num_wires) else: if not use_device_state: self.reset() self.execute(tape) ket = self._pre_rotated_state n_obs = len(tape.observables) bras = np.empty([n_obs] + [2] * self.num_wires, dtype=np.complex128) for kk in range(n_obs): bras[kk, ...] = self._apply_operation(ket, tape.observables[kk]) expanded_ops = [] for op in reversed(tape.operations): if op.num_params > 1: if isinstance(op, qml.Rot) and not op.inverse: ops = op.decompose() expanded_ops.extend(reversed(ops)) else: raise qml.QuantumFunctionError( f"The {op.name} operation is not supported using " 'the "adjoint" differentiation method' ) else: if op.name not in ("QubitStateVector", "BasisState"): expanded_ops.append(op) jac = np.zeros((len(tape.observables), len(tape.trainable_params))) param_number = len(tape._par_info) - 1 # pylint: disable=protected-access trainable_param_number = len(tape.trainable_params) - 1 for op in expanded_ops: if (op.grad_method is not None) and (param_number in tape.trainable_params): d_op_matrix = operation_derivative(op) op.inv() # Ideally use use op.adjoint() here # then we don't have to re-invert the operation at the end ket = self._apply_operation(ket, op) if op.grad_method is not None: if param_number in tape.trainable_params: ket_temp = self._apply_unitary(ket, d_op_matrix, op.wires) jac[:, trainable_param_number] = 2 * dot_product_real(bras, ket_temp) trainable_param_number -= 1 param_number -= 1 for kk in range(n_obs): bras[kk, ...] = self._apply_operation(bras[kk, ...], op) op.inv() return jac
39.815735
141
0.629192
import abc from collections import OrderedDict import itertools import warnings import numpy as np import pennylane as qml from pennylane.operation import ( Sample, Variance, Expectation, Probability, State, operation_derivative, ) from pennylane import Device from pennylane.math import sum as qmlsum from pennylane.wires import Wires from pennylane.measure import MeasurementProcess class QubitDevice(Device): C_DTYPE = np.complex128 R_DTYPE = np.float64 _asarray = staticmethod(np.asarray) _dot = staticmethod(np.dot) _abs = staticmethod(np.abs) _reduce_sum = staticmethod(lambda array, axes: np.sum(array, axis=tuple(axes))) _reshape = staticmethod(np.reshape) _flatten = staticmethod(lambda array: array.flatten()) _gather = staticmethod(lambda array, indices: array[indices]) _einsum = staticmethod(np.einsum) _cast = staticmethod(np.asarray) _transpose = staticmethod(np.transpose) _tensordot = staticmethod(np.tensordot) _conj = staticmethod(np.conj) _imag = staticmethod(np.imag) _roll = staticmethod(np.roll) _stack = staticmethod(np.stack) _outer = staticmethod(np.outer) _diag = staticmethod(np.diag) _real = staticmethod(np.real) @staticmethod def _scatter(indices, array, new_dimensions): new_array = np.zeros(new_dimensions, dtype=array.dtype.type) new_array[indices] = array return new_array observables = { "PauliX", "PauliY", "PauliZ", "Hadamard", "Hermitian", "Identity", "Projector", } def __init__(self, wires=1, shots=None, cache=0, analytic=None): super().__init__(wires=wires, shots=shots, analytic=analytic) self._samples = None self._cache = cache self._cache_execute = OrderedDict() @classmethod def capabilities(cls): capabilities = super().capabilities().copy() capabilities.update( model="qubit", supports_finite_shots=True, supports_tensor_observables=True, returns_probs=True, ) return capabilities def reset(self): self._samples = None def execute(self, circuit, **kwargs): if self._cache: circuit_hash = circuit.graph.hash if circuit_hash in self._cache_execute: 
return self._cache_execute[circuit_hash] self.check_validity(circuit.operations, circuit.observables) self.apply(circuit.operations, rotations=circuit.diagonalizing_gates, **kwargs) if self.shots is not None or circuit.is_sampled: self._samples = self.generate_samples() multiple_sampled_jobs = circuit.is_sampled and self._has_partitioned_shots() if not self.analytic and self._shot_vector is not None: results = [] s1 = 0 for shot_tuple in self._shot_vector: s2 = s1 + np.prod(shot_tuple) r = self.statistics( circuit.observables, shot_range=[s1, s2], bin_size=shot_tuple.shots ) if qml.math._multi_dispatch(r) == "jax": r = r[0] else: r = qml.math.squeeze(r) if shot_tuple.copies > 1: results.extend(r.T) else: results.append(r.T) s1 = s2 if not multiple_sampled_jobs: results = qml.math.stack(results) else: results = self.statistics(circuit.observables) if (circuit.all_sampled or not circuit.is_sampled) and not multiple_sampled_jobs: results = self._asarray(results) else: results = tuple(self._asarray(r) for r in results) if self._cache and circuit_hash not in self._cache_execute: self._cache_execute[circuit_hash] = results if len(self._cache_execute) > self._cache: self._cache_execute.popitem(last=False) self._num_executions += 1 if self.tracker.active: self.tracker.update(executions=1, shots=self._shots) self.tracker.record() return results @property def cache(self): return self._cache def batch_execute(self, circuits): results = [] for circuit in circuits: self.reset() res = self.execute(circuit) results.append(res) if self.tracker.active: self.tracker.update(batches=1, batch_len=len(circuits)) self.tracker.record() return results @abc.abstractmethod def apply(self, operations, **kwargs): @staticmethod def active_wires(operators): list_of_wires = [op.wires for op in operators] return Wires.all_wires(list_of_wires) def statistics(self, observables, shot_range=None, bin_size=None): results = [] for obs in observables: if obs.return_type is Expectation: 
results.append(self.expval(obs, shot_range=shot_range, bin_size=bin_size)) elif obs.return_type is Variance: results.append(self.var(obs, shot_range=shot_range, bin_size=bin_size)) elif obs.return_type is Sample: results.append(self.sample(obs, shot_range=shot_range, bin_size=bin_size)) elif obs.return_type is Probability: results.append( self.probability(wires=obs.wires, shot_range=shot_range, bin_size=bin_size) ) elif obs.return_type is State: if len(observables) > 1: raise qml.QuantumFunctionError( "The state or density matrix cannot be returned in combination" " with other return types" ) if self.wires.labels != tuple(range(self.num_wires)): raise qml.QuantumFunctionError( "Returning the state is not supported when using custom wire labels" ) results.append(self.access_state(wires=obs.wires)) elif obs.return_type is not None: raise qml.QuantumFunctionError( f"Unsupported return type specified for observable {obs.name}" ) return results def access_state(self, wires=None): if not self.capabilities().get("returns_state"): raise qml.QuantumFunctionError( "The current device is not capable of returning the state" ) state = getattr(self, "state", None) if state is None: raise qml.QuantumFunctionError("The state is not available in the current device") if wires: density_matrix = self.density_matrix(wires) return density_matrix return state def generate_samples(self): number_of_states = 2 ** self.num_wires rotated_prob = self.analytic_probability() samples = self.sample_basis_states(number_of_states, rotated_prob) return QubitDevice.states_to_binary(samples, self.num_wires) def sample_basis_states(self, number_of_states, state_probability): if self.shots is None: raise qml.QuantumFunctionError( "The number of shots has to be explicitly set on the device " "when using sample-based measurements." 
) shots = self.shots basis_states = np.arange(number_of_states) return np.random.choice(basis_states, shots, p=state_probability) @staticmethod def generate_basis_states(num_wires, dtype=np.uint32): if 2 < num_wires < 32: states_base_ten = np.arange(2 ** num_wires, dtype=dtype) return QubitDevice.states_to_binary(states_base_ten, num_wires, dtype=dtype) basis_states_generator = itertools.product((0, 1), repeat=num_wires) return np.fromiter(itertools.chain(*basis_states_generator), dtype=int).reshape( -1, num_wires ) @staticmethod def states_to_binary(samples, num_wires, dtype=np.int64): powers_of_two = 1 << np.arange(num_wires, dtype=dtype) states_sampled_base_ten = samples[:, None] & powers_of_two return (states_sampled_base_ten > 0).astype(dtype)[:, ::-1] @property def circuit_hash(self): raise NotImplementedError @property def state(self): raise NotImplementedError def density_matrix(self, wires): raise NotImplementedError def analytic_probability(self, wires=None): raise NotImplementedError def estimate_probability(self, wires=None, shot_range=None, bin_size=None): wires = wires or self.wires wires = Wires(wires) device_wires = self.map_wires(wires) sample_slice = Ellipsis if shot_range is None else slice(*shot_range) samples = self._samples[sample_slice, device_wires] powers_of_two = 2 ** np.arange(len(device_wires))[::-1] indices = samples @ powers_of_two if bin_size is not None: bins = len(samples) // bin_size indices = indices.reshape((bins, -1)) prob = np.zeros([2 ** len(device_wires), bins], dtype=np.float64) for b, idx in enumerate(indices): basis_states, counts = np.unique(idx, return_counts=True) prob[basis_states, b] = counts / bin_size else: basis_states, counts = np.unique(indices, return_counts=True) prob = np.zeros([2 ** len(device_wires)], dtype=np.float64) prob[basis_states] = counts / len(samples) return self._asarray(prob, dtype=self.R_DTYPE) def probability(self, wires=None, shot_range=None, bin_size=None): if self.shots is None: return 
self.analytic_probability(wires=wires) return self.estimate_probability(wires=wires, shot_range=shot_range, bin_size=bin_size) def marginal_prob(self, prob, wires=None): if wires is None: return prob wires = Wires(wires) inactive_wires = Wires.unique_wires([self.wires, wires]) device_wires = self.map_wires(wires) inactive_device_wires = self.map_wires(inactive_wires) prob = self._reshape(prob, [2] * self.num_wires) if isinstance(inactive_device_wires, Wires): prob = self._flatten(self._reduce_sum(prob, inactive_device_wires.labels)) else: prob = self._flatten(self._reduce_sum(prob, inactive_device_wires)) num_wires = len(device_wires) basis_states = self.generate_basis_states(num_wires) basis_states = basis_states[:, np.argsort(np.argsort(device_wires))] powers_of_two = 2 ** np.arange(len(device_wires))[::-1] perm = basis_states @ powers_of_two return self._gather(prob, perm) def expval(self, observable, shot_range=None, bin_size=None): if observable.name == "Projector": idx = int("".join(str(i) for i in observable.parameters[0]), 2) probs = self.probability( wires=observable.wires, shot_range=shot_range, bin_size=bin_size ) return probs[idx] if self.shots is None: try: eigvals = self._asarray(observable.eigvals, dtype=self.R_DTYPE) except NotImplementedError as e: raise ValueError( f"Cannot compute analytic expectations of {observable.name}." 
) from e prob = self.probability(wires=observable.wires) return self._dot(eigvals, prob) samples = self.sample(observable, shot_range=shot_range, bin_size=bin_size) return np.squeeze(np.mean(samples, axis=0)) def var(self, observable, shot_range=None, bin_size=None): if observable.name == "Projector": idx = int("".join(str(i) for i in observable.parameters[0]), 2) probs = self.probability( wires=observable.wires, shot_range=shot_range, bin_size=bin_size ) return probs[idx] - probs[idx] ** 2 if self.shots is None: try: eigvals = self._asarray(observable.eigvals, dtype=self.R_DTYPE) except NotImplementedError as e: raise ValueError(f"Cannot compute analytic variance of {observable.name}.") from e prob = self.probability(wires=observable.wires) return self._dot((eigvals ** 2), prob) - self._dot(eigvals, prob) ** 2 samples = self.sample(observable, shot_range=shot_range, bin_size=bin_size) return np.squeeze(np.var(samples, axis=0)) def sample(self, observable, shot_range=None, bin_size=None): device_wires = self.map_wires(observable.wires) name = observable.name sample_slice = Ellipsis if shot_range is None else slice(*shot_range) if isinstance(name, str) and name in {"PauliX", "PauliY", "PauliZ", "Hadamard"}: samples = 1 - 2 * self._samples[sample_slice, device_wires[0]] elif isinstance( observable, MeasurementProcess ): if ( len(observable.wires) != 0 ): samples = self._samples[sample_slice, np.array(device_wires)] else: samples = self._samples[sample_slice] else: samples = self._samples[ sample_slice, np.array(device_wires) ] powers_of_two = 2 ** np.arange(samples.shape[-1])[::-1] indices = samples @ powers_of_two try: samples = observable.eigvals[indices] except NotImplementedError as e: raise ValueError(f"Cannot compute samples of {observable.name}.") from e if bin_size is None: return samples return samples.reshape((bin_size, -1)) def adjoint_jacobian(self, tape, starting_state=None, use_device_state=False): sum_axes = tuple(range(1, self.num_wires + 1)) 
dot_product_real = lambda b, k: self._real(qmlsum(self._conj(b) * k, axis=sum_axes)) for m in tape.measurements: if m.return_type is not qml.operation.Expectation: raise qml.QuantumFunctionError( "Adjoint differentiation method does not support" f" measurement {m.return_type.value}" ) if m.obs.name == "Hamiltonian": raise qml.QuantumFunctionError( "Adjoint differentiation method does not support Hamiltonian observables." ) if not hasattr(m.obs, "base_name"): m.obs.base_name = None if self.shots is not None: warnings.warn( "Requested adjoint differentiation to be computed with finite shots." " The derivative is always exact when using the adjoint differentiation method.", UserWarning, ) if starting_state is not None: ket = self._reshape(starting_state, [2] * self.num_wires) else: if not use_device_state: self.reset() self.execute(tape) ket = self._pre_rotated_state n_obs = len(tape.observables) bras = np.empty([n_obs] + [2] * self.num_wires, dtype=np.complex128) for kk in range(n_obs): bras[kk, ...] 
= self._apply_operation(ket, tape.observables[kk]) expanded_ops = [] for op in reversed(tape.operations): if op.num_params > 1: if isinstance(op, qml.Rot) and not op.inverse: ops = op.decompose() expanded_ops.extend(reversed(ops)) else: raise qml.QuantumFunctionError( f"The {op.name} operation is not supported using " 'the "adjoint" differentiation method' ) else: if op.name not in ("QubitStateVector", "BasisState"): expanded_ops.append(op) jac = np.zeros((len(tape.observables), len(tape.trainable_params))) param_number = len(tape._par_info) - 1 trainable_param_number = len(tape.trainable_params) - 1 for op in expanded_ops: if (op.grad_method is not None) and (param_number in tape.trainable_params): d_op_matrix = operation_derivative(op) op.inv() ket = self._apply_operation(ket, op) if op.grad_method is not None: if param_number in tape.trainable_params: ket_temp = self._apply_unitary(ket, d_op_matrix, op.wires) jac[:, trainable_param_number] = 2 * dot_product_real(bras, ket_temp) trainable_param_number -= 1 param_number -= 1 for kk in range(n_obs): bras[kk, ...] = self._apply_operation(bras[kk, ...], op) op.inv() return jac
true
true
f7f6a12969b425226caab5681ad8c73e5043cceb
45,974
py
Python
google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
lclc19/python-aiplatform
d8da2e365277441abadb04328943f23345d72b0e
[ "Apache-2.0" ]
null
null
null
google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
lclc19/python-aiplatform
d8da2e365277441abadb04328943f23345d72b0e
[ "Apache-2.0" ]
null
null
null
google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
lclc19/python-aiplatform
d8da2e365277441abadb04328943f23345d72b0e
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import functools import re from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import pipeline_job from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline from google.protobuf import empty_pb2 # type: ignore from google.protobuf import 
struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport from .client import PipelineServiceClient class PipelineServiceAsyncClient: """A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and custom training) and ``PipelineJob`` resources (used for Vertex Pipelines). """ _client: PipelineServiceClient DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT artifact_path = staticmethod(PipelineServiceClient.artifact_path) parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) context_path = staticmethod(PipelineServiceClient.context_path) parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) execution_path = staticmethod(PipelineServiceClient.execution_path) parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) model_path = staticmethod(PipelineServiceClient.model_path) parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) network_path = staticmethod(PipelineServiceClient.network_path) parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) parse_pipeline_job_path = staticmethod( PipelineServiceClient.parse_pipeline_job_path ) training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) 
parse_training_pipeline_path = staticmethod( PipelineServiceClient.parse_training_pipeline_path ) common_billing_account_path = staticmethod( PipelineServiceClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( PipelineServiceClient.parse_common_billing_account_path ) common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) parse_common_folder_path = staticmethod( PipelineServiceClient.parse_common_folder_path ) common_organization_path = staticmethod( PipelineServiceClient.common_organization_path ) parse_common_organization_path = staticmethod( PipelineServiceClient.parse_common_organization_path ) common_project_path = staticmethod(PipelineServiceClient.common_project_path) parse_common_project_path = staticmethod( PipelineServiceClient.parse_common_project_path ) common_location_path = staticmethod(PipelineServiceClient.common_location_path) parse_common_location_path = staticmethod( PipelineServiceClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: PipelineServiceAsyncClient: The constructed client. """ return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: PipelineServiceAsyncClient: The constructed client. 
""" return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @property def transport(self) -> PipelineServiceTransport: """Returns the transport used by the client instance. Returns: PipelineServiceTransport: The transport used by the client instance. """ return self._client.transport get_transport_class = functools.partial( type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient) ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the pipeline service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.PipelineServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. 
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ self._client = PipelineServiceClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def create_training_pipeline( self, request: pipeline_service.CreateTrainingPipelineRequest = None, *, parent: str = None, training_pipeline: gca_training_pipeline.TrainingPipeline = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gca_training_pipeline.TrainingPipeline: r"""Creates a TrainingPipeline. A created TrainingPipeline right away will be attempted to be run. Args: request (:class:`google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest`): The request object. Request message for [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. parent (:class:`str`): Required. The resource name of the Location to create the TrainingPipeline in. Format: ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. training_pipeline (:class:`google.cloud.aiplatform_v1.types.TrainingPipeline`): Required. The TrainingPipeline to create. This corresponds to the ``training_pipeline`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.aiplatform_v1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, training_pipeline]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.CreateTrainingPipelineRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if training_pipeline is not None: request.training_pipeline = training_pipeline # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response async def get_training_pipeline( self, request: pipeline_service.GetTrainingPipelineRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> training_pipeline.TrainingPipeline: r"""Gets a TrainingPipeline. Args: request (:class:`google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest`): The request object. Request message for [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline resource. Format: ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.aiplatform_v1.types.TrainingPipeline: The TrainingPipeline orchestrates tasks associated with training a Model. It always executes the training task, and optionally may also export data from Vertex AI's Dataset which becomes the training input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] the Model to Vertex AI, and evaluate the Model. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.GetTrainingPipelineRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def list_training_pipelines( self, request: pipeline_service.ListTrainingPipelinesRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTrainingPipelinesAsyncPager: r"""Lists TrainingPipelines in a Location. Args: request (:class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest`): The request object. Request message for [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. parent (:class:`str`): Required. The resource name of the Location to list the TrainingPipelines from. Format: ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: Response message for [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.ListTrainingPipelinesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_training_pipelines, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListTrainingPipelinesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. 
return response async def delete_training_pipeline( self, request: pipeline_service.DeleteTrainingPipelineRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a TrainingPipeline. Args: request (:class:`google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest`): The request object. Request message for [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline resource to be deleted. Format: ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.DeleteTrainingPipelineRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, empty_pb2.Empty, metadata_type=gca_operation.DeleteOperationMetadata, ) # Done; return the response. return response async def cancel_training_pipeline( self, request: pipeline_service.CancelTrainingPipelineRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. 
On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest`): The request object. Request message for [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. name (:class:`str`): Required. The name of the TrainingPipeline to cancel. Format: ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.CancelTrainingPipelineRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) async def create_pipeline_job( self, request: pipeline_service.CreatePipelineJobRequest = None, *, parent: str = None, pipeline_job: gca_pipeline_job.PipelineJob = None, pipeline_job_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gca_pipeline_job.PipelineJob: r"""Creates a PipelineJob. A PipelineJob will run immediately when created. Args: request (:class:`google.cloud.aiplatform_v1.types.CreatePipelineJobRequest`): The request object. Request message for [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. parent (:class:`str`): Required. The resource name of the Location to create the PipelineJob in. Format: ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. pipeline_job (:class:`google.cloud.aiplatform_v1.types.PipelineJob`): Required. The PipelineJob to create. This corresponds to the ``pipeline_job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. pipeline_job_id (:class:`str`): The ID to use for the PipelineJob, which will become the final component of the PipelineJob name. If not provided, an ID will be automatically generated. This value should be less than 128 characters, and valid characters are /[a-z][0-9]-/. This corresponds to the ``pipeline_job_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.aiplatform_v1.types.PipelineJob: An instance of a machine learning PipelineJob. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.CreatePipelineJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if pipeline_job is not None: request.pipeline_job = pipeline_job if pipeline_job_id is not None: request.pipeline_job_id = pipeline_job_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def get_pipeline_job( self, request: pipeline_service.GetPipelineJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pipeline_job.PipelineJob: r"""Gets a PipelineJob. 
Args: request (:class:`google.cloud.aiplatform_v1.types.GetPipelineJobRequest`): The request object. Request message for [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. name (:class:`str`): Required. The name of the PipelineJob resource. Format: ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.aiplatform_v1.types.PipelineJob: An instance of a machine learning PipelineJob. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.GetPipelineJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response async def list_pipeline_jobs( self, request: pipeline_service.ListPipelineJobsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListPipelineJobsAsyncPager: r"""Lists PipelineJobs in a Location. Args: request (:class:`google.cloud.aiplatform_v1.types.ListPipelineJobsRequest`): The request object. Request message for [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. parent (:class:`str`): Required. The resource name of the Location to list the PipelineJobs from. Format: ``projects/{project}/locations/{location}`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: Response message for [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.ListPipelineJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_pipeline_jobs, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListPipelineJobsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def delete_pipeline_job( self, request: pipeline_service.DeletePipelineJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a PipelineJob. Args: request (:class:`google.cloud.aiplatform_v1.types.DeletePipelineJobRequest`): The request object. Request message for [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. name (:class:`str`): Required. The name of the PipelineJob resource to be deleted. Format: ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. 
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is empty JSON object {}. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.DeletePipelineJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, empty_pb2.Empty, metadata_type=gca_operation.DeleteOperationMetadata, ) # Done; return the response. 
return response async def cancel_pipeline_job( self, request: pipeline_service.CancelPipelineJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Cancels a PipelineJob. Starts asynchronous cancellation on the PipelineJob. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the PipelineJob is not deleted; instead it becomes a pipeline with a [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to ``CANCELLED``. Args: request (:class:`google.cloud.aiplatform_v1.types.CancelPipelineJobRequest`): The request object. Request message for [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. name (:class:`str`): Required. The name of the PipelineJob to cancel. Format: ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.CancelPipelineJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) async def __aenter__(self): return self async def __aexit__(self, exc_type, exc, tb): await self.transport.close() try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-aiplatform", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("PipelineServiceAsyncClient",)
43.784762
171
0.644408
from collections import OrderedDict import functools import re from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.oauth2 import service_account from google.api_core import operation as gac_operation from google.api_core import operation_async from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import pipeline_job from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline from google.protobuf import empty_pb2 from google.protobuf import struct_pb2 from google.protobuf import timestamp_pb2 from google.rpc import status_pb2 from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport from .client import PipelineServiceClient class PipelineServiceAsyncClient: _client: PipelineServiceClient DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT artifact_path = staticmethod(PipelineServiceClient.artifact_path) parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) context_path = staticmethod(PipelineServiceClient.context_path) parse_context_path = 
staticmethod(PipelineServiceClient.parse_context_path) custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) execution_path = staticmethod(PipelineServiceClient.execution_path) parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) model_path = staticmethod(PipelineServiceClient.model_path) parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) network_path = staticmethod(PipelineServiceClient.network_path) parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) parse_pipeline_job_path = staticmethod( PipelineServiceClient.parse_pipeline_job_path ) training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) parse_training_pipeline_path = staticmethod( PipelineServiceClient.parse_training_pipeline_path ) common_billing_account_path = staticmethod( PipelineServiceClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( PipelineServiceClient.parse_common_billing_account_path ) common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) parse_common_folder_path = staticmethod( PipelineServiceClient.parse_common_folder_path ) common_organization_path = staticmethod( PipelineServiceClient.common_organization_path ) parse_common_organization_path = staticmethod( PipelineServiceClient.parse_common_organization_path ) common_project_path = staticmethod(PipelineServiceClient.common_project_path) parse_common_project_path = staticmethod( PipelineServiceClient.parse_common_project_path ) common_location_path = staticmethod(PipelineServiceClient.common_location_path) parse_common_location_path = staticmethod( 
PipelineServiceClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> PipelineServiceTransport: return self._client.transport get_transport_class = functools.partial( type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient) ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: self._client = PipelineServiceClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def create_training_pipeline( self, request: pipeline_service.CreateTrainingPipelineRequest = None, *, parent: str = None, training_pipeline: gca_training_pipeline.TrainingPipeline = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gca_training_pipeline.TrainingPipeline: has_flattened_params = any([parent, training_pipeline]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = pipeline_service.CreateTrainingPipelineRequest(request) if parent is not None: request.parent = parent if training_pipeline is not None: request.training_pipeline = training_pipeline rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def get_training_pipeline( self, request: pipeline_service.GetTrainingPipelineRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> training_pipeline.TrainingPipeline: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.GetTrainingPipelineRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def list_training_pipelines( self, request: pipeline_service.ListTrainingPipelinesRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTrainingPipelinesAsyncPager: has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = pipeline_service.ListTrainingPipelinesRequest(request) if parent is not None: request.parent = parent rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_training_pipelines, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = pagers.ListTrainingPipelinesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) return response async def delete_training_pipeline( self, request: pipeline_service.DeleteTrainingPipelineRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = pipeline_service.DeleteTrainingPipelineRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = operation_async.from_gapic( response, self._client._transport.operations_client, empty_pb2.Empty, metadata_type=gca_operation.DeleteOperationMetadata, ) return response async def cancel_training_pipeline( self, request: pipeline_service.CancelTrainingPipelineRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = pipeline_service.CancelTrainingPipelineRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_training_pipeline, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) async def create_pipeline_job( self, request: pipeline_service.CreatePipelineJobRequest = None, *, parent: str = None, pipeline_job: gca_pipeline_job.PipelineJob = None, pipeline_job_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gca_pipeline_job.PipelineJob: has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = pipeline_service.CreatePipelineJobRequest(request) if parent is not None: request.parent = parent if pipeline_job is not None: request.pipeline_job = pipeline_job if pipeline_job_id is not None: request.pipeline_job_id = pipeline_job_id rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def get_pipeline_job( self, request: pipeline_service.GetPipelineJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pipeline_job.PipelineJob: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.GetPipelineJobRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def list_pipeline_jobs( self, request: pipeline_service.ListPipelineJobsRequest = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListPipelineJobsAsyncPager: has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = pipeline_service.ListPipelineJobsRequest(request) if parent is not None: request.parent = parent rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_pipeline_jobs, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = pagers.ListPipelineJobsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) return response async def delete_pipeline_job( self, request: pipeline_service.DeletePipelineJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = pipeline_service.DeletePipelineJobRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = operation_async.from_gapic( response, self._client._transport.operations_client, empty_pb2.Empty, metadata_type=gca_operation.DeleteOperationMetadata, ) return response async def cancel_pipeline_job( self, request: pipeline_service.CancelPipelineJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = pipeline_service.CancelPipelineJobRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_pipeline_job, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) async def __aenter__(self): return self async def __aexit__(self, exc_type, exc, tb): await self.transport.close() try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-aiplatform", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("PipelineServiceAsyncClient",)
true
true
f7f6a1535f2bc32559841ab599dd79a10a0f6f58
401
py
Python
simiir/stopping_decision_makers/base_decision_maker.py
ArthurCamara/simiir_subtopics
ff4d826f53c9a8748765816335be6b22adde83a2
[ "MIT" ]
null
null
null
simiir/stopping_decision_makers/base_decision_maker.py
ArthurCamara/simiir_subtopics
ff4d826f53c9a8748765816335be6b22adde83a2
[ "MIT" ]
null
null
null
simiir/stopping_decision_makers/base_decision_maker.py
ArthurCamara/simiir_subtopics
ff4d826f53c9a8748765816335be6b22adde83a2
[ "MIT" ]
null
null
null
import abc class BaseDecisionMaker(object): """ """ def __init__(self, search_context, logger): self._search_context = search_context self._logger = logger @abc.abstractmethod def decide(self): """ Abstract method - must be implemented by an inheriting class. Returns an action - from the loggers.Actions enum. """ pass
20.05
69
0.613466
import abc class BaseDecisionMaker(object): def __init__(self, search_context, logger): self._search_context = search_context self._logger = logger @abc.abstractmethod def decide(self): pass
true
true
f7f6a15c99b591b56794820778921e4b4e10b074
2,643
py
Python
playframework-dist/play-1.1/python/Lib/types.py
ericlink/adms-server
245f6d38197f195fd0cae8b93718f9ffcffb206a
[ "MIT" ]
1
2017-03-28T06:41:51.000Z
2017-03-28T06:41:51.000Z
playframework-dist/play-1.1/python/Lib/types.py
ericlink/adms-server
245f6d38197f195fd0cae8b93718f9ffcffb206a
[ "MIT" ]
1
2019-08-16T12:59:01.000Z
2019-08-18T06:36:47.000Z
playframework-dist/play-1.1/python/Lib/types.py
ericlink/adms-server
245f6d38197f195fd0cae8b93718f9ffcffb206a
[ "MIT" ]
1
2016-12-13T21:08:58.000Z
2016-12-13T21:08:58.000Z
"""Define names for all type symbols known in the standard interpreter. Types that are part of optional modules (e.g. array) are not listed. """ import sys # Iterators in Python aren't a matter of type but of protocol. A large # and changing number of builtin types implement *some* flavor of # iterator. Don't check the type! Use hasattr to check for both # "__iter__" and "next" attributes instead. NoneType = type(None) TypeType = type ObjectType = object IntType = int LongType = long FloatType = float BooleanType = bool try: ComplexType = complex except NameError: pass StringType = str # StringTypes is already outdated. Instead of writing "type(x) in # types.StringTypes", you should use "isinstance(x, basestring)". But # we keep around for compatibility with Python 2.2. try: UnicodeType = unicode StringTypes = (StringType, UnicodeType) except NameError: StringTypes = (StringType,) BufferType = buffer TupleType = tuple ListType = list DictType = DictionaryType = dict def _f(): pass FunctionType = type(_f) LambdaType = type(lambda: None) # Same as FunctionType try: CodeType = type(_f.func_code) except RuntimeError: # Execution in restricted environment pass def _g(): yield 1 GeneratorType = type(_g()) class _C: def _m(self): pass ClassType = type(_C) UnboundMethodType = type(_C._m) # Same as MethodType _x = _C() InstanceType = type(_x) MethodType = type(_x._m) BuiltinFunctionType = type(len) BuiltinMethodType = type([].append) # Same as BuiltinFunctionType ModuleType = type(sys) FileType = file XRangeType = xrange try: raise TypeError except TypeError: try: tb = sys.exc_info()[2] TracebackType = type(tb) FrameType = type(tb.tb_frame) except AttributeError: # In the restricted environment, exc_info returns (None, None, # None) Then, tb.tb_frame gives an attribute error pass tb = None; del tb SliceType = slice EllipsisType = type(Ellipsis) DictProxyType = type(TypeType.__dict__) NotImplementedType = type(NotImplemented) # Extension types defined in a C helper module. 
XXX There may be no # equivalent in implementations other than CPython, so it seems better to # leave them undefined then to set them to e.g. None. try: import _types except ImportError: pass else: GetSetDescriptorType = type(_types.Helper.getter) MemberDescriptorType = type(_types.Helper.member) del _types del sys, _f, _g, _C, _x # Not for export
25.911765
74
0.681423
import sys # and changing number of builtin types implement *some* flavor of # iterator. Don't check the type! Use hasattr to check for both NoneType = type(None) TypeType = type ObjectType = object IntType = int LongType = long FloatType = float BooleanType = bool try: ComplexType = complex except NameError: pass StringType = str # types.StringTypes", you should use "isinstance(x, basestring)". But try: UnicodeType = unicode StringTypes = (StringType, UnicodeType) except NameError: StringTypes = (StringType,) BufferType = buffer TupleType = tuple ListType = list DictType = DictionaryType = dict def _f(): pass FunctionType = type(_f) LambdaType = type(lambda: None) try: CodeType = type(_f.func_code) except RuntimeError: pass def _g(): yield 1 GeneratorType = type(_g()) class _C: def _m(self): pass ClassType = type(_C) UnboundMethodType = type(_C._m) _x = _C() InstanceType = type(_x) MethodType = type(_x._m) BuiltinFunctionType = type(len) BuiltinMethodType = type([].append) ModuleType = type(sys) FileType = file XRangeType = xrange try: raise TypeError except TypeError: try: tb = sys.exc_info()[2] TracebackType = type(tb) FrameType = type(tb.tb_frame) except AttributeError: pass tb = None; del tb SliceType = slice EllipsisType = type(Ellipsis) DictProxyType = type(TypeType.__dict__) NotImplementedType = type(NotImplemented) try: import _types except ImportError: pass else: GetSetDescriptorType = type(_types.Helper.getter) MemberDescriptorType = type(_types.Helper.member) del _types del sys, _f, _g, _C, _x
true
true
f7f6a227bae70b21d2a6f1aea0eb6fc35ea5e364
3,557
py
Python
kata_solution/programming_101/_3/programming_101.py
jrj92280/python-kata
6207af1d7898df21dc9d35dde906fb3486375a2e
[ "MIT" ]
3
2018-07-19T21:37:33.000Z
2019-05-04T00:40:23.000Z
kata_solution/programming_101/_3/programming_101.py
jrj92280/python-kata
6207af1d7898df21dc9d35dde906fb3486375a2e
[ "MIT" ]
null
null
null
kata_solution/programming_101/_3/programming_101.py
jrj92280/python-kata
6207af1d7898df21dc9d35dde906fb3486375a2e
[ "MIT" ]
null
null
null
# lines that start with '#' are a comment # multiple line comments start and end with three double quotes """ multiline comment """ # in PyCharm, right click and select run 'programming_101' to execute print('Hello, world!') # ----------------- # PRIMITIVE DATA TYPES # ----------------- """ str - string 'hello world' PRACTICE: Print a string to the console """ print('Hello, world!') """ bool - boolean True/False PRACTICE: Print a boolean value to the console """ print(True) """ int - integer 0 PRACTICE: Print a int to the console """ print(8) """ float - decimal 0.0 PRACTICE: Print a float to the console """ print(3.5) # ----------------- # COMPLEX DATA TYPES # ----------------- """ list [], ['word'] PRACTICE: Create a list that contains the names of three people """ list = ['Jason', 'Justin', 'Josh'] print list """ dict {}, {'key': 'value} PRACTICE: Create a dictionary that is name as a key and gender as the value """ genders = {'Justin': 'male'} str(genders) print genders # ----------------- # CASTING # ----------------- """ data types can be forced from one type to another under certain conditions str() bool() float() int() PRACTICE: 1. Add the string of '1' to the int of 1 to output the string '11' 2. Add the int of 1 to the string of '1' to output the int 2 3. Cast an empty string value to a boolean to output False 4. Cast any non-empty string value to a boolean to output True 5. 
Cast the result of 3/2 to a float values to produce an output of 1.5 """ print ('1' + str(1)) print (int('1') + 1) print (bool('0')) print (float(3) / 2) # ----------------- # SCOPE # ----------------- # white space in Python defines scope # block of code associated with a control structure # ----------------- # CONTROL STRUCTURES # ----------------- """ -- variable assignment -- = my_variable = 'value' PRACTICE: Create a variable with a name of your choosing that contains the value 'hello' """ fish_taco = 'hello' """ -- logical -- if <bool>: <action> elif <bool>: <action> else <bool>: <action> PRACTICE: Write an if check to see if a value is equal to 'hello world', if true print 'hello', else print 'bye' """ print(bool(fish_taco != 'helloworld')) if fish_taco != 'helloworld': print ('hello') else: print ('bye') """ -- exception handling -- try <expression>: <action> except [error_type]: <handle error> PRACTICE: In a try block, divide 1 by 0, except a ZeroDivisionError and print 'zero divison error' """ """ -- while loops -- while <bool>: <action> PRACTICE: """ # --for loops-- # for {variable_name} in <collection>: # <action> # --comparisons-- # == -> equals # != -> not equals # > -> greater than # >= -> greater than equal # < -> less than # <= -> les than equal """ PRACTICE: print each letter in a given string """ """ PRACTICE: create a function that takes an input, then prints each character of the input """ """ PRACTICE: create a function that takes two inputs, then prints True/False whether or not the first input is contained within the second input """ # print(search_string('a', text_value)) # False # print(search_string('s', text_value)) # True # print(search_string('S', text_value)) # False """ PRACTICE: Create a diction that contains a list of employee titles stored by the name, then print each record such that each employee name and title is printed on a line """
19.761111
101
0.608659
""" multiline comment """ print('Hello, world!') """ str - string 'hello world' PRACTICE: Print a string to the console """ print('Hello, world!') """ bool - boolean True/False PRACTICE: Print a boolean value to the console """ print(True) """ int - integer 0 PRACTICE: Print a int to the console """ print(8) """ float - decimal 0.0 PRACTICE: Print a float to the console """ print(3.5) """ list [], ['word'] PRACTICE: Create a list that contains the names of three people """ list = ['Jason', 'Justin', 'Josh'] print list """ dict {}, {'key': 'value} PRACTICE: Create a dictionary that is name as a key and gender as the value """ genders = {'Justin': 'male'} str(genders) print genders # ----------------- # CASTING # ----------------- """ data types can be forced from one type to another under certain conditions str() bool() float() int() PRACTICE: 1. Add the string of '1' to the int of 1 to output the string '11' 2. Add the int of 1 to the string of '1' to output the int 2 3. Cast an empty string value to a boolean to output False 4. Cast any non-empty string value to a boolean to output True 5. 
Cast the result of 3/2 to a float values to produce an output of 1.5 """ print ('1' + str(1)) print (int('1') + 1) print (bool('0')) print (float(3) / 2) # ----------------- # SCOPE # ----------------- # white space in Python defines scope # block of code associated with a control structure # ----------------- # CONTROL STRUCTURES # ----------------- """ -- variable assignment -- = my_variable = 'value' PRACTICE: Create a variable with a name of your choosing that contains the value 'hello' """ fish_taco = 'hello' """ -- logical -- if <bool>: <action> elif <bool>: <action> else <bool>: <action> PRACTICE: Write an if check to see if a value is equal to 'hello world', if true print 'hello', else print 'bye' """ print(bool(fish_taco != 'helloworld')) if fish_taco != 'helloworld': print ('hello') else: print ('bye') """ -- exception handling -- try <expression>: <action> except [error_type]: <handle error> PRACTICE: In a try block, divide 1 by 0, except a ZeroDivisionError and print 'zero divison error' """ """ -- while loops -- while <bool>: <action> PRACTICE: """ # --for loops-- # for {variable_name} in <collection>: # <action> # --comparisons-- # == -> equals # != -> not equals # > -> greater than # >= -> greater than equal # < -> less than # <= -> les than equal """ PRACTICE: print each letter in a given string """ """ PRACTICE: create a function that takes an input, then prints each character of the input """ """ PRACTICE: create a function that takes two inputs, then prints True/False whether or not the first input is contained within the second input """ # print(search_string('a', text_value)) # False # print(search_string('s', text_value)) # True # print(search_string('S', text_value)) # False """ PRACTICE: Create a diction that contains a list of employee titles stored by the name, then print each record such that each employee name and title is printed on a line """
false
true
f7f6a29203480b8b8f100b6e6a19ebe0d985261f
2,622
py
Python
produto/models.py
victorsantosok/ecommerce-cirio
85120ce43838f19e2907937c84b819dcd9088246
[ "MIT" ]
null
null
null
produto/models.py
victorsantosok/ecommerce-cirio
85120ce43838f19e2907937c84b819dcd9088246
[ "MIT" ]
null
null
null
produto/models.py
victorsantosok/ecommerce-cirio
85120ce43838f19e2907937c84b819dcd9088246
[ "MIT" ]
null
null
null
from django.db import models from PIL import Image import os from django.utils.text import slugify from django.conf import settings from utils import utils from django.forms import ValidationError class Produto(models.Model): # informar ao usuario parametros nome = models.CharField(max_length=255) descricao_curta = models.TextField(max_length=255) descricao_longa = models.TextField(max_length=510) imagem = models.ImageField( upload_to='produto_imagens/%Y/%m/', blank=True, null=True) slug = models.SlugField(unique=True, blank=True, null=True) preco_marketing = models.FloatField(verbose_name='Preço') preco_marketing_promocional = models.FloatField( default=0, verbose_name='Preço Promo.') tipo = models.CharField( default='V', max_length=1, choices=( ('V', 'Variável'), ('S', 'Simples'), ) ) def get_preco_formatado(self): return utils.formata_preco(self.preco_marketing) get_preco_formatado.short_description = 'Preço' def get_preco_promocional_formatado(self): return utils.formata_preco(self.preco_marketing_promocional) get_preco_promocional_formatado.short_description = 'Preço Promo.' 
@staticmethod def resize_image(img, new_width=800): img_full_path = os.path.join(settings.MEDIA_ROOT, img.name) img_pil = Image.open(img_full_path) original_width, original_height = img_pil.size if original_width <= new_width: img_pil.close() return new_height = round((new_width * original_height) / original_width) new_img = img_pil.resize((new_width, new_height), Image.LANCZOS) new_img.save( img_full_path, optimize=True, quality=50 ) def save(self, *args, **kwargs): if not self.slug: slug = f'{slugify(self.nome)}' self.slug = slug super().save(*args, **kwargs) max_image_size = 800 if self.imagem: self.resize_image(self.imagem, max_image_size) def __str__(self): return self.nome class Variacao(models.Model): produto = models.ForeignKey(Produto, on_delete=models.CASCADE) nome = models.CharField(max_length=50, blank=True, null=True) preco = models.FloatField() preco_promocional = models.FloatField(default=0) estoque = models.PositiveIntegerField(default=1) def __str__(self): return self.nome or self.produto.nome class Meta: verbose_name = 'Variação' verbose_name_plural = 'Variações'
30.488372
74
0.667811
from django.db import models from PIL import Image import os from django.utils.text import slugify from django.conf import settings from utils import utils from django.forms import ValidationError class Produto(models.Model): nome = models.CharField(max_length=255) descricao_curta = models.TextField(max_length=255) descricao_longa = models.TextField(max_length=510) imagem = models.ImageField( upload_to='produto_imagens/%Y/%m/', blank=True, null=True) slug = models.SlugField(unique=True, blank=True, null=True) preco_marketing = models.FloatField(verbose_name='Preço') preco_marketing_promocional = models.FloatField( default=0, verbose_name='Preço Promo.') tipo = models.CharField( default='V', max_length=1, choices=( ('V', 'Variável'), ('S', 'Simples'), ) ) def get_preco_formatado(self): return utils.formata_preco(self.preco_marketing) get_preco_formatado.short_description = 'Preço' def get_preco_promocional_formatado(self): return utils.formata_preco(self.preco_marketing_promocional) get_preco_promocional_formatado.short_description = 'Preço Promo.' 
@staticmethod def resize_image(img, new_width=800): img_full_path = os.path.join(settings.MEDIA_ROOT, img.name) img_pil = Image.open(img_full_path) original_width, original_height = img_pil.size if original_width <= new_width: img_pil.close() return new_height = round((new_width * original_height) / original_width) new_img = img_pil.resize((new_width, new_height), Image.LANCZOS) new_img.save( img_full_path, optimize=True, quality=50 ) def save(self, *args, **kwargs): if not self.slug: slug = f'{slugify(self.nome)}' self.slug = slug super().save(*args, **kwargs) max_image_size = 800 if self.imagem: self.resize_image(self.imagem, max_image_size) def __str__(self): return self.nome class Variacao(models.Model): produto = models.ForeignKey(Produto, on_delete=models.CASCADE) nome = models.CharField(max_length=50, blank=True, null=True) preco = models.FloatField() preco_promocional = models.FloatField(default=0) estoque = models.PositiveIntegerField(default=1) def __str__(self): return self.nome or self.produto.nome class Meta: verbose_name = 'Variação' verbose_name_plural = 'Variações'
true
true
f7f6a30d6db165c2c11298a4ffbdb5881ff47eb7
1,468
py
Python
obj_sys/middleware.py
weijia/obj_sys
7654a84f155f8e0da942f980d06c6ada34a6d71e
[ "BSD-3-Clause" ]
null
null
null
obj_sys/middleware.py
weijia/obj_sys
7654a84f155f8e0da942f980d06c6ada34a6d71e
[ "BSD-3-Clause" ]
null
null
null
obj_sys/middleware.py
weijia/obj_sys
7654a84f155f8e0da942f980d06c6ada34a6d71e
[ "BSD-3-Clause" ]
null
null
null
import re from django.utils.text import compress_string from django.utils.cache import patch_vary_headers from django import http try: import settings XS_SHARING_ALLOWED_ORIGINS = settings.XS_SHARING_ALLOWED_ORIGINS XS_SHARING_ALLOWED_METHODS = settings.XS_SHARING_ALLOWED_METHODS except: XS_SHARING_ALLOWED_ORIGINS = '*' XS_SHARING_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE'] # From https://gist.github.com/barrabinfc/426829 class XsSharingMiddleware(object): """ This middleware allows cross-domain XHR using the html5 postMessage API. Access-Control-Allow-Origin: http://foo.example Access-Control-Allow-Methods: POST, GET, OPTIONS, PUT, DELETE """ def process_request(self, request): if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META: response = http.HttpResponse() response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS) return response return None def process_response(self, request, response): # Avoid unnecessary work if response.has_header('Access-Control-Allow-Origin'): return response response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS) return response
31.913043
91
0.71049
import re from django.utils.text import compress_string from django.utils.cache import patch_vary_headers from django import http try: import settings XS_SHARING_ALLOWED_ORIGINS = settings.XS_SHARING_ALLOWED_ORIGINS XS_SHARING_ALLOWED_METHODS = settings.XS_SHARING_ALLOWED_METHODS except: XS_SHARING_ALLOWED_ORIGINS = '*' XS_SHARING_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE'] class XsSharingMiddleware(object): def process_request(self, request): if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META: response = http.HttpResponse() response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS) return response return None def process_response(self, request, response): if response.has_header('Access-Control-Allow-Origin'): return response response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS) return response
true
true
f7f6a7a07908f2ca7b06cdbc19d5d60fffb80f65
773
py
Python
controle_gastos_app/serializer.py
victorpecine/api_controle_gastos
03efcc728857c33aa6fd49ee72d82135c7e8da85
[ "MIT" ]
null
null
null
controle_gastos_app/serializer.py
victorpecine/api_controle_gastos
03efcc728857c33aa6fd49ee72d82135c7e8da85
[ "MIT" ]
null
null
null
controle_gastos_app/serializer.py
victorpecine/api_controle_gastos
03efcc728857c33aa6fd49ee72d82135c7e8da85
[ "MIT" ]
null
null
null
from rest_framework import serializers from controle_gastos_app.models import Categoria, Estabelecimento, Gasto, FormaPagamento , Usuario class GastoSerializer(serializers.ModelSerializer): class Meta: model = Gasto fields = '__all__' class CategoriaSerializer(serializers.ModelSerializer): class Meta: model = Categoria fields = '__all__' class EstabelecimentoSerializer(serializers.ModelSerializer): class Meta: model = Estabelecimento fields = '__all__' class PagamentoSerializer(serializers.ModelSerializer): class Meta: model = FormaPagamento fields = '__all__' class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields = '__all__'
27.607143
98
0.717982
from rest_framework import serializers from controle_gastos_app.models import Categoria, Estabelecimento, Gasto, FormaPagamento , Usuario class GastoSerializer(serializers.ModelSerializer): class Meta: model = Gasto fields = '__all__' class CategoriaSerializer(serializers.ModelSerializer): class Meta: model = Categoria fields = '__all__' class EstabelecimentoSerializer(serializers.ModelSerializer): class Meta: model = Estabelecimento fields = '__all__' class PagamentoSerializer(serializers.ModelSerializer): class Meta: model = FormaPagamento fields = '__all__' class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields = '__all__'
true
true
f7f6a858396e02a25b3bc72f75500826e2b0a527
187
py
Python
test/test_login.py
EkaterinaPentjuhina/python_training_mantis
57736c91802f443b446c61f10313fa36c0bb9e79
[ "Apache-2.0" ]
null
null
null
test/test_login.py
EkaterinaPentjuhina/python_training_mantis
57736c91802f443b446c61f10313fa36c0bb9e79
[ "Apache-2.0" ]
null
null
null
test/test_login.py
EkaterinaPentjuhina/python_training_mantis
57736c91802f443b446c61f10313fa36c0bb9e79
[ "Apache-2.0" ]
null
null
null
def test_login(app): if app.session.is_logged_in(): app.session.logout() app.session.login("administrator", "root") assert app.session.is_logged_in_as("administrator")
37.4
55
0.705882
def test_login(app): if app.session.is_logged_in(): app.session.logout() app.session.login("administrator", "root") assert app.session.is_logged_in_as("administrator")
true
true
f7f6a946f11079c36e243f033ea5ba184781cbda
2,678
py
Python
09_multiprocessing/prime_validation/primes_pool_per_number_value_withinit.py
ralphribeiro/high_performance_python_2e
a486628d4690ad6a4cf30912295e7b11a2490a2d
[ "RSA-MD" ]
null
null
null
09_multiprocessing/prime_validation/primes_pool_per_number_value_withinit.py
ralphribeiro/high_performance_python_2e
a486628d4690ad6a4cf30912295e7b11a2490a2d
[ "RSA-MD" ]
null
null
null
09_multiprocessing/prime_validation/primes_pool_per_number_value_withinit.py
ralphribeiro/high_performance_python_2e
a486628d4690ad6a4cf30912295e7b11a2490a2d
[ "RSA-MD" ]
null
null
null
"""Check primality by splitting the list of factors with early prime check and Value""" import math import timeit from multiprocessing import Pool import multiprocessing import create_range SERIAL_CHECK_CUTOFF = 21 CHECK_EVERY = 1000 FLAG_CLEAR = b'0' FLAG_SET = b'1' print("CHECK_EVERY", CHECK_EVERY) # global value, to be shared between forked processes value = None def init(val): ''' store the counter for later use ''' global value value = val def check_prime_in_range(n_from_i_to_i): (n, (from_i, to_i)) = n_from_i_to_i global value if n % 2 == 0: return False assert from_i % 2 != 0 check_every = CHECK_EVERY for i in range(from_i, int(to_i), 2): check_every -= 1 if not check_every: if value.value == FLAG_SET: return False check_every = CHECK_EVERY if n % i == 0: value.value = FLAG_SET return False return True def check_prime(n, pool, nbr_processes): # cheaply check high probability set of possible factors global value from_i = 3 to_i = SERIAL_CHECK_CUTOFF value.value = FLAG_CLEAR if not check_prime_in_range((n, (from_i, to_i))): return False value.value = FLAG_CLEAR from_i = to_i to_i = int(math.sqrt(n)) + 1 ranges_to_check = create_range.create(from_i, to_i, nbr_processes) ranges_to_check = zip(len(ranges_to_check) * [n], ranges_to_check) # assert len(ranges_to_check) == nbr_processes results = pool.map(check_prime_in_range, ranges_to_check) if False in results: return False return True if __name__ == "__main__": NBR_PROCESSES = 4 value = multiprocessing.RawValue('c', FLAG_CLEAR) # 1 byte character pool = Pool(processes=NBR_PROCESSES, initializer=init, initargs=(value, )) print("Testing with {} processes".format(NBR_PROCESSES)) for label, nbr in [("trivial non-prime", 112272535095295), ("expensive non-prime18_1", 100109100129100369), ("expensive non-prime18_2", 100109100129101027), #("prime", 112272535095293)]: # 15 #("prime17", 10000000002065383)] ("prime18_1", 100109100129100151), ("prime18_2", 100109100129162907)]: #("prime23", 22360679774997896964091)]: 
time_costs = timeit.repeat(stmt="check_prime({}, pool, {})".format(nbr, NBR_PROCESSES), repeat=20, number=1, setup="from __main__ import pool, check_prime") print("{:19} ({}) {: 3.6f}s".format(label, nbr, min(time_costs)))
31.880952
116
0.627334
import math import timeit from multiprocessing import Pool import multiprocessing import create_range SERIAL_CHECK_CUTOFF = 21 CHECK_EVERY = 1000 FLAG_CLEAR = b'0' FLAG_SET = b'1' print("CHECK_EVERY", CHECK_EVERY) value = None def init(val): global value value = val def check_prime_in_range(n_from_i_to_i): (n, (from_i, to_i)) = n_from_i_to_i global value if n % 2 == 0: return False assert from_i % 2 != 0 check_every = CHECK_EVERY for i in range(from_i, int(to_i), 2): check_every -= 1 if not check_every: if value.value == FLAG_SET: return False check_every = CHECK_EVERY if n % i == 0: value.value = FLAG_SET return False return True def check_prime(n, pool, nbr_processes): global value from_i = 3 to_i = SERIAL_CHECK_CUTOFF value.value = FLAG_CLEAR if not check_prime_in_range((n, (from_i, to_i))): return False value.value = FLAG_CLEAR from_i = to_i to_i = int(math.sqrt(n)) + 1 ranges_to_check = create_range.create(from_i, to_i, nbr_processes) ranges_to_check = zip(len(ranges_to_check) * [n], ranges_to_check) results = pool.map(check_prime_in_range, ranges_to_check) if False in results: return False return True if __name__ == "__main__": NBR_PROCESSES = 4 value = multiprocessing.RawValue('c', FLAG_CLEAR) pool = Pool(processes=NBR_PROCESSES, initializer=init, initargs=(value, )) print("Testing with {} processes".format(NBR_PROCESSES)) for label, nbr in [("trivial non-prime", 112272535095295), ("expensive non-prime18_1", 100109100129100369), ("expensive non-prime18_2", 100109100129101027), ("prime18_1", 100109100129100151), ("prime18_2", 100109100129162907)]: time_costs = timeit.repeat(stmt="check_prime({}, pool, {})".format(nbr, NBR_PROCESSES), repeat=20, number=1, setup="from __main__ import pool, check_prime") print("{:19} ({}) {: 3.6f}s".format(label, nbr, min(time_costs)))
true
true
f7f6a993c61e55ffce53155f2b0151ade4493610
199
py
Python
pyrat/__init__.py
gitmarek/pyrat
cbf918d5c23d5d39e62e00bb64b6d0596170c68b
[ "MIT" ]
null
null
null
pyrat/__init__.py
gitmarek/pyrat
cbf918d5c23d5d39e62e00bb64b6d0596170c68b
[ "MIT" ]
null
null
null
pyrat/__init__.py
gitmarek/pyrat
cbf918d5c23d5d39e62e00bb64b6d0596170c68b
[ "MIT" ]
null
null
null
name='pyrat' version='0.1.1' import logging LOGGING_FORMAT = '%(asctime)-15s ' + name + ' (%(process)d): %(message)s' logging.basicConfig(format=LOGGING_FORMAT) logger = logging.getLogger(__name__)
24.875
73
0.723618
name='pyrat' version='0.1.1' import logging LOGGING_FORMAT = '%(asctime)-15s ' + name + ' (%(process)d): %(message)s' logging.basicConfig(format=LOGGING_FORMAT) logger = logging.getLogger(__name__)
true
true